From 9778a78667dd607597324d2b78da872a87dcc5ed Mon Sep 17 00:00:00 2001
From: Gianluca Zuccarelli
Date: Thu, 7 Dec 2023 15:41:47 +0000
Subject: [PATCH] [wip] update resolver to check containers-storage

---
 go.mod                       | 17 +
 go.sum                       | 48 +
 pkg/container/client.go      | 30 +-
 pkg/container/client_test.go |  6 +-
 pkg/container/resolver.go    |  2 +-
 pkg/container/spec.go        | 15 +-
 [diffstat entries for the newly vendored dependency files elided:
  dario.cat/mergo, github.com/Microsoft/go-winio,
  github.com/Microsoft/hcsshim, github.com/containerd/cgroups/v3,
  github.com/containerd/containerd/errdefs,
  github.com/containerd/stargz-snapshotter/estargz,
  github.com/containers/image/v5 (directory, docker/daemon, openshift,
  ostree, sif, storage, tarball and transports/alltransports),
  github.com/containers/storage, github.com/cyphar/filepath-securejoin,
  the deprecated distribution reference shims (root path truncated in the
  original diffstat), github.com/docker/docker (api, client, errdefs),
  github.com/docker/go-connections, github.com/gogo/protobuf/proto,
  github.com/google/go-containerregistry/pkg/v1,
  github.com/google/go-intervals, github.com/mattn/go-shellwords,
  github.com/mistifyio/go-zfs/v3,
  github.com/opencontainers/go-digest/digestset,
  github.com/opencontainers/selinux, github.com/ostreedev/ostree-go,
  github.com/sylabs/sif/v2, github.com/tchap/go-patricia/v2,
  golang.org/x/{mod,net,sync,sys,tools} and vendor/modules.txt]
 958 files changed, 148422 insertions(+), 19 deletions(-)
 [per-file "create mode 100644" lines for the newly vendored files elided;
  they repeat the vendor/ paths summarized above]
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share_options.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_memory.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_processor_limits.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcserror/doc.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/doc.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hns.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hnsglobals.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hnssupport.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/interop/doc.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/jobobject/doc.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/log/context.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/log/format.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/log/hook.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/log/nopformatter.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/longpath/longpath.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/memory/pool.go create mode 100644 
vendor/github.com/Microsoft/hcsshim/internal/memory/types.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/mergemaps/merge.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/oc/span.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/protocol/guestrequest/types.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/safefile/do.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/security/grantvmgroupaccess.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/security/syscall_windows.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/security/zsyscall_windows.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/timeout/timeout.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/vmcompute/doc.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayerreader.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayerwriter.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/converttobaselayer.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/doc.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go create mode 100644 
vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/bindflt.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/cimfs.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/doc.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/elevation.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/logon.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/net.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/offlinereg.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/processor.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/user.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go create mode 100644 vendor/github.com/Microsoft/hcsshim/layer.go create mode 100644 vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go create mode 100644 vendor/github.com/Microsoft/hcsshim/osversion/platform_compat_windows.go create mode 100644 vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go create mode 100644 vendor/github.com/Microsoft/hcsshim/process.go create mode 100644 vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go create mode 100644 vendor/github.com/containerd/cgroups/v3/LICENSE create mode 100644 vendor/github.com/containerd/cgroups/v3/cgroup1/stats/doc.go create mode 100644 vendor/github.com/containerd/cgroups/v3/cgroup1/stats/metrics.pb.go create mode 100644 vendor/github.com/containerd/cgroups/v3/cgroup1/stats/metrics.pb.txt create mode 100644 vendor/github.com/containerd/cgroups/v3/cgroup1/stats/metrics.proto create mode 100644 vendor/github.com/containerd/containerd/LICENSE create mode 100644 vendor/github.com/containerd/containerd/NOTICE create mode 100644 vendor/github.com/containerd/containerd/errdefs/errors.go create mode 
100644 vendor/github.com/containerd/containerd/errdefs/grpc.go create mode 100644 vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE create mode 100644 vendor/github.com/containerd/stargz-snapshotter/estargz/build.go create mode 100644 vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go create mode 100644 vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go create mode 100644 vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go create mode 100644 vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go create mode 100644 vendor/github.com/containerd/stargz-snapshotter/estargz/types.go create mode 100644 vendor/github.com/containers/image/v5/directory/directory_dest.go create mode 100644 vendor/github.com/containers/image/v5/directory/directory_src.go create mode 100644 vendor/github.com/containers/image/v5/directory/directory_transport.go create mode 100644 vendor/github.com/containers/image/v5/docker/daemon/client.go create mode 100644 vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go create mode 100644 vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go create mode 100644 vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go create mode 100644 vendor/github.com/containers/image/v5/openshift/openshift-copies.go create mode 100644 vendor/github.com/containers/image/v5/openshift/openshift.go create mode 100644 vendor/github.com/containers/image/v5/openshift/openshift_dest.go create mode 100644 vendor/github.com/containers/image/v5/openshift/openshift_src.go create mode 100644 vendor/github.com/containers/image/v5/openshift/openshift_transport.go create mode 100644 vendor/github.com/containers/image/v5/ostree/ostree_dest.go create mode 100644 vendor/github.com/containers/image/v5/ostree/ostree_src.go create mode 100644 vendor/github.com/containers/image/v5/ostree/ostree_transport.go create mode 100644 vendor/github.com/containers/image/v5/sif/load.go create mode 100644 vendor/github.com/containers/image/v5/sif/src.go create mode 100644 vendor/github.com/containers/image/v5/sif/transport.go create mode 100644 vendor/github.com/containers/image/v5/storage/storage_dest.go create mode 100644 vendor/github.com/containers/image/v5/storage/storage_image.go create mode 100644 vendor/github.com/containers/image/v5/storage/storage_reference.go create mode 100644 vendor/github.com/containers/image/v5/storage/storage_src.go create mode 100644 vendor/github.com/containers/image/v5/storage/storage_transport.go create mode 100644 vendor/github.com/containers/image/v5/tarball/doc.go create mode 100644 vendor/github.com/containers/image/v5/tarball/tarball_reference.go create mode 100644 vendor/github.com/containers/image/v5/tarball/tarball_src.go create mode 100644 vendor/github.com/containers/image/v5/tarball/tarball_transport.go create mode 100644 vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go create mode 100644 vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon.go create mode 100644 vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon_stub.go create mode 100644 
vendor/github.com/containers/image/v5/transports/alltransports/ostree.go create mode 100644 vendor/github.com/containers/image/v5/transports/alltransports/ostree_stub.go create mode 100644 vendor/github.com/containers/image/v5/transports/alltransports/storage.go create mode 100644 vendor/github.com/containers/image/v5/transports/alltransports/storage_stub.go create mode 100644 vendor/github.com/containers/storage/.cirrus.yml create mode 100644 vendor/github.com/containers/storage/.dockerignore create mode 100644 vendor/github.com/containers/storage/.gitignore create mode 100644 vendor/github.com/containers/storage/.golangci.yml create mode 100644 vendor/github.com/containers/storage/.mailmap create mode 100644 vendor/github.com/containers/storage/CODE-OF-CONDUCT.md create mode 100644 vendor/github.com/containers/storage/CONTRIBUTING.md create mode 100644 vendor/github.com/containers/storage/Makefile create mode 100644 vendor/github.com/containers/storage/OWNERS create mode 100644 vendor/github.com/containers/storage/README.md create mode 100644 vendor/github.com/containers/storage/SECURITY.md create mode 100644 vendor/github.com/containers/storage/VERSION create mode 100644 vendor/github.com/containers/storage/check.go create mode 100644 vendor/github.com/containers/storage/containers.go create mode 100644 vendor/github.com/containers/storage/deprecated.go create mode 100644 vendor/github.com/containers/storage/drivers/aufs/aufs.go create mode 100644 vendor/github.com/containers/storage/drivers/aufs/dirs.go create mode 100644 vendor/github.com/containers/storage/drivers/aufs/mount.go create mode 100644 vendor/github.com/containers/storage/drivers/aufs/mount_linux.go create mode 100644 vendor/github.com/containers/storage/drivers/btrfs/btrfs.go create mode 100644 vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go create mode 100644 vendor/github.com/containers/storage/drivers/btrfs/version.go create mode 100644 vendor/github.com/containers/storage/drivers/btrfs/version_none.go create mode 100644 vendor/github.com/containers/storage/drivers/chown.go create mode 100644 vendor/github.com/containers/storage/drivers/chown_darwin.go create mode 100644 vendor/github.com/containers/storage/drivers/chown_unix.go create mode 100644 vendor/github.com/containers/storage/drivers/chown_windows.go create mode 100644 vendor/github.com/containers/storage/drivers/chroot_unix.go create mode 100644 vendor/github.com/containers/storage/drivers/chroot_windows.go create mode 100644 vendor/github.com/containers/storage/drivers/copy/copy_linux.go create mode 100644 vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go create mode 100644 vendor/github.com/containers/storage/drivers/counter.go create mode 100644 vendor/github.com/containers/storage/drivers/devmapper/device_setup.go create mode 100644 vendor/github.com/containers/storage/drivers/devmapper/deviceset.go create mode 100644 vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go create mode 100644 vendor/github.com/containers/storage/drivers/devmapper/driver.go create mode 100644 vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go create mode 100644 
vendor/github.com/containers/storage/drivers/devmapper/mount.go create mode 100644 vendor/github.com/containers/storage/drivers/driver.go create mode 100644 vendor/github.com/containers/storage/drivers/driver_darwin.go create mode 100644 vendor/github.com/containers/storage/drivers/driver_freebsd.go create mode 100644 vendor/github.com/containers/storage/drivers/driver_linux.go create mode 100644 vendor/github.com/containers/storage/drivers/driver_solaris.go create mode 100644 vendor/github.com/containers/storage/drivers/driver_unsupported.go create mode 100644 vendor/github.com/containers/storage/drivers/driver_windows.go create mode 100644 vendor/github.com/containers/storage/drivers/fsdiff.go create mode 100644 vendor/github.com/containers/storage/drivers/jsoniter.go create mode 100644 vendor/github.com/containers/storage/drivers/overlay/check.go create mode 100644 vendor/github.com/containers/storage/drivers/overlay/check_116.go create mode 100644 vendor/github.com/containers/storage/drivers/overlay/composefs_notsupported.go create mode 100644 vendor/github.com/containers/storage/drivers/overlay/composefs_supported.go create mode 100644 vendor/github.com/containers/storage/drivers/overlay/jsoniter.go create mode 100644 vendor/github.com/containers/storage/drivers/overlay/mount.go create mode 100644 vendor/github.com/containers/storage/drivers/overlay/overlay.go create mode 100644 vendor/github.com/containers/storage/drivers/overlay/overlay_cgo.go create mode 100644 vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go create mode 100644 vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go create mode 100644 vendor/github.com/containers/storage/drivers/overlay/randomid.go create mode 100644 vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go create mode 100644 vendor/github.com/containers/storage/drivers/quota/projectquota.go create mode 100644 vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go create mode 100644 vendor/github.com/containers/storage/drivers/register/register_aufs.go create mode 100644 vendor/github.com/containers/storage/drivers/register/register_btrfs.go create mode 100644 vendor/github.com/containers/storage/drivers/register/register_devicemapper.go create mode 100644 vendor/github.com/containers/storage/drivers/register/register_overlay.go create mode 100644 vendor/github.com/containers/storage/drivers/register/register_vfs.go create mode 100644 vendor/github.com/containers/storage/drivers/register/register_windows.go create mode 100644 vendor/github.com/containers/storage/drivers/register/register_zfs.go create mode 100644 vendor/github.com/containers/storage/drivers/template.go create mode 100644 vendor/github.com/containers/storage/drivers/vfs/copy_linux.go create mode 100644 vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go create mode 100644 vendor/github.com/containers/storage/drivers/vfs/driver.go create mode 100644 vendor/github.com/containers/storage/drivers/windows/jsoniter_windows.go create mode 100644 vendor/github.com/containers/storage/drivers/windows/windows.go create mode 100644 vendor/github.com/containers/storage/drivers/zfs/MAINTAINERS create 
mode 100644 vendor/github.com/containers/storage/drivers/zfs/zfs.go create mode 100644 vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go create mode 100644 vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go create mode 100644 vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go create mode 100644 vendor/github.com/containers/storage/errors.go create mode 100644 vendor/github.com/containers/storage/idset.go create mode 100644 vendor/github.com/containers/storage/images.go create mode 100644 vendor/github.com/containers/storage/jsoniter.go create mode 100644 vendor/github.com/containers/storage/layers.go create mode 100644 vendor/github.com/containers/storage/lockfile_compat.go create mode 100644 vendor/github.com/containers/storage/pkg/chrootarchive/archive.go create mode 100644 vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go create mode 100644 vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go create mode 100644 vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go create mode 100644 vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go create mode 100644 vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go create mode 100644 vendor/github.com/containers/storage/pkg/chrootarchive/diff.go create mode 100644 vendor/github.com/containers/storage/pkg/chrootarchive/diff_darwin.go create mode 100644 vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go create mode 100644 vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go create mode 100644 vendor/github.com/containers/storage/pkg/chrootarchive/init_darwin.go create mode 100644 vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go create mode 100644 vendor/github.com/containers/storage/pkg/chrootarchive/init_windows.go create mode 100644 vendor/github.com/containers/storage/pkg/chrootarchive/jsoniter.go create mode 100644 vendor/github.com/containers/storage/pkg/chunked/cache_linux.go create mode 100644 vendor/github.com/containers/storage/pkg/chunked/compression.go create mode 100644 vendor/github.com/containers/storage/pkg/chunked/compression_linux.go create mode 100644 vendor/github.com/containers/storage/pkg/chunked/dump/dump.go create mode 100644 vendor/github.com/containers/storage/pkg/chunked/storage.go create mode 100644 vendor/github.com/containers/storage/pkg/chunked/storage_linux.go create mode 100644 vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go create mode 100644 vendor/github.com/containers/storage/pkg/config/config.go create mode 100644 vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go create mode 100644 vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go create mode 100644 vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go create mode 100644 vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go create mode 100644 vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go create mode 100644 vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go create mode 100644 
vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go create mode 100644 vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go create mode 100644 vendor/github.com/containers/storage/pkg/devicemapper/log.go create mode 100644 vendor/github.com/containers/storage/pkg/directory/directory.go create mode 100644 vendor/github.com/containers/storage/pkg/directory/directory_unix.go create mode 100644 vendor/github.com/containers/storage/pkg/directory/directory_windows.go create mode 100644 vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go create mode 100644 vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go create mode 100644 vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go create mode 100644 vendor/github.com/containers/storage/pkg/idmap/idmapped_utils_unsupported.go create mode 100644 vendor/github.com/containers/storage/pkg/locker/README.md create mode 100644 vendor/github.com/containers/storage/pkg/locker/locker.go create mode 100644 vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go create mode 100644 vendor/github.com/containers/storage/pkg/loopback/ioctl.go create mode 100644 vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go create mode 100644 vendor/github.com/containers/storage/pkg/loopback/loopback.go create mode 100644 vendor/github.com/containers/storage/pkg/loopback/loopback_unsupported.go create mode 100644 vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go create mode 100644 vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go create mode 100644 vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go create mode 100644 vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go create mode 100644 vendor/github.com/containers/storage/pkg/parsers/kernel/uname_freebsd.go create mode 100644 vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go create mode 100644 vendor/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go create mode 100644 vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go create mode 100644 vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported_type.go create mode 100644 vendor/github.com/containers/storage/pkg/parsers/parsers.go create mode 100644 vendor/github.com/containers/storage/pkg/stringid/README.md create mode 100644 vendor/github.com/containers/storage/pkg/stringid/stringid.go create mode 100644 vendor/github.com/containers/storage/pkg/stringutils/README.md create mode 100644 vendor/github.com/containers/storage/pkg/stringutils/stringutils.go create mode 100644 vendor/github.com/containers/storage/pkg/tarlog/tarlogger.go create mode 100644 vendor/github.com/containers/storage/pkg/truncindex/truncindex.go create mode 100644 vendor/github.com/containers/storage/storage.conf create mode 100644 vendor/github.com/containers/storage/storage.conf-freebsd create mode 100644 vendor/github.com/containers/storage/store.go create mode 100644 vendor/github.com/containers/storage/types/default_override_test.conf create mode 100644 vendor/github.com/containers/storage/types/errors.go create mode 100644 
vendor/github.com/containers/storage/types/idmappings.go create mode 100644 vendor/github.com/containers/storage/types/options.go create mode 100644 vendor/github.com/containers/storage/types/options_darwin.go create mode 100644 vendor/github.com/containers/storage/types/options_freebsd.go create mode 100644 vendor/github.com/containers/storage/types/options_linux.go create mode 100644 vendor/github.com/containers/storage/types/options_windows.go create mode 100644 vendor/github.com/containers/storage/types/storage_broken.conf create mode 100644 vendor/github.com/containers/storage/types/storage_test.conf create mode 100644 vendor/github.com/containers/storage/types/utils.go create mode 100644 vendor/github.com/containers/storage/userns.go create mode 100644 vendor/github.com/containers/storage/utils.go create mode 100644 vendor/github.com/cyphar/filepath-securejoin/LICENSE create mode 100644 vendor/github.com/cyphar/filepath-securejoin/README.md create mode 100644 vendor/github.com/cyphar/filepath-securejoin/VERSION create mode 100644 vendor/github.com/cyphar/filepath-securejoin/join.go create mode 100644 vendor/github.com/cyphar/filepath-securejoin/vfs.go create mode 100644 vendor/github.com/docker/distribution/reference/helpers_deprecated.go create mode 100644 vendor/github.com/docker/distribution/reference/normalize_deprecated.go create mode 100644 vendor/github.com/docker/distribution/reference/reference_deprecated.go create mode 100644 vendor/github.com/docker/distribution/reference/regexp_deprecated.go create mode 100644 vendor/github.com/docker/distribution/reference/sort_deprecated.go create mode 100644 vendor/github.com/docker/docker/api/README.md create mode 100644 vendor/github.com/docker/docker/api/common.go create mode 100644 vendor/github.com/docker/docker/api/common_unix.go create mode 100644 vendor/github.com/docker/docker/api/common_windows.go create mode 100644 vendor/github.com/docker/docker/api/swagger-gen.yaml create mode 100644 vendor/github.com/docker/docker/api/swagger.yaml create mode 100644 vendor/github.com/docker/docker/api/types/auth.go create mode 100644 vendor/github.com/docker/docker/api/types/blkiodev/blkio.go create mode 100644 vendor/github.com/docker/docker/api/types/client.go create mode 100644 vendor/github.com/docker/docker/api/types/configs.go create mode 100644 vendor/github.com/docker/docker/api/types/container/change_response_deprecated.go create mode 100644 vendor/github.com/docker/docker/api/types/container/change_type.go create mode 100644 vendor/github.com/docker/docker/api/types/container/change_types.go create mode 100644 vendor/github.com/docker/docker/api/types/container/config.go create mode 100644 vendor/github.com/docker/docker/api/types/container/container_top.go create mode 100644 vendor/github.com/docker/docker/api/types/container/container_update.go create mode 100644 vendor/github.com/docker/docker/api/types/container/create_response.go create mode 100644 vendor/github.com/docker/docker/api/types/container/filesystem_change.go create mode 100644 vendor/github.com/docker/docker/api/types/container/hostconfig.go create mode 100644 vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go create 
mode 100644 vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go create mode 100644 vendor/github.com/docker/docker/api/types/container/wait_exit_error.go create mode 100644 vendor/github.com/docker/docker/api/types/container/wait_response.go create mode 100644 vendor/github.com/docker/docker/api/types/container/waitcondition.go create mode 100644 vendor/github.com/docker/docker/api/types/error_response.go create mode 100644 vendor/github.com/docker/docker/api/types/error_response_ext.go create mode 100644 vendor/github.com/docker/docker/api/types/events/events.go create mode 100644 vendor/github.com/docker/docker/api/types/filters/errors.go create mode 100644 vendor/github.com/docker/docker/api/types/filters/parse.go create mode 100644 vendor/github.com/docker/docker/api/types/graph_driver_data.go create mode 100644 vendor/github.com/docker/docker/api/types/id_response.go create mode 100644 vendor/github.com/docker/docker/api/types/image/image_history.go create mode 100644 vendor/github.com/docker/docker/api/types/image/opts.go create mode 100644 vendor/github.com/docker/docker/api/types/image_delete_response_item.go create mode 100644 vendor/github.com/docker/docker/api/types/image_summary.go create mode 100644 vendor/github.com/docker/docker/api/types/mount/mount.go create mode 100644 vendor/github.com/docker/docker/api/types/network/network.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin_device.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin_env.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin_interface_type.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin_mount.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin_responses.go create mode 100644 vendor/github.com/docker/docker/api/types/port.go create mode 100644 vendor/github.com/docker/docker/api/types/registry/authconfig.go create mode 100644 vendor/github.com/docker/docker/api/types/registry/authenticate.go create mode 100644 vendor/github.com/docker/docker/api/types/registry/registry.go create mode 100644 vendor/github.com/docker/docker/api/types/service_update_response.go create mode 100644 vendor/github.com/docker/docker/api/types/stats.go create mode 100644 vendor/github.com/docker/docker/api/types/strslice/strslice.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/common.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/config.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/container.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/network.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/node.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto create mode 100644 vendor/github.com/docker/docker/api/types/swarm/secret.go create mode 
100644 vendor/github.com/docker/docker/api/types/swarm/service.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/swarm.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/task.go create mode 100644 vendor/github.com/docker/docker/api/types/time/timestamp.go create mode 100644 vendor/github.com/docker/docker/api/types/types.go create mode 100644 vendor/github.com/docker/docker/api/types/volume/cluster_volume.go create mode 100644 vendor/github.com/docker/docker/api/types/volume/create_options.go create mode 100644 vendor/github.com/docker/docker/api/types/volume/list_response.go create mode 100644 vendor/github.com/docker/docker/api/types/volume/options.go create mode 100644 vendor/github.com/docker/docker/api/types/volume/volume.go create mode 100644 vendor/github.com/docker/docker/api/types/volume/volume_update.go create mode 100644 vendor/github.com/docker/docker/client/README.md create mode 100644 vendor/github.com/docker/docker/client/build_cancel.go create mode 100644 vendor/github.com/docker/docker/client/build_prune.go create mode 100644 vendor/github.com/docker/docker/client/checkpoint_create.go create mode 100644 vendor/github.com/docker/docker/client/checkpoint_delete.go create mode 100644 vendor/github.com/docker/docker/client/checkpoint_list.go create mode 100644 vendor/github.com/docker/docker/client/client.go create mode 100644 vendor/github.com/docker/docker/client/client_deprecated.go create mode 100644 vendor/github.com/docker/docker/client/client_unix.go create mode 100644 vendor/github.com/docker/docker/client/client_windows.go create mode 100644 vendor/github.com/docker/docker/client/config_create.go create mode 100644 vendor/github.com/docker/docker/client/config_inspect.go create mode 100644 vendor/github.com/docker/docker/client/config_list.go create mode 100644 vendor/github.com/docker/docker/client/config_remove.go create mode 100644 vendor/github.com/docker/docker/client/config_update.go create mode 100644 vendor/github.com/docker/docker/client/container_attach.go create mode 100644 vendor/github.com/docker/docker/client/container_commit.go create mode 100644 vendor/github.com/docker/docker/client/container_copy.go create mode 100644 vendor/github.com/docker/docker/client/container_create.go create mode 100644 vendor/github.com/docker/docker/client/container_diff.go create mode 100644 vendor/github.com/docker/docker/client/container_exec.go create mode 100644 vendor/github.com/docker/docker/client/container_export.go create mode 100644 vendor/github.com/docker/docker/client/container_inspect.go create mode 100644 vendor/github.com/docker/docker/client/container_kill.go create mode 100644 vendor/github.com/docker/docker/client/container_list.go create mode 100644 vendor/github.com/docker/docker/client/container_logs.go create mode 100644 vendor/github.com/docker/docker/client/container_pause.go create mode 100644 vendor/github.com/docker/docker/client/container_prune.go create mode 100644 vendor/github.com/docker/docker/client/container_remove.go create mode 100644 vendor/github.com/docker/docker/client/container_rename.go create mode 100644 
vendor/github.com/docker/docker/client/container_resize.go create mode 100644 vendor/github.com/docker/docker/client/container_restart.go create mode 100644 vendor/github.com/docker/docker/client/container_start.go create mode 100644 vendor/github.com/docker/docker/client/container_stats.go create mode 100644 vendor/github.com/docker/docker/client/container_stop.go create mode 100644 vendor/github.com/docker/docker/client/container_top.go create mode 100644 vendor/github.com/docker/docker/client/container_unpause.go create mode 100644 vendor/github.com/docker/docker/client/container_update.go create mode 100644 vendor/github.com/docker/docker/client/container_wait.go create mode 100644 vendor/github.com/docker/docker/client/disk_usage.go create mode 100644 vendor/github.com/docker/docker/client/distribution_inspect.go create mode 100644 vendor/github.com/docker/docker/client/envvars.go create mode 100644 vendor/github.com/docker/docker/client/errors.go create mode 100644 vendor/github.com/docker/docker/client/events.go create mode 100644 vendor/github.com/docker/docker/client/hijack.go create mode 100644 vendor/github.com/docker/docker/client/image_build.go create mode 100644 vendor/github.com/docker/docker/client/image_create.go create mode 100644 vendor/github.com/docker/docker/client/image_history.go create mode 100644 vendor/github.com/docker/docker/client/image_import.go create mode 100644 vendor/github.com/docker/docker/client/image_inspect.go create mode 100644 vendor/github.com/docker/docker/client/image_list.go create mode 100644 vendor/github.com/docker/docker/client/image_load.go create mode 100644 vendor/github.com/docker/docker/client/image_prune.go create mode 100644 vendor/github.com/docker/docker/client/image_pull.go create mode 100644 vendor/github.com/docker/docker/client/image_push.go create mode 100644 vendor/github.com/docker/docker/client/image_remove.go create mode 100644 vendor/github.com/docker/docker/client/image_save.go create mode 100644 vendor/github.com/docker/docker/client/image_search.go create mode 100644 vendor/github.com/docker/docker/client/image_tag.go create mode 100644 vendor/github.com/docker/docker/client/info.go create mode 100644 vendor/github.com/docker/docker/client/interface.go create mode 100644 vendor/github.com/docker/docker/client/interface_experimental.go create mode 100644 vendor/github.com/docker/docker/client/interface_stable.go create mode 100644 vendor/github.com/docker/docker/client/login.go create mode 100644 vendor/github.com/docker/docker/client/network_connect.go create mode 100644 vendor/github.com/docker/docker/client/network_create.go create mode 100644 vendor/github.com/docker/docker/client/network_disconnect.go create mode 100644 vendor/github.com/docker/docker/client/network_inspect.go create mode 100644 vendor/github.com/docker/docker/client/network_list.go create mode 100644 vendor/github.com/docker/docker/client/network_prune.go create mode 100644 vendor/github.com/docker/docker/client/network_remove.go create mode 100644 vendor/github.com/docker/docker/client/node_inspect.go create mode 100644 vendor/github.com/docker/docker/client/node_list.go create mode 100644 
vendor/github.com/docker/docker/client/node_remove.go create mode 100644 vendor/github.com/docker/docker/client/node_update.go create mode 100644 vendor/github.com/docker/docker/client/options.go create mode 100644 vendor/github.com/docker/docker/client/ping.go create mode 100644 vendor/github.com/docker/docker/client/plugin_create.go create mode 100644 vendor/github.com/docker/docker/client/plugin_disable.go create mode 100644 vendor/github.com/docker/docker/client/plugin_enable.go create mode 100644 vendor/github.com/docker/docker/client/plugin_inspect.go create mode 100644 vendor/github.com/docker/docker/client/plugin_install.go create mode 100644 vendor/github.com/docker/docker/client/plugin_list.go create mode 100644 vendor/github.com/docker/docker/client/plugin_push.go create mode 100644 vendor/github.com/docker/docker/client/plugin_remove.go create mode 100644 vendor/github.com/docker/docker/client/plugin_set.go create mode 100644 vendor/github.com/docker/docker/client/plugin_upgrade.go create mode 100644 vendor/github.com/docker/docker/client/request.go create mode 100644 vendor/github.com/docker/docker/client/secret_create.go create mode 100644 vendor/github.com/docker/docker/client/secret_inspect.go create mode 100644 vendor/github.com/docker/docker/client/secret_list.go create mode 100644 vendor/github.com/docker/docker/client/secret_remove.go create mode 100644 vendor/github.com/docker/docker/client/secret_update.go create mode 100644 vendor/github.com/docker/docker/client/service_create.go create mode 100644 vendor/github.com/docker/docker/client/service_inspect.go create mode 100644 vendor/github.com/docker/docker/client/service_list.go create mode 100644 vendor/github.com/docker/docker/client/service_logs.go create mode 100644 vendor/github.com/docker/docker/client/service_remove.go create mode 100644 vendor/github.com/docker/docker/client/service_update.go create mode 100644 vendor/github.com/docker/docker/client/swarm_get_unlock_key.go create mode 100644 vendor/github.com/docker/docker/client/swarm_init.go create mode 100644 vendor/github.com/docker/docker/client/swarm_inspect.go create mode 100644 vendor/github.com/docker/docker/client/swarm_join.go create mode 100644 vendor/github.com/docker/docker/client/swarm_leave.go create mode 100644 vendor/github.com/docker/docker/client/swarm_unlock.go create mode 100644 vendor/github.com/docker/docker/client/swarm_update.go create mode 100644 vendor/github.com/docker/docker/client/task_inspect.go create mode 100644 vendor/github.com/docker/docker/client/task_list.go create mode 100644 vendor/github.com/docker/docker/client/task_logs.go create mode 100644 vendor/github.com/docker/docker/client/transport.go create mode 100644 vendor/github.com/docker/docker/client/utils.go create mode 100644 vendor/github.com/docker/docker/client/version.go create mode 100644 vendor/github.com/docker/docker/client/volume_create.go create mode 100644 vendor/github.com/docker/docker/client/volume_inspect.go create mode 100644 vendor/github.com/docker/docker/client/volume_list.go create mode 100644 vendor/github.com/docker/docker/client/volume_prune.go create mode 100644 
vendor/github.com/docker/docker/client/volume_remove.go create mode 100644 vendor/github.com/docker/docker/client/volume_update.go create mode 100644 vendor/github.com/docker/docker/errdefs/defs.go create mode 100644 vendor/github.com/docker/docker/errdefs/doc.go create mode 100644 vendor/github.com/docker/docker/errdefs/helpers.go create mode 100644 vendor/github.com/docker/docker/errdefs/http_helpers.go create mode 100644 vendor/github.com/docker/docker/errdefs/is.go create mode 100644 vendor/github.com/docker/go-connections/nat/nat.go create mode 100644 vendor/github.com/docker/go-connections/nat/parse.go create mode 100644 vendor/github.com/docker/go-connections/nat/sort.go create mode 100644 vendor/github.com/docker/go-connections/sockets/README.md create mode 100644 vendor/github.com/docker/go-connections/sockets/inmem_socket.go create mode 100644 vendor/github.com/docker/go-connections/sockets/proxy.go create mode 100644 vendor/github.com/docker/go-connections/sockets/sockets.go create mode 100644 vendor/github.com/docker/go-connections/sockets/sockets_unix.go create mode 100644 vendor/github.com/docker/go-connections/sockets/sockets_windows.go create mode 100644 vendor/github.com/docker/go-connections/sockets/tcp_socket.go create mode 100644 vendor/github.com/docker/go-connections/sockets/unix_socket.go create mode 100644 vendor/github.com/gogo/protobuf/AUTHORS create mode 100644 vendor/github.com/gogo/protobuf/CONTRIBUTORS create mode 100644 vendor/github.com/gogo/protobuf/LICENSE create mode 100644 vendor/github.com/gogo/protobuf/proto/Makefile create mode 100644 vendor/github.com/gogo/protobuf/proto/clone.go create mode 100644 vendor/github.com/gogo/protobuf/proto/custom_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/decode.go create mode 100644 vendor/github.com/gogo/protobuf/proto/deprecated.go create mode 100644 vendor/github.com/gogo/protobuf/proto/discard.go create mode 100644 vendor/github.com/gogo/protobuf/proto/duration.go create mode 100644 vendor/github.com/gogo/protobuf/proto/duration_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/encode.go create mode 100644 vendor/github.com/gogo/protobuf/proto/encode_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/equal.go create mode 100644 vendor/github.com/gogo/protobuf/proto/extensions.go create mode 100644 vendor/github.com/gogo/protobuf/proto/extensions_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/lib.go create mode 100644 vendor/github.com/gogo/protobuf/proto/lib_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/message_set.go create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_reflect.go create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/properties.go create mode 100644 vendor/github.com/gogo/protobuf/proto/properties_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/skip_gogo.go create mode 100644 
vendor/github.com/gogo/protobuf/proto/table_marshal.go create mode 100644 vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/table_merge.go create mode 100644 vendor/github.com/gogo/protobuf/proto/table_unmarshal.go create mode 100644 vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/text.go create mode 100644 vendor/github.com/gogo/protobuf/proto/text_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/text_parser.go create mode 100644 vendor/github.com/gogo/protobuf/proto/timestamp.go create mode 100644 vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/wrappers.go create mode 100644 vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/config.go create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/doc.go create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/hash.go create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/image.go create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/index.go create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/layer.go create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/platform.go create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/progress.go create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go create mode 100644 vendor/github.com/google/go-intervals/LICENSE create mode 100644 vendor/github.com/google/go-intervals/intervalset/intervalset.go create mode 100644 vendor/github.com/google/go-intervals/intervalset/intervalset_immutable.go create mode 100644 vendor/github.com/mattn/go-shellwords/.travis.yml create mode 100644 vendor/github.com/mattn/go-shellwords/LICENSE create mode 100644 vendor/github.com/mattn/go-shellwords/README.md create mode 100644 vendor/github.com/mattn/go-shellwords/go.test.sh create mode 100644 vendor/github.com/mattn/go-shellwords/shellwords.go create mode 100644 vendor/github.com/mattn/go-shellwords/util_posix.go create mode 100644 vendor/github.com/mattn/go-shellwords/util_windows.go create mode 100644 vendor/github.com/mistifyio/go-zfs/v3/.envrc create mode 100644 vendor/github.com/mistifyio/go-zfs/v3/.gitignore create mode 100644 vendor/github.com/mistifyio/go-zfs/v3/.golangci.yml create mode 100644 vendor/github.com/mistifyio/go-zfs/v3/.yamllint create mode 100644 vendor/github.com/mistifyio/go-zfs/v3/CHANGELOG.md create mode 100644 vendor/github.com/mistifyio/go-zfs/v3/CONTRIBUTING.md create mode 100644 vendor/github.com/mistifyio/go-zfs/v3/LICENSE create mode 100644 vendor/github.com/mistifyio/go-zfs/v3/Makefile create mode 100644 vendor/github.com/mistifyio/go-zfs/v3/README.md create mode 100644 vendor/github.com/mistifyio/go-zfs/v3/Vagrantfile create mode 100644 
vendor/github.com/mistifyio/go-zfs/v3/error.go create mode 100644 vendor/github.com/mistifyio/go-zfs/v3/lint.mk create mode 100644 vendor/github.com/mistifyio/go-zfs/v3/rules.mk create mode 100644 vendor/github.com/mistifyio/go-zfs/v3/shell.nix create mode 100644 vendor/github.com/mistifyio/go-zfs/v3/utils.go create mode 100644 vendor/github.com/mistifyio/go-zfs/v3/utils_notsolaris.go create mode 100644 vendor/github.com/mistifyio/go-zfs/v3/utils_solaris.go create mode 100644 vendor/github.com/mistifyio/go-zfs/v3/zfs.go create mode 100644 vendor/github.com/mistifyio/go-zfs/v3/zpool.go create mode 100644 vendor/github.com/opencontainers/go-digest/digestset/set.go create mode 100644 vendor/github.com/opencontainers/selinux/LICENSE create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/doc.go create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/label/label.go create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/selinux.go create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go create mode 100644 vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md create mode 100644 vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go create mode 100644 vendor/github.com/opencontainers/selinux/pkg/pwalkdir/README.md create mode 100644 vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go create mode 100644 vendor/github.com/ostreedev/ostree-go/LICENSE create mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gboolean.go create mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gcancellable.go create mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gerror.go create mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gfile.go create mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gfileinfo.go create mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/glibobject/ghashtable.go create mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/glibobject/ghashtableiter.go create mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/glibobject/glibobject.go create mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/glibobject/glibobject.go.h create mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gobject.go create mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/glibobject/goptioncontext.go create mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go create mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go create mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go.h create mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checkout.go create mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/commit.go create mode 100644 
vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/init.go create mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/log.go create mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/prune.go create mode 100644 vendor/github.com/sylabs/sif/v2/LICENSE.md create mode 100644 vendor/github.com/sylabs/sif/v2/pkg/sif/arch.go create mode 100644 vendor/github.com/sylabs/sif/v2/pkg/sif/buffer.go create mode 100644 vendor/github.com/sylabs/sif/v2/pkg/sif/create.go create mode 100644 vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go create mode 100644 vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor_input.go create mode 100644 vendor/github.com/sylabs/sif/v2/pkg/sif/load.go create mode 100644 vendor/github.com/sylabs/sif/v2/pkg/sif/select.go create mode 100644 vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go create mode 100644 vendor/github.com/tchap/go-patricia/v2/AUTHORS create mode 100644 vendor/github.com/tchap/go-patricia/v2/LICENSE create mode 100644 vendor/github.com/tchap/go-patricia/v2/patricia/children.go create mode 100644 vendor/github.com/tchap/go-patricia/v2/patricia/patricia.go create mode 100644 vendor/golang.org/x/mod/LICENSE create mode 100644 vendor/golang.org/x/mod/PATENTS create mode 100644 vendor/golang.org/x/mod/semver/semver.go create mode 100644 vendor/golang.org/x/net/internal/socks/client.go create mode 100644 vendor/golang.org/x/net/internal/socks/socks.go create mode 100644 vendor/golang.org/x/net/proxy/dial.go create mode 100644 vendor/golang.org/x/net/proxy/direct.go create mode 100644 vendor/golang.org/x/net/proxy/per_host.go create mode 100644 vendor/golang.org/x/net/proxy/proxy.go create mode 100644 vendor/golang.org/x/net/proxy/socks5.go create mode 100644 vendor/golang.org/x/sync/errgroup/errgroup.go create mode 100644 vendor/golang.org/x/sync/errgroup/go120.go create mode 100644 vendor/golang.org/x/sync/errgroup/pre_go120.go create mode 100644 vendor/golang.org/x/sys/execabs/execabs.go create mode 100644 vendor/golang.org/x/sys/execabs/execabs_go118.go create mode 100644 vendor/golang.org/x/sys/execabs/execabs_go119.go create mode 100644 vendor/golang.org/x/sys/windows/registry/key.go create mode 100644 vendor/golang.org/x/sys/windows/registry/mksyscall.go create mode 100644 vendor/golang.org/x/sys/windows/registry/syscall.go create mode 100644 vendor/golang.org/x/sys/windows/registry/value.go create mode 100644 vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go create mode 100644 vendor/golang.org/x/tools/LICENSE create mode 100644 vendor/golang.org/x/tools/PATENTS create mode 100644 vendor/golang.org/x/tools/cmd/stringer/stringer.go create mode 100644 vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go create mode 100644 vendor/golang.org/x/tools/go/gcexportdata/importer.go create mode 100644 vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go create mode 100644 vendor/golang.org/x/tools/go/packages/doc.go create mode 100644 vendor/golang.org/x/tools/go/packages/external.go create mode 100644 vendor/golang.org/x/tools/go/packages/golist.go create mode 100644 vendor/golang.org/x/tools/go/packages/golist_overlay.go create mode 100644 vendor/golang.org/x/tools/go/packages/loadmode_string.go create mode 100644 vendor/golang.org/x/tools/go/packages/packages.go create mode 100644 vendor/golang.org/x/tools/go/packages/visit.go create mode 100644 
vendor/golang.org/x/tools/go/types/objectpath/objectpath.go create mode 100644 vendor/golang.org/x/tools/internal/event/core/event.go create mode 100644 vendor/golang.org/x/tools/internal/event/core/export.go create mode 100644 vendor/golang.org/x/tools/internal/event/core/fast.go create mode 100644 vendor/golang.org/x/tools/internal/event/doc.go create mode 100644 vendor/golang.org/x/tools/internal/event/event.go create mode 100644 vendor/golang.org/x/tools/internal/event/keys/keys.go create mode 100644 vendor/golang.org/x/tools/internal/event/keys/standard.go create mode 100644 vendor/golang.org/x/tools/internal/event/label/label.go create mode 100644 vendor/golang.org/x/tools/internal/event/tag/tag.go create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/bimport.go create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/exportdata.go create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/iexport.go create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/iimport.go create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/support_go117.go create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/support_go118.go create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/unified_no.go create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go create mode 100644 vendor/golang.org/x/tools/internal/gocommand/invoke.go create mode 100644 vendor/golang.org/x/tools/internal/gocommand/vendor.go create mode 100644 vendor/golang.org/x/tools/internal/gocommand/version.go create mode 100644 vendor/golang.org/x/tools/internal/packagesinternal/packages.go create mode 100644 vendor/golang.org/x/tools/internal/pkgbits/codes.go create mode 100644 vendor/golang.org/x/tools/internal/pkgbits/decoder.go create mode 100644 vendor/golang.org/x/tools/internal/pkgbits/doc.go create mode 100644 vendor/golang.org/x/tools/internal/pkgbits/encoder.go create mode 100644 vendor/golang.org/x/tools/internal/pkgbits/flags.go create mode 100644 vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go create mode 100644 vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go create mode 100644 vendor/golang.org/x/tools/internal/pkgbits/reloc.go create mode 100644 vendor/golang.org/x/tools/internal/pkgbits/support.go create mode 100644 vendor/golang.org/x/tools/internal/pkgbits/sync.go create mode 100644 vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go create mode 100644 vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/common.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/coretype.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/normalize.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/termlist.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go create mode 100644 
vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/typeterm.go create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/errorcode.go create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/objectpath.go create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/types.go create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/types_118.go diff --git a/go.mod b/go.mod index 2796ea23fd..efd22a718c 100644 --- a/go.mod +++ b/go.mod @@ -43,6 +43,7 @@ require ( cloud.google.com/go v0.110.10 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect cloud.google.com/go/iam v1.1.5 // indirect + dario.cat/mergo v1.0.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.2 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect @@ -53,13 +54,19 @@ require ( github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/Microsoft/hcsshim v0.12.0-rc.1 // indirect github.com/VividCortex/ewma v1.2.0 // indirect github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect + github.com/containerd/cgroups/v3 v3.0.2 // indirect + github.com/containerd/containerd v1.7.9 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect github.com/containers/ocicrypt v1.1.9 // indirect github.com/containers/storage v1.51.0 // indirect github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 // indirect + github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect github.com/distribution/reference v0.5.0 // indirect @@ -80,9 +87,11 @@ require ( github.com/go-openapi/strfmt v0.21.7 // indirect github.com/go-openapi/swag v0.22.4 // indirect github.com/go-openapi/validate v0.22.1 // indirect + github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/go-containerregistry v0.16.1 // indirect + github.com/google/go-intervals v0.0.2 // indirect github.com/google/s2a-go v0.1.7 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect github.com/googleapis/gax-go/v2 v2.12.0 // indirect @@ -101,8 +110,10 @@ require ( github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-runewidth v0.0.15 // indirect + github.com/mattn/go-shellwords v1.0.12 // indirect github.com/mattn/go-sqlite3 v1.14.18 // indirect github.com/miekg/pkcs11 v1.1.1 // indirect + github.com/mistifyio/go-zfs/v3 v3.0.1 // 
indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/moby/sys/mountinfo v0.7.1 // indirect @@ -111,6 +122,8 @@ require ( github.com/oklog/ulid v1.3.1 // indirect github.com/opencontainers/runc v1.1.10 // indirect github.com/opencontainers/runtime-spec v1.1.0 // indirect + github.com/opencontainers/selinux v1.11.0 // indirect + github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/proglottis/gpgme v0.1.3 // indirect @@ -121,7 +134,9 @@ require ( github.com/sigstore/sigstore v1.7.5 // indirect github.com/sony/gobreaker v0.4.2-0.20210216022020-dd874f9dd33b // indirect github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect + github.com/sylabs/sif/v2 v2.15.0 // indirect github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect + github.com/tchap/go-patricia/v2 v2.3.1 // indirect github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect github.com/ulikunitz/xz v0.5.11 // indirect github.com/vbatts/tar-split v0.11.5 // indirect @@ -130,11 +145,13 @@ require ( go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect go.opencensus.io v0.24.0 // indirect golang.org/x/crypto v0.16.0 // indirect + golang.org/x/mod v0.13.0 // indirect golang.org/x/net v0.19.0 // indirect golang.org/x/sync v0.5.0 // indirect golang.org/x/term v0.15.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect + golang.org/x/tools v0.14.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 // indirect diff --git a/go.sum b/go.sum index b6e09ca2b4..3dd60c19fd 100644 --- a/go.sum +++ b/go.sum @@ -10,6 +10,7 @@ cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXE cloud.google.com/go/storage v1.35.1 h1:B59ahL//eDfx2IIKFBeT5Atm9wnNmj3+8xG/W4WB//w= cloud.google.com/go/storage v1.35.1/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8= dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= +dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.2 h1:t5+QXLCK9SVi0PPdaY0PrFvYUo24KwA0QwxnaHRSVd4= @@ -51,6 +52,10 @@ github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 h1:WpB/QDNLpMw github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Microsoft/hcsshim v0.12.0-rc.1 h1:Hy+xzYujv7urO5wrgcG58SPMOXNLrj4WCJbySs2XX/A= 
+github.com/Microsoft/hcsshim v0.12.0-rc.1/go.mod h1:Y1a1S0QlYp1mBpyvGiuEdOfZqnao+0uX5AWHXQ5NhZU= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow= @@ -67,6 +72,12 @@ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/containerd/cgroups/v3 v3.0.2 h1:f5WFqIVSgo5IZmtTT3qVBo6TzI1ON6sycSBKkymb9L0= +github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE= +github.com/containerd/containerd v1.7.9 h1:KOhK01szQbM80YfW1H6RZKh85PHGqY/9OcEZ35Je8sc= +github.com/containerd/containerd v1.7.9/go.mod h1:0/W44LWEYfSHoxBtsHIiNU/duEkgpMokemafHVCpq9Y= +github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU= +github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk= github.com/containers/common v0.57.0 h1:5O/+6QUBafKK0/zeok9y1rLPukfWgdE0sT4nuzmyAqk= github.com/containers/common v0.57.0/go.mod h1:t/Z+/sFrapvFMEJe3YnecN49/Tae2wYEQShbEN6SRaU= github.com/containers/image/v5 v5.29.0 h1:9+nhS/ZM7c4Kuzu5tJ0NMpxrgoryOJ2HAYTgG8Ny7j4= @@ -81,6 +92,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46t github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 h1:2Dx4IHfC1yHWI12AxQDJM1QbRCDfk6M+blLzlZCXdrc= github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= +github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= +github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -178,6 +191,8 @@ github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/V github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= 
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= @@ -214,12 +229,15 @@ github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-containerregistry v0.16.1 h1:rUEt426sR6nyrL3gt+18ibRcvYpKYdpsa5ZW7MA08dQ= github.com/google/go-containerregistry v0.16.1/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ= +github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM= +github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= @@ -260,6 +278,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.17.3 h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA= github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= @@ -289,11 +309,15 @@ github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsI github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= +github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/mattn/go-sqlite3 v1.14.18 h1:JL0eqdCOq6DJVNPSvArO/bIV9/P7fbGrV00LZHc+5aI= github.com/mattn/go-sqlite3 v1.14.18/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= 
github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPnkFiU= +github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= @@ -321,8 +345,12 @@ github.com/opencontainers/runc v1.1.10 h1:EaL5WeO9lv9wmS6SASjszOeQdSctvpbu0DdBQB github.com/opencontainers/runc v1.1.10/go.mod h1:+/R6+KmDlh+hOO8NkjmgkG9Qzvypzk0yXxAPYYR65+M= github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg= github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU= +github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= github.com/oracle/oci-go-sdk/v54 v54.0.0 h1:CDLjeSejv2aDpElAJrhKpi6zvT/zhZCZuXchUUZ+LS4= github.com/oracle/oci-go-sdk/v54 v54.0.0/go.mod h1:+t+yvcFGVp+3ZnztnyxqXfQDsMlq8U25faBLa+mqCMc= +github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M= +github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= @@ -386,8 +414,12 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/sylabs/sif/v2 v2.15.0 h1:Nv0tzksFnoQiQ2eUwpAis9nVqEu4c3RcNSxX8P3Cecw= +github.com/sylabs/sif/v2 v2.15.0/go.mod h1:X1H7eaPz6BAxA84POMESXoXfTqgAnLQkujyF/CQFWTc= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes= +github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= @@ -413,6 +445,8 @@ github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMc github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= 
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= @@ -427,6 +461,7 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= @@ -443,13 +478,19 @@ golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQz golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= +golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod 
h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= @@ -467,6 +508,7 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= @@ -516,9 +558,15 @@ golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= +golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/api v0.153.0 h1:N1AwGhielyKFaUqH07/ZSIQR3uNPcV7NVw0vj+j4iR4= diff --git a/pkg/container/client.go b/pkg/container/client.go index 33c7d9c065..9d214e8790 100644 --- a/pkg/container/client.go +++ b/pkg/container/client.go @@ -25,6 +25,7 @@ import ( "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/signature" "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/transports/alltransports" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" @@ -340,9 +341,24 @@ func (m RawManifest) Digest() (digest.Digest, error) { return manifest.Digest(m.Data) } +func getImageRef(target reference.Named, local *bool) (types.ImageReference, error) { + if local != nil && *local { + ref, err := alltransports.ParseImageName(fmt.Sprintf("containers-storage:%s", target.Name())) + if err != nil { + return nil, err + } + return ref, nil + } + ref, err := docker.NewReference(target) + if err != nil { + return nil, err + } + return ref, nil +} + // GetManifest fetches the raw manifest data from the server. 
If digest is not empty // it will override any given tag for the Client's Target. -func (cl *Client) GetManifest(ctx context.Context, digest digest.Digest) (r RawManifest, err error) { +func (cl *Client) GetManifest(ctx context.Context, digest digest.Digest, local *bool) (r RawManifest, err error) { target := cl.Target if digest != "" { @@ -355,13 +371,12 @@ func (cl *Client) GetManifest(ctx context.Context, digest digest.Digest) (r RawM target = t } - ref, err := docker.NewReference(target) + ref, err := getImageRef(target, local) if err != nil { return } src, err := ref.NewImageSource(ctx, cl.sysCtx) - if err != nil { return } @@ -402,8 +417,7 @@ func (cl *Client) resolveManifestList(ctx context.Context, list manifestList) (r return resolvedIds{}, err } - raw, err := cl.GetManifest(ctx, digest) - + raw, err := cl.GetManifest(ctx, digest, nil) if err != nil { return resolvedIds{}, fmt.Errorf("error getting manifest: %w", err) } @@ -487,9 +501,9 @@ func (cl *Client) resolveRawManifest(ctx context.Context, rm RawManifest) (resol // which is the digest of the configuration object. It uses the architecture and // variant specified via SetArchitectureChoice or the corresponding defaults for // the host. -func (cl *Client) Resolve(ctx context.Context, name string) (Spec, error) { +func (cl *Client) Resolve(ctx context.Context, name string, localContainer *bool) (Spec, error) { - raw, err := cl.GetManifest(ctx, "") + raw, err := cl.GetManifest(ctx, "", localContainer) if err != nil { return Spec{}, fmt.Errorf("error getting manifest: %w", err) @@ -500,7 +514,7 @@ func (cl *Client) Resolve(ctx context.Context, name string) (Spec, error) { return Spec{}, err } - spec := NewSpec(cl.Target, ids.Manifest, ids.Config, cl.GetTLSVerify(), ids.ListManifest.String(), name) + spec := NewSpec(cl.Target, ids.Manifest, ids.Config, cl.GetTLSVerify(), ids.ListManifest.String(), name, localContainer) return spec, nil } diff --git a/pkg/container/client_test.go b/pkg/container/client_test.go index 659e5a477d..716c90d726 100644 --- a/pkg/container/client_test.go +++ b/pkg/container/client_test.go @@ -35,7 +35,7 @@ func TestClientResolve(t *testing.T) { ctx := context.Background() client.SetArchitectureChoice("amd64") - spec, err := client.Resolve(ctx, "") + spec, err := client.Resolve(ctx, "", nil) assert.NoError(t, err) assert.Equal(t, container.Spec{ @@ -48,7 +48,7 @@ func TestClientResolve(t *testing.T) { }, spec) client.SetArchitectureChoice("ppc64le") - spec, err = client.Resolve(ctx, "") + spec, err = client.Resolve(ctx, "", nil) assert.NoError(t, err) assert.Equal(t, container.Spec{ @@ -62,7 +62,7 @@ func TestClientResolve(t *testing.T) { // don't have that architecture client.SetArchitectureChoice("s390x") - _, err = client.Resolve(ctx, "") + _, err = client.Resolve(ctx, "", nil) assert.Error(t, err) } diff --git a/pkg/container/resolver.go b/pkg/container/resolver.go index 75bdd278a5..0760e1ed2b 100644 --- a/pkg/container/resolver.go +++ b/pkg/container/resolver.go @@ -54,7 +54,7 @@ func (r *Resolver) Add(spec SourceSpec) { } go func() { - spec, err := client.Resolve(r.ctx, spec.Name) + spec, err := client.Resolve(r.ctx, spec.Name, spec.ContainersStorage) if err != nil { err = fmt.Errorf("'%s': %w", spec.Source, err) } diff --git a/pkg/container/spec.go b/pkg/container/spec.go index 2971ad4670..89629a74f2 100644 --- a/pkg/container/spec.go +++ b/pkg/container/spec.go @@ -23,16 +23,17 @@ type Spec struct { // NewSpec creates a new Spec from the essential information. 
// It is also the transition point from container // specific types (digest.Digest) to generic types (string). -func NewSpec(source reference.Named, digest, imageID digest.Digest, tlsVerify *bool, listDigest string, localName string) Spec { +func NewSpec(source reference.Named, digest, imageID digest.Digest, tlsVerify *bool, listDigest string, localName string, containersStorage *bool) Spec { if localName == "" { localName = source.String() } return Spec{ - Source: source.Name(), - Digest: digest.String(), - TLSVerify: tlsVerify, - ImageID: imageID.String(), - LocalName: localName, - ListDigest: listDigest, + Source: source.Name(), + Digest: digest.String(), + TLSVerify: tlsVerify, + ImageID: imageID.String(), + LocalName: localName, + ListDigest: listDigest, + ContainersStorage: containersStorage, } } diff --git a/vendor/dario.cat/mergo/.deepsource.toml b/vendor/dario.cat/mergo/.deepsource.toml new file mode 100644 index 0000000000..a8bc979e02 --- /dev/null +++ b/vendor/dario.cat/mergo/.deepsource.toml @@ -0,0 +1,12 @@ +version = 1 + +test_patterns = [ + "*_test.go" +] + +[[analyzers]] +name = "go" +enabled = true + + [analyzers.meta] + import_path = "dario.cat/mergo" \ No newline at end of file diff --git a/vendor/dario.cat/mergo/.gitignore b/vendor/dario.cat/mergo/.gitignore new file mode 100644 index 0000000000..529c3412ba --- /dev/null +++ b/vendor/dario.cat/mergo/.gitignore @@ -0,0 +1,33 @@ +#### joe made this: http://goel.io/joe + +#### go #### +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ + +#### vim #### +# Swap +[._]*.s[a-v][a-z] +[._]*.sw[a-p] +[._]s[a-v][a-z] +[._]sw[a-p] + +# Session +Session.vim + +# Temporary +.netrwhist +*~ +# Auto-generated tag files +tags diff --git a/vendor/dario.cat/mergo/.travis.yml b/vendor/dario.cat/mergo/.travis.yml new file mode 100644 index 0000000000..d324c43ba4 --- /dev/null +++ b/vendor/dario.cat/mergo/.travis.yml @@ -0,0 +1,12 @@ +language: go +arch: + - amd64 + - ppc64le +install: + - go get -t + - go get golang.org/x/tools/cmd/cover + - go get github.com/mattn/goveralls +script: + - go test -race -v ./... +after_script: + - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN diff --git a/vendor/dario.cat/mergo/CODE_OF_CONDUCT.md b/vendor/dario.cat/mergo/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..469b44907a --- /dev/null +++ b/vendor/dario.cat/mergo/CODE_OF_CONDUCT.md @@ -0,0 +1,46 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/dario.cat/mergo/CONTRIBUTING.md b/vendor/dario.cat/mergo/CONTRIBUTING.md new file mode 100644 index 0000000000..0a1ff9f94d --- /dev/null +++ b/vendor/dario.cat/mergo/CONTRIBUTING.md @@ -0,0 +1,112 @@ + +# Contributing to mergo + +First off, thanks for taking the time to contribute! ❤️ + +All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 
🎉 + +> And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about: +> - Star the project +> - Tweet about it +> - Refer this project in your project's readme +> - Mention the project at local meetups and tell your friends/colleagues + + +## Table of Contents + +- [Code of Conduct](#code-of-conduct) +- [I Have a Question](#i-have-a-question) +- [I Want To Contribute](#i-want-to-contribute) +- [Reporting Bugs](#reporting-bugs) +- [Suggesting Enhancements](#suggesting-enhancements) + +## Code of Conduct + +This project and everyone participating in it are governed by the +[mergo Code of Conduct](https://github.com/imdario/mergo/blob/master/CODE_OF_CONDUCT.md). +By participating, you are expected to uphold this code. Please report unacceptable behavior +to <>. + + +## I Have a Question + +> If you want to ask a question, we assume that you have read the available [Documentation](https://pkg.go.dev/github.com/imdario/mergo). + +Before you ask a question, it is best to search for existing [Issues](https://github.com/imdario/mergo/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue. It is also advisable to search the internet for answers first. + +If you then still feel the need to ask a question and need clarification, we recommend the following: + +- Open an [Issue](https://github.com/imdario/mergo/issues/new). +- Provide as much context as you can about what you're running into. +- Provide project and platform versions (Go version, OS, etc.), depending on what seems relevant. + +We will then take care of the issue as soon as possible. + +## I Want To Contribute + +> ### Legal Notice +> When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license. + +### Reporting Bugs + + +#### Before Submitting a Bug Report + +A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible. + +- Make sure that you are using the latest version. +- Determine if your bug is really a bug and not an error on your side e.g. using incompatible environment components/versions (Make sure that you have read the [documentation](). If you are looking for support, you might want to check [this section](#i-have-a-question)). +- To see if other users have experienced (and potentially already solved) the same issue you are having, check whether a bug report already exists for your bug or error in the [bug tracker](https://github.com/imdario/mergo/issues?q=label%3Abug). +- Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue. +- Collect information about the bug: +- Stack trace (Traceback) +- OS, Platform and Version (Windows, Linux, macOS, x86, ARM) +- Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant. +- Possibly your input and the output +- Can you reliably reproduce the issue?
And can you also reproduce it with older versions? + + +#### How Do I Submit a Good Bug Report? + +> You must never report security-related issues, vulnerabilities, or bugs that include sensitive information to the issue tracker or elsewhere in public. Instead, sensitive bugs must be sent by email to . + + +We use GitHub issues to track bugs and errors. If you run into an issue with the project: + +- Open an [Issue](https://github.com/imdario/mergo/issues/new). (Since we can't be sure at this point whether it is a bug or not, we ask you not to talk about a bug yet and not to label the issue.) +- Explain the behavior you would expect and the actual behavior. +- Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case. +- Provide the information you collected in the previous section. + +Once it's filed: + +- The project team will label the issue accordingly. +- A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `needs-repro`. Bugs with the `needs-repro` tag will not be addressed until they are reproduced. +- If the team is able to reproduce the issue, it will be marked `needs-fix`, as well as possibly other tags (such as `critical`), and the issue will be left to be implemented by someone. + +### Suggesting Enhancements + +This section guides you through submitting an enhancement suggestion for mergo, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community understand your suggestion and find related suggestions. + + +#### Before Submitting an Enhancement + +- Make sure that you are using the latest version. +- Read the [documentation]() carefully and find out if the functionality is already covered, maybe by an individual configuration. +- Perform a [search](https://github.com/imdario/mergo/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one. +- Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider writing an add-on/plugin library. + + +#### How Do I Submit a Good Enhancement Suggestion? + +Enhancement suggestions are tracked as [GitHub issues](https://github.com/imdario/mergo/issues). + +- Use a **clear and descriptive title** for the issue to identify the suggestion. +- Provide a **step-by-step description of the suggested enhancement** in as much detail as possible. +- **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you. +- You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part to which the suggestion relates.
You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux. +- **Explain why this enhancement would be useful** to most mergo users. You may also want to point out the other projects that solved it better and which could serve as inspiration. + + +## Attribution +This guide is based on the **contributing-gen**. [Make your own](https://github.com/bttger/contributing-gen)! diff --git a/vendor/dario.cat/mergo/LICENSE b/vendor/dario.cat/mergo/LICENSE new file mode 100644 index 0000000000..686680298d --- /dev/null +++ b/vendor/dario.cat/mergo/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2013 Dario Castañé. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
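The substantive change in this patch is the pkg/container wiring earlier in the diff: the new getImageRef helper selects the image transport from the optional local flag, so the resolver can read a manifest out of the host's containers-storage instead of pulling it from a registry. The sketch below mirrors that helper in a standalone program; the main function and the image name are illustrative only, and it assumes a Linux host where the containers-storage transport is available (import paths follow this document's vendored github.com form):

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/docker"
	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/transports"
	"github.com/containers/image/v5/transports/alltransports"
	"github.com/containers/image/v5/types"
)

// imageRef mirrors the patch's getImageRef: when local is non-nil and true,
// the target is parsed against the containers-storage transport; otherwise a
// registry (docker://) reference is built, as before the patch.
func imageRef(target reference.Named, local *bool) (types.ImageReference, error) {
	if local != nil && *local {
		// Note: target.Name() drops any tag or digest, so the local lookup
		// is by repository name only.
		return alltransports.ParseImageName(fmt.Sprintf("containers-storage:%s", target.Name()))
	}
	return docker.NewReference(target)
}

func main() {
	named, err := reference.ParseNormalizedNamed("quay.io/fedora/fedora:latest")
	if err != nil {
		panic(err)
	}

	local := true
	ref, err := imageRef(named, &local)
	if err != nil {
		panic(err)
	}
	// Prints a containers-storage: reference (its exact form depends on the
	// configured store), confirming the manifest would be read locally.
	fmt.Println(transports.ImageName(ref))
}
```

Using a *bool rather than a plain bool keeps "unset" distinguishable from an explicit false, which is why the internal call in resolveManifestList passes nil and thereby preserves the old registry behavior.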
diff --git a/vendor/dario.cat/mergo/README.md b/vendor/dario.cat/mergo/README.md new file mode 100644 index 0000000000..7d0cf9f32a --- /dev/null +++ b/vendor/dario.cat/mergo/README.md @@ -0,0 +1,248 @@ +# Mergo + +[![GitHub release][5]][6] +[![GoCard][7]][8] +[![Test status][1]][2] +[![OpenSSF Scorecard][21]][22] +[![OpenSSF Best Practices][19]][20] +[![Coverage status][9]][10] +[![Sourcegraph][11]][12] +[![FOSSA status][13]][14] + +[![GoDoc][3]][4] +[![Become my sponsor][15]][16] +[![Tidelift][17]][18] + +[1]: https://github.com/imdario/mergo/workflows/tests/badge.svg?branch=master +[2]: https://github.com/imdario/mergo/actions/workflows/tests.yml +[3]: https://godoc.org/github.com/imdario/mergo?status.svg +[4]: https://godoc.org/github.com/imdario/mergo +[5]: https://img.shields.io/github/release/imdario/mergo.svg +[6]: https://github.com/imdario/mergo/releases +[7]: https://goreportcard.com/badge/imdario/mergo +[8]: https://goreportcard.com/report/github.com/imdario/mergo +[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master +[10]: https://coveralls.io/github/imdario/mergo?branch=master +[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg +[12]: https://sourcegraph.com/github.com/imdario/mergo?badge +[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield +[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield +[15]: https://img.shields.io/github/sponsors/imdario +[16]: https://github.com/sponsors/imdario +[17]: https://tidelift.com/badges/package/go/github.com%2Fimdario%2Fmergo +[18]: https://tidelift.com/subscription/pkg/go-github.aaakk.us.kg-imdario-mergo +[19]: https://bestpractices.coreinfrastructure.org/projects/7177/badge +[20]: https://bestpractices.coreinfrastructure.org/projects/7177 +[21]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo/badge +[22]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo + +A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. + +Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will recursively merge any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). + +Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. + +## Status + +It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild). + +### Important notes + +#### 1.0.0 + +In [1.0.0](//github.com/imdario/mergo/releases/tag/1.0.0) Mergo moves to a vanity URL `dario.cat/mergo`. + +#### 0.3.9 + +Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules. + +Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()` and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code.
+
+If you were using Mergo before April 6th, 2015, please check that your project works as intended after updating your local copy with ```go get -u dario.cat/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
+
+### Donations
+
+If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes:
+
+Buy Me a Coffee at ko-fi.com
+Donate using Liberapay
+Become my sponsor
+
+### Mergo in the wild
+
+- [moby/moby](https://github.com/moby/moby)
+- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
+- [vmware/dispatch](https://github.com/vmware/dispatch)
+- [Shopify/themekit](https://github.com/Shopify/themekit)
+- [imdario/zas](https://github.com/imdario/zas)
+- [matcornic/hermes](https://github.com/matcornic/hermes)
+- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go)
+- [kataras/iris](https://github.com/kataras/iris)
+- [michaelsauter/crane](https://github.com/michaelsauter/crane)
+- [go-task/task](https://github.com/go-task/task)
+- [sensu/uchiwa](https://github.com/sensu/uchiwa)
+- [ory/hydra](https://github.com/ory/hydra)
+- [sisatech/vcli](https://github.com/sisatech/vcli)
+- [dairycart/dairycart](https://github.com/dairycart/dairycart)
+- [projectcalico/felix](https://github.com/projectcalico/felix)
+- [resin-os/balena](https://github.com/resin-os/balena)
+- [go-kivik/kivik](https://github.com/go-kivik/kivik)
+- [Telefonica/govice](https://github.com/Telefonica/govice)
+- [supergiant/supergiant](https://github.com/supergiant/supergiant)
+- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce)
+- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy)
+- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel)
+- [EagerIO/Stout](https://github.com/EagerIO/Stout)
+- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api)
+- [russross/canvasassignments](https://github.com/russross/canvasassignments)
+- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api)
+- [casualjim/exeggutor](https://github.com/casualjim/exeggutor)
+- [divshot/gitling](https://github.com/divshot/gitling)
+- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl)
+- [andrerocker/deploy42](https://github.com/andrerocker/deploy42)
+- [elwinar/rambler](https://github.com/elwinar/rambler)
+- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman)
+- [jfbus/impressionist](https://github.com/jfbus/impressionist)
+- [Jmeyering/zealot](https://github.com/Jmeyering/zealot)
+- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host)
+- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go)
+- [thoas/picfit](https://github.com/thoas/picfit)
+- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server)
+- [jnuthong/item_search](https://github.com/jnuthong/item_search)
+- [bukalapak/snowboard](https://github.com/bukalapak/snowboard)
+- [containerssh/containerssh](https://github.com/containerssh/containerssh)
+- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser)
+- [tjpnz/structbot](https://github.com/tjpnz/structbot)
+
+## Install
+
+    go get dario.cat/mergo
+
+    // use in your .go code
+    import (
+        "dario.cat/mergo"
+    )
+
+## Usage
+
+You can only merge same-type structs with exported fields initialized as the zero value of their type, and same-type maps. Mergo won't merge unexported (private) fields, but it merges any exported field recursively. It won't merge empty struct values, as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps will be merged recursively, except for structs inside maps (because they are not addressable using Go reflection).
+
+```go
+if err := mergo.Merge(&dst, src); err != nil {
+	// ...
+}
+```
+
+Also, you can merge overwriting values using the `WithOverride` option.
+
+```go
+if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
+	// ...
+}
+```
+
+Additionally, you can map a `map[string]interface{}` to a struct (and vice versa, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field.
+
+```go
+if err := mergo.Map(&dst, srcMap); err != nil {
+	// ...
+}
+```
+
+Warning: if you map a struct to a map, it won't do so recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`; they will just be assigned as values (see the sketch at the end of this README).
+
+Here is a nice example:
+
+```go
+package main
+
+import (
+	"fmt"
+	"dario.cat/mergo"
+)
+
+type Foo struct {
+	A string
+	B int64
+}
+
+func main() {
+	src := Foo{
+		A: "one",
+		B: 2,
+	}
+	dest := Foo{
+		A: "two",
+	}
+	mergo.Merge(&dest, src)
+	fmt.Println(dest)
+	// Will print
+	// {two 2}
+}
+```
+
+Note: if tests are failing due to a missing package, please execute:
+
+    go get gopkg.in/yaml.v3
+
+### Transformers
+
+Transformers allow merging specific types differently than the default behavior. In other words, you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have a zero value as such, but `IsZero` can return true because it has fields with zero values. How can we merge a non-zero `time.Time`?
+
+```go
+package main
+
+import (
+	"fmt"
+	"dario.cat/mergo"
+	"reflect"
+	"time"
+)
+
+type timeTransformer struct {
+}
+
+func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
+	if typ == reflect.TypeOf(time.Time{}) {
+		return func(dst, src reflect.Value) error {
+			if dst.CanSet() {
+				isZero := dst.MethodByName("IsZero")
+				result := isZero.Call([]reflect.Value{})
+				if result[0].Bool() {
+					dst.Set(src)
+				}
+			}
+			return nil
+		}
+	}
+	return nil
+}
+
+type Snapshot struct {
+	Time time.Time
+	// ...
+}
+
+func main() {
+	src := Snapshot{time.Now()}
+	dest := Snapshot{}
+	mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
+	fmt.Println(dest)
+	// Will print
+	// { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
+}
+```
+
+## Contact me
+
+If I can help you, if you have an idea, or if you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario)
+
+## About
+
+Written by [Dario Castañé](http://dario.im).
+
+## License
+
+[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).
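+
+The following sketch illustrates the struct-to-map warning from the Usage section: mapping a struct into a `map[string]interface{}` is shallow, so nested struct fields are stored as struct values rather than as nested maps. The types and field names below are illustrative only, not part of the API:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"dario.cat/mergo"
+)
+
+type Manager struct {
+	Name string
+}
+
+type Employee struct {
+	Name string
+	Boss Manager
+}
+
+func main() {
+	dst := map[string]interface{}{}
+	src := Employee{Name: "Ann", Boss: Manager{Name: "Bea"}}
+	if err := mergo.Map(&dst, src); err != nil {
+		panic(err)
+	}
+	// Keys are the field names with a lowercased initial.
+	fmt.Printf("%#v\n", dst["boss"]) // main.Manager{Name:"Bea"}, not a map
+}
+```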
+
+[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large)
diff --git a/vendor/dario.cat/mergo/SECURITY.md b/vendor/dario.cat/mergo/SECURITY.md
new file mode 100644
index 0000000000..a5de61f77b
--- /dev/null
+++ b/vendor/dario.cat/mergo/SECURITY.md
@@ -0,0 +1,14 @@
+# Security Policy
+
+## Supported Versions
+
+| Version | Supported          |
+| ------- | ------------------ |
+| 0.3.x   | :white_check_mark: |
+| < 0.3   | :x:                |
+
+## Security contact information
+
+To report a security vulnerability, please use the
+[Tidelift security contact](https://tidelift.com/security).
+Tidelift will coordinate the fix and disclosure.
diff --git a/vendor/dario.cat/mergo/doc.go b/vendor/dario.cat/mergo/doc.go
new file mode 100644
index 0000000000..7d96ec0546
--- /dev/null
+++ b/vendor/dario.cat/mergo/doc.go
@@ -0,0 +1,148 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
+
+Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields, but it merges any exported field recursively. It also won't merge structs inside maps (because they are not addressable using Go reflection).
+
+# Status
+
+It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc.
+
+# Important notes
+
+1.0.0
+
+In 1.0.0 Mergo moves to a vanity URL `dario.cat/mergo`.
+
+0.3.9
+
+Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 stable but not bug-free. Also, this version adds support for go modules.
+
+Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code.
+
+If you were using Mergo before April 6th, 2015, please check that your project works as intended after updating your local copy with go get -u dario.cat/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
+
+# Install
+
+Do your usual installation procedure:
+
+	go get dario.cat/mergo
+
+	// use in your .go code
+	import (
+		"dario.cat/mergo"
+	)
+
+# Usage
+
+You can only merge same-type structs with exported fields initialized as the zero value of their type, and same-type maps. Mergo won't merge unexported (private) fields, but it merges any exported field recursively. It won't merge empty struct values, as they are zero values too. Also, maps will be merged recursively, except for structs inside maps (because they are not addressable using Go reflection).
+
+	if err := mergo.Merge(&dst, src); err != nil {
+		// ...
+	}
+
+Also, you can merge overwriting values using the WithOverride option.
+
+	if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
+		// ...
+	}
+
+Additionally, you can map a map[string]interface{} to a struct (and vice versa, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field.
+
+	if err := mergo.Map(&dst, srcMap); err != nil {
+		// ...
+	}
+
+Warning: if you map a struct to a map, it won't do so recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}; they will just be assigned as values.
+
+Here is a nice example:
+
+	package main
+
+	import (
+		"fmt"
+		"dario.cat/mergo"
+	)
+
+	type Foo struct {
+		A string
+		B int64
+	}
+
+	func main() {
+		src := Foo{
+			A: "one",
+			B: 2,
+		}
+		dest := Foo{
+			A: "two",
+		}
+		mergo.Merge(&dest, src)
+		fmt.Println(dest)
+		// Will print
+		// {two 2}
+	}
+
+# Transformers
+
+Transformers allow merging specific types differently than the default behavior. In other words, you can customize how some types are merged. For example, time.Time is a struct; it doesn't have a zero value as such, but IsZero can return true because it has fields with zero values. How can we merge a non-zero time.Time?
+
+	package main
+
+	import (
+		"fmt"
+		"dario.cat/mergo"
+		"reflect"
+		"time"
+	)
+
+	type timeTransformer struct {
+	}
+
+	func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
+		if typ == reflect.TypeOf(time.Time{}) {
+			return func(dst, src reflect.Value) error {
+				if dst.CanSet() {
+					isZero := dst.MethodByName("IsZero")
+					result := isZero.Call([]reflect.Value{})
+					if result[0].Bool() {
+						dst.Set(src)
+					}
+				}
+				return nil
+			}
+		}
+		return nil
+	}
+
+	type Snapshot struct {
+		Time time.Time
+		// ...
+	}
+
+	func main() {
+		src := Snapshot{time.Now()}
+		dest := Snapshot{}
+		mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
+		fmt.Println(dest)
+		// Will print
+		// { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
+	}
+
+# Contact me
+
+If I can help you, if you have an idea, or if you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario
+
+# About
+
+Written by Dario Castañé: https://da.rio.hn
+
+# License
+
+BSD 3-Clause license, as Go language.
+*/
+package mergo
diff --git a/vendor/dario.cat/mergo/map.go b/vendor/dario.cat/mergo/map.go
new file mode 100644
index 0000000000..b50d5c2a4e
--- /dev/null
+++ b/vendor/dario.cat/mergo/map.go
@@ -0,0 +1,178 @@
+// Copyright 2014 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+	"fmt"
+	"reflect"
+	"unicode"
+	"unicode/utf8"
+)
+
+func changeInitialCase(s string, mapper func(rune) rune) string {
+	if s == "" {
+		return s
+	}
+	r, n := utf8.DecodeRuneInString(s)
+	return string(mapper(r)) + s[n:]
+}
+
+func isExported(field reflect.StructField) bool {
+	r, _ := utf8.DecodeRuneInString(field.Name)
+	return r >= 'A' && r <= 'Z'
+}
+
+// Traverses recursively both values, assigning src's fields values to dst.
+// The map argument tracks comparisons that have already been seen, which allows
+// short circuiting on recursive types.
+func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
+	overwrite := config.Overwrite
+	if dst.CanAddr() {
+		addr := dst.UnsafeAddr()
+		h := 17 * addr
+		seen := visited[h]
+		typ := dst.Type()
+		for p := seen; p != nil; p = p.next {
+			if p.ptr == addr && p.typ == typ {
+				return nil
+			}
+		}
+		// Remember, remember...
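+		// Record this (address, type) pair in visited so cyclic and self-referential
+		// values short-circuit on a revisit instead of recursing forever.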
+ visited[h] = &visit{typ, seen, addr} + } + zeroValue := reflect.Value{} + switch dst.Kind() { + case reflect.Map: + dstMap := dst.Interface().(map[string]interface{}) + for i, n := 0, src.NumField(); i < n; i++ { + srcType := src.Type() + field := srcType.Field(i) + if !isExported(field) { + continue + } + fieldName := field.Name + fieldName = changeInitialCase(fieldName, unicode.ToLower) + if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v), !config.ShouldNotDereference) || overwrite) { + dstMap[fieldName] = src.Field(i).Interface() + } + } + case reflect.Ptr: + if dst.IsNil() { + v := reflect.New(dst.Type().Elem()) + dst.Set(v) + } + dst = dst.Elem() + fallthrough + case reflect.Struct: + srcMap := src.Interface().(map[string]interface{}) + for key := range srcMap { + config.overwriteWithEmptyValue = true + srcValue := srcMap[key] + fieldName := changeInitialCase(key, unicode.ToUpper) + dstElement := dst.FieldByName(fieldName) + if dstElement == zeroValue { + // We discard it because the field doesn't exist. + continue + } + srcElement := reflect.ValueOf(srcValue) + dstKind := dstElement.Kind() + srcKind := srcElement.Kind() + if srcKind == reflect.Ptr && dstKind != reflect.Ptr { + srcElement = srcElement.Elem() + srcKind = reflect.TypeOf(srcElement.Interface()).Kind() + } else if dstKind == reflect.Ptr { + // Can this work? I guess it can't. + if srcKind != reflect.Ptr && srcElement.CanAddr() { + srcPtr := srcElement.Addr() + srcElement = reflect.ValueOf(srcPtr) + srcKind = reflect.Ptr + } + } + + if !srcElement.IsValid() { + continue + } + if srcKind == dstKind { + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface { + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } else if srcKind == reflect.Map { + if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } else { + return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind) + } + } + } + return +} + +// Map sets fields' values in dst from src. +// src can be a map with string keys or a struct. dst must be the opposite: +// if src is a map, dst must be a valid pointer to struct. If src is a struct, +// dst must be map[string]interface{}. +// It won't merge unexported (private) fields and will do recursively +// any exported field. +// If dst is a map, keys will be src fields' names in lower camel case. +// Missing key in src that doesn't match a field in dst will be skipped. This +// doesn't apply if dst is a map. +// This is separated method from Merge because it is cleaner and it keeps sane +// semantics: merging equal types, mapping different (restricted) types. +func Map(dst, src interface{}, opts ...func(*Config)) error { + return _map(dst, src, opts...) +} + +// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by +// non-empty src attribute values. +// Deprecated: Use Map(…) with WithOverride +func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { + return _map(dst, src, append(opts, WithOverride)...) 
+} + +func _map(dst, src interface{}, opts ...func(*Config)) error { + if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { + return ErrNonPointerArgument + } + var ( + vDst, vSrc reflect.Value + err error + ) + config := &Config{} + + for _, opt := range opts { + opt(config) + } + + if vDst, vSrc, err = resolveValues(dst, src); err != nil { + return err + } + // To be friction-less, we redirect equal-type arguments + // to deepMerge. Only because arguments can be anything. + if vSrc.Kind() == vDst.Kind() { + return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) + } + switch vSrc.Kind() { + case reflect.Struct: + if vDst.Kind() != reflect.Map { + return ErrExpectedMapAsDestination + } + case reflect.Map: + if vDst.Kind() != reflect.Struct { + return ErrExpectedStructAsDestination + } + default: + return ErrNotSupported + } + return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config) +} diff --git a/vendor/dario.cat/mergo/merge.go b/vendor/dario.cat/mergo/merge.go new file mode 100644 index 0000000000..0ef9b2138c --- /dev/null +++ b/vendor/dario.cat/mergo/merge.go @@ -0,0 +1,409 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. + +package mergo + +import ( + "fmt" + "reflect" +) + +func hasMergeableFields(dst reflect.Value) (exported bool) { + for i, n := 0, dst.NumField(); i < n; i++ { + field := dst.Type().Field(i) + if field.Anonymous && dst.Field(i).Kind() == reflect.Struct { + exported = exported || hasMergeableFields(dst.Field(i)) + } else if isExportedComponent(&field) { + exported = exported || len(field.PkgPath) == 0 + } + } + return +} + +func isExportedComponent(field *reflect.StructField) bool { + pkgPath := field.PkgPath + if len(pkgPath) > 0 { + return false + } + c := field.Name[0] + if 'a' <= c && c <= 'z' || c == '_' { + return false + } + return true +} + +type Config struct { + Transformers Transformers + Overwrite bool + ShouldNotDereference bool + AppendSlice bool + TypeCheck bool + overwriteWithEmptyValue bool + overwriteSliceWithEmptyValue bool + sliceDeepCopy bool + debug bool +} + +type Transformers interface { + Transformer(reflect.Type) func(dst, src reflect.Value) error +} + +// Traverses recursively both values, assigning src's fields values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { + overwrite := config.Overwrite + typeCheck := config.TypeCheck + overwriteWithEmptySrc := config.overwriteWithEmptyValue + overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue + sliceDeepCopy := config.sliceDeepCopy + + if !src.IsValid() { + return + } + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... 
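+		// Record this (address, type) pair in visited so cyclic and self-referential
+		// values short-circuit on a revisit instead of recursing forever.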
+ visited[h] = &visit{typ, seen, addr} + } + + if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() { + if fn := config.Transformers.Transformer(dst.Type()); fn != nil { + err = fn(dst, src) + return + } + } + + switch dst.Kind() { + case reflect.Struct: + if hasMergeableFields(dst) { + for i, n := 0, dst.NumField(); i < n; i++ { + if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil { + return + } + } + } else { + if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) { + dst.Set(src) + } + } + case reflect.Map: + if dst.IsNil() && !src.IsNil() { + if dst.CanSet() { + dst.Set(reflect.MakeMap(dst.Type())) + } else { + dst = src + return + } + } + + if src.Kind() != reflect.Map { + if overwrite && dst.CanSet() { + dst.Set(src) + } + return + } + + for _, key := range src.MapKeys() { + srcElement := src.MapIndex(key) + if !srcElement.IsValid() { + continue + } + dstElement := dst.MapIndex(key) + switch srcElement.Kind() { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice: + if srcElement.IsNil() { + if overwrite { + dst.SetMapIndex(key, srcElement) + } + continue + } + fallthrough + default: + if !srcElement.CanInterface() { + continue + } + switch reflect.TypeOf(srcElement.Interface()).Kind() { + case reflect.Struct: + fallthrough + case reflect.Ptr: + fallthrough + case reflect.Map: + srcMapElm := srcElement + dstMapElm := dstElement + if srcMapElm.CanInterface() { + srcMapElm = reflect.ValueOf(srcMapElm.Interface()) + if dstMapElm.IsValid() { + dstMapElm = reflect.ValueOf(dstMapElm.Interface()) + } + } + if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil { + return + } + case reflect.Slice: + srcSlice := reflect.ValueOf(srcElement.Interface()) + + var dstSlice reflect.Value + if !dstElement.IsValid() || dstElement.IsNil() { + dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len()) + } else { + dstSlice = reflect.ValueOf(dstElement.Interface()) + } + + if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy { + if typeCheck && srcSlice.Type() != dstSlice.Type() { + return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) + } + dstSlice = srcSlice + } else if config.AppendSlice { + if srcSlice.Type() != dstSlice.Type() { + return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) + } + dstSlice = reflect.AppendSlice(dstSlice, srcSlice) + } else if sliceDeepCopy { + i := 0 + for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ { + srcElement := srcSlice.Index(i) + dstElement := dstSlice.Index(i) + + if srcElement.CanInterface() { + srcElement = reflect.ValueOf(srcElement.Interface()) + } + if dstElement.CanInterface() { + dstElement = reflect.ValueOf(dstElement.Interface()) + } + + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } + + } + dst.SetMapIndex(key, dstSlice) + } + } + + if dstElement.IsValid() && !isEmptyValue(dstElement, !config.ShouldNotDereference) { + if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice { + continue + } + if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map && reflect.TypeOf(dstElement.Interface()).Kind() == reflect.Map { + continue + } + } 
+ + if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement, !config.ShouldNotDereference)) { + if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + dst.SetMapIndex(key, srcElement) + } + } + + // Ensure that all keys in dst are deleted if they are not in src. + if overwriteWithEmptySrc { + for _, key := range dst.MapKeys() { + srcElement := src.MapIndex(key) + if !srcElement.IsValid() { + dst.SetMapIndex(key, reflect.Value{}) + } + } + } + case reflect.Slice: + if !dst.CanSet() { + break + } + if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy { + dst.Set(src) + } else if config.AppendSlice { + if src.Type() != dst.Type() { + return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type()) + } + dst.Set(reflect.AppendSlice(dst, src)) + } else if sliceDeepCopy { + for i := 0; i < src.Len() && i < dst.Len(); i++ { + srcElement := src.Index(i) + dstElement := dst.Index(i) + if srcElement.CanInterface() { + srcElement = reflect.ValueOf(srcElement.Interface()) + } + if dstElement.CanInterface() { + dstElement = reflect.ValueOf(dstElement.Interface()) + } + + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } + } + case reflect.Ptr: + fallthrough + case reflect.Interface: + if isReflectNil(src) { + if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) { + dst.Set(src) + } + break + } + + if src.Kind() != reflect.Interface { + if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) { + if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) { + dst.Set(src) + } + } else if src.Kind() == reflect.Ptr { + if !config.ShouldNotDereference { + if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { + return + } + } else { + if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() { + dst.Set(src) + } + } + } else if dst.Elem().Type() == src.Type() { + if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { + return + } + } else { + return ErrDifferentArgumentsTypes + } + break + } + + if dst.IsNil() || overwrite { + if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) { + dst.Set(src) + } + break + } + + if dst.Elem().Kind() == src.Elem().Kind() { + if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { + return + } + break + } + default: + mustSet := (isEmptyValue(dst, !config.ShouldNotDereference) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) + if mustSet { + if dst.CanSet() { + dst.Set(src) + } else { + dst = src + } + } + } + + return +} + +// Merge will fill any empty for value type attributes on the dst struct using corresponding +// src attributes if they themselves are not empty. dst and src must be valid same-type structs +// and dst must be a pointer to struct. +// It won't merge unexported (private) fields and will do recursively any exported field. +func Merge(dst, src interface{}, opts ...func(*Config)) error { + return merge(dst, src, opts...) +} + +// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by +// non-empty src attribute values. 
+// Deprecated: use Merge(…) with WithOverride
+func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
+	return merge(dst, src, append(opts, WithOverride)...)
+}
+
+// WithTransformers adds transformers to merge, allowing you to customize the merging of some types.
+func WithTransformers(transformers Transformers) func(*Config) {
+	return func(config *Config) {
+		config.Transformers = transformers
+	}
+}
+
+// WithOverride will make merge override non-empty dst attributes with non-empty src attribute values.
+func WithOverride(config *Config) {
+	config.Overwrite = true
+}
+
+// WithOverwriteWithEmptyValue will make merge override non-empty dst attributes with empty src attribute values.
+func WithOverwriteWithEmptyValue(config *Config) {
+	config.Overwrite = true
+	config.overwriteWithEmptyValue = true
+}
+
+// WithOverrideEmptySlice will make merge override an empty dst slice with an empty src slice.
+func WithOverrideEmptySlice(config *Config) {
+	config.overwriteSliceWithEmptyValue = true
+}
+
+// WithoutDereference prevents dereferencing pointers when evaluating whether they are empty
+// (i.e. a non-nil pointer is never considered empty).
+func WithoutDereference(config *Config) {
+	config.ShouldNotDereference = true
+}
+
+// WithAppendSlice will make merge append slices instead of overwriting them.
+func WithAppendSlice(config *Config) {
+	config.AppendSlice = true
+}
+
+// WithTypeCheck will make merge check types while overwriting (must be used with WithOverride).
+func WithTypeCheck(config *Config) {
+	config.TypeCheck = true
+}
+
+// WithSliceDeepCopy will merge slice elements one by one, setting the Overwrite flag.
+func WithSliceDeepCopy(config *Config) {
+	config.sliceDeepCopy = true
+	config.Overwrite = true
+}
+
+func merge(dst, src interface{}, opts ...func(*Config)) error {
+	if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
+		return ErrNonPointerArgument
+	}
+	var (
+		vDst, vSrc reflect.Value
+		err        error
+	)
+
+	config := &Config{}
+
+	for _, opt := range opts {
+		opt(config)
+	}
+
+	if vDst, vSrc, err = resolveValues(dst, src); err != nil {
+		return err
+	}
+	if vDst.Type() != vSrc.Type() {
+		return ErrDifferentArgumentsTypes
+	}
+	return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
+}
+
+// isReflectNil reports whether the provided reflect value is nil.
+func isReflectNil(v reflect.Value) bool {
+	k := v.Kind()
+	switch k {
+	case reflect.Interface, reflect.Slice, reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr:
+		// Both interface and slice are nil if first word is 0.
+		// Both are always bigger than a word; assume flagIndir.
+		return v.IsNil()
+	default:
+		return false
+	}
+}
diff --git a/vendor/dario.cat/mergo/mergo.go b/vendor/dario.cat/mergo/mergo.go
new file mode 100644
index 0000000000..0a721e2d85
--- /dev/null
+++ b/vendor/dario.cat/mergo/mergo.go
@@ -0,0 +1,81 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+	"errors"
+	"reflect"
+)
+
+// Errors reported by Mergo when it finds invalid arguments.
+var (
+	ErrNilArguments                = errors.New("src and dst must not be nil")
+	ErrDifferentArgumentsTypes     = errors.New("src and dst must be of same type")
+	ErrNotSupported                = errors.New("only structs, maps, and slices are supported")
+	ErrExpectedMapAsDestination    = errors.New("dst was expected to be a map")
+	ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
+	ErrNonPointerArgument          = errors.New("dst must be a pointer")
+)
+
+// During deepMerge, we must keep track of checks that are
+// in progress. The comparison algorithm assumes that all
+// checks in progress are true when it reencounters them.
+// Visits are stored in a map indexed by 17 * a1 + a2.
+type visit struct {
+	typ  reflect.Type
+	next *visit
+	ptr  uintptr
+}
+
+// From src/pkg/encoding/json/encode.go.
+func isEmptyValue(v reflect.Value, shouldDereference bool) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Interface, reflect.Ptr:
+		if v.IsNil() {
+			return true
+		}
+		if shouldDereference {
+			return isEmptyValue(v.Elem(), shouldDereference)
+		}
+		return false
+	case reflect.Func:
+		return v.IsNil()
+	case reflect.Invalid:
+		return true
+	}
+	return false
+}
+
+func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) {
+	if dst == nil || src == nil {
+		err = ErrNilArguments
+		return
+	}
+	vDst = reflect.ValueOf(dst).Elem()
+	if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map && vDst.Kind() != reflect.Slice {
+		err = ErrNotSupported
+		return
+	}
+	vSrc = reflect.ValueOf(src)
+	// We check if vSrc is a pointer to dereference it.
+	if vSrc.Kind() == reflect.Ptr {
+		vSrc = vSrc.Elem()
+	}
+	return
+}
diff --git a/vendor/github.com/Microsoft/go-winio/.gitattributes b/vendor/github.com/Microsoft/go-winio/.gitattributes
new file mode 100644
index 0000000000..94f480de94
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/.gitattributes
@@ -0,0 +1 @@
+* text=auto eol=lf
\ No newline at end of file
diff --git a/vendor/github.com/Microsoft/go-winio/.gitignore b/vendor/github.com/Microsoft/go-winio/.gitignore
new file mode 100644
index 0000000000..815e20660e
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/.gitignore
@@ -0,0 +1,10 @@
+.vscode/
+
+*.exe
+
+# testing
+testdata
+
+# go workspaces
+go.work
+go.work.sum
diff --git a/vendor/github.com/Microsoft/go-winio/.golangci.yml b/vendor/github.com/Microsoft/go-winio/.golangci.yml
new file mode 100644
index 0000000000..7b503d26a3
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/.golangci.yml
@@ -0,0 +1,149 @@
+run:
+  skip-dirs:
+    - pkg/etw/sample
+
+linters:
+  enable:
+    # style
+    - containedctx # struct contains a context
+    - dupl # duplicate code
+    - errname # errors are named correctly
+    - nolintlint # "//nolint" directives are properly explained
+    - revive # golint replacement
+    - unconvert # unnecessary conversions
+    - wastedassign
+
+    # bugs, performance, unused, etc ...
+    - contextcheck # function uses a non-inherited context
+    - errorlint # errors not wrapped for 1.13
+    - exhaustive # check exhaustiveness of enum switch statements
+    - gofmt # files are gofmt'ed
+    - gosec # security
+    - nilerr # returns nil even with non-nil error
+    - unparam # unused function params
+
+issues:
+  exclude-rules:
+    # err is very often shadowed in nested scopes
+    - linters:
+        - govet
+      text: '^shadow: declaration of "err" shadows declaration'
+
+    # ignore long lines for skip autogen directives
+    - linters:
+        - revive
+      text: "^line-length-limit: "
+      source: "^//(go:generate|sys) "
+
+    #TODO: remove after upgrading to go1.18
+    # ignore comment spacing for nolint and sys directives
+    - linters:
+        - revive
+      text: "^comment-spacings: no space between comment delimiter and comment text"
+      source: "//(cspell:|nolint:|sys |todo)"
+
+    # not on go 1.18 yet, so no any
+    - linters:
+        - revive
+      text: "^use-any: since GO 1.18 'interface{}' can be replaced by 'any'"
+
+    # allow unjustified ignores of error checks in defer statements
+    - linters:
+        - nolintlint
+      text: "^directive `//nolint:errcheck` should provide explanation"
+      source: '^\s*defer '
+
+    # allow unjustified ignores of error lints for io.EOF
+    - linters:
+        - nolintlint
+      text: "^directive `//nolint:errorlint` should provide explanation"
+      source: '[=|!]= io.EOF'
+
+
+linters-settings:
+  exhaustive:
+    default-signifies-exhaustive: true
+  govet:
+    enable-all: true
+    disable:
+      # struct order is often for Win32 compat
+      # also, ignore pointer bytes/GC issues for now until performance becomes an issue
+      - fieldalignment
+    check-shadowing: true
+  nolintlint:
+    allow-leading-space: false
+    require-explanation: true
+    require-specific: true
+  revive:
+    # revive is more configurable than static check, so likely the preferred alternative to static-check
+    # (once the perf issue is solved: https://github.com/golangci/golangci-lint/issues/2997)
+    enable-all-rules:
+      true
+    # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md
+    rules:
+      # rules with required arguments
+      - name: argument-limit
+        disabled: true
+      - name: banned-characters
+        disabled: true
+      - name: cognitive-complexity
+        disabled: true
+      - name: cyclomatic
+        disabled: true
+      - name: file-header
+        disabled: true
+      - name: function-length
+        disabled: true
+      - name: function-result-limit
+        disabled: true
+      - name: max-public-structs
+        disabled: true
+      # generally annoying rules
+      - name: add-constant # complains about any and all strings and integers
+        disabled: true
+      - name: confusing-naming # we frequently use "Foo()" and "foo()" together
+        disabled: true
+      - name: flag-parameter # excessive, and a common idiom we use
+        disabled: true
+      - name: unhandled-error # warns over common fmt.Print* and io.Close; rely on errcheck instead
+        disabled: true
+      # general config
+      - name: line-length-limit
+        arguments:
+          - 140
+      - name: var-naming
+        arguments:
+          - []
+          - - CID
+            - CRI
+            - CTRD
+            - DACL
+            - DLL
+            - DOS
+            - ETW
+            - FSCTL
+            - GCS
+            - GMSA
+            - HCS
+            - HV
+            - IO
+            - LCOW
+            - LDAP
+            - LPAC
+            - LTSC
+            - MMIO
+            - NT
+            - OCI
+            - PMEM
+            - PWSH
+            - RX
+            - SACl
+            - SID
+            - SMB
+            - TX
+            - VHD
+            - VHDX
+            - VMID
+            - VPCI
+            - WCOW
+            - WIM
diff --git a/vendor/github.com/Microsoft/go-winio/CODEOWNERS b/vendor/github.com/Microsoft/go-winio/CODEOWNERS
new file mode 100644
index 0000000000..ae1b4942b9
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/CODEOWNERS
@@ -0,0 +1 @@
+ * @microsoft/containerplat
diff --git a/vendor/github.com/Microsoft/go-winio/LICENSE b/vendor/github.com/Microsoft/go-winio/LICENSE
new file mode 100644
index 0000000000..b8b569d774
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Microsoft
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/Microsoft/go-winio/README.md b/vendor/github.com/Microsoft/go-winio/README.md
new file mode 100644
index 0000000000..7474b4f0b6
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/README.md
@@ -0,0 +1,89 @@
+# go-winio [![Build Status](https://github.com/microsoft/go-winio/actions/workflows/ci.yml/badge.svg)](https://github.com/microsoft/go-winio/actions/workflows/ci.yml)
+
+This repository contains utilities for efficiently performing Win32 IO operations in
+Go. Currently, this is focused on accessing named pipes and other file handles, and
+for using named pipes as a net transport.
+
+This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go
+to reuse the thread to schedule another goroutine. This limits support to Windows Vista and
+newer operating systems. This is similar to the implementation of network sockets in Go's net
+package.
+
+Please see the LICENSE file for licensing information.
+
+## Contributing
+
+This project welcomes contributions and suggestions.
+Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that
+you have the right to, and actually do, grant us the rights to use your contribution.
+For details, visit [Microsoft CLA](https://cla.microsoft.com).
+
+When you submit a pull request, a CLA-bot will automatically determine whether you need to
+provide a CLA and decorate the PR appropriately (e.g., label, comment).
+Simply follow the instructions provided by the bot.
+You will only need to do this once across all repos using our CLA.
+
+Additionally, the pull request pipeline requires the following steps to be performed before
+merging.
+
+### Code Sign-Off
+
+We require that contributors sign their commits using [`git commit --signoff`][git-commit-s]
+to certify they either authored the work themselves or otherwise have permission to use it in this project.
+
+A range of commits can be signed off using [`git rebase --signoff`][git-rebase-s].
+
+Please see [the developer certificate](https://developercertificate.org) for more info,
+as well as to make sure that you can attest to the rules listed.
+
+Our CI uses the DCO Github app to ensure that all commits in a given PR are signed-off.
+
+### Linting
+
+Code must pass a linting stage, which uses [`golangci-lint`][lint].
+The linting settings are stored in [`.golangci.yaml`](./.golangci.yaml), and can be run
+automatically with VSCode by adding the following to your workspace or folder settings:
+
+```json
+    "go.lintTool": "golangci-lint",
+    "go.lintOnSave": "package",
+```
+
+Additional editor [integration options are also available][lint-ide].
+
+Alternatively, `golangci-lint` can be [installed locally][lint-install] and run from the repo root:
+
+```shell
+# use . or specify a path to only lint a package
+# to show all lint errors, use flags "--max-issues-per-linter=0 --max-same-issues=0"
+> golangci-lint run ./...
+```
+
+### Go Generate
+
+The pipeline checks that auto-generated code, via `go generate`, is up to date.
+
+This can be done for the entire repo:
+
+```shell
+> go generate ./...
+```
+
+## Code of Conduct
+
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
+For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
+contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
+
+## Special Thanks
+
+Thanks to [natefinch][natefinch] for the inspiration for this library.
+See [npipe](https://github.com/natefinch/npipe) for another named pipe implementation.
+
+[lint]: https://golangci-lint.run/
+[lint-ide]: https://golangci-lint.run/usage/integrations/#editor-integration
+[lint-install]: https://golangci-lint.run/usage/install/#local-installation
+
+[git-commit-s]: https://git-scm.com/docs/git-commit#Documentation/git-commit.txt--s
+[git-rebase-s]: https://git-scm.com/docs/git-rebase#Documentation/git-rebase.txt---signoff
+
+[natefinch]: https://github.com/natefinch
diff --git a/vendor/github.com/Microsoft/go-winio/SECURITY.md b/vendor/github.com/Microsoft/go-winio/SECURITY.md
new file mode 100644
index 0000000000..869fdfe2b2
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/SECURITY.md
@@ -0,0 +1,41 @@
+
+
+## Security
+
+Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
+
+If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below.
+
+## Reporting Security Issues
+
+**Please do not report security vulnerabilities through public GitHub issues.**
+
+Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report).
+
+If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey).
+ +You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). + +Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: + + * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) + * Full paths of source file(s) related to the manifestation of the issue + * The location of the affected source code (tag/branch/commit or direct URL) + * Any special configuration required to reproduce the issue + * Step-by-step instructions to reproduce the issue + * Proof-of-concept or exploit code (if possible) + * Impact of the issue, including how an attacker might exploit the issue + +This information will help us triage your report more quickly. + +If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. + +## Preferred Languages + +We prefer all communications to be in English. + +## Policy + +Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). + + diff --git a/vendor/github.com/Microsoft/go-winio/backup.go b/vendor/github.com/Microsoft/go-winio/backup.go new file mode 100644 index 0000000000..09621c8846 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/backup.go @@ -0,0 +1,290 @@ +//go:build windows +// +build windows + +package winio + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "os" + "runtime" + "syscall" + "unicode/utf16" + + "golang.org/x/sys/windows" +) + +//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead +//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite + +const ( + BackupData = uint32(iota + 1) + BackupEaData + BackupSecurity + BackupAlternateData + BackupLink + BackupPropertyData + BackupObjectId //revive:disable-line:var-naming ID, not Id + BackupReparseData + BackupSparseBlock + BackupTxfsData +) + +const ( + StreamSparseAttributes = uint32(8) +) + +//nolint:revive // var-naming: ALL_CAPS +const ( + WRITE_DAC = windows.WRITE_DAC + WRITE_OWNER = windows.WRITE_OWNER + ACCESS_SYSTEM_SECURITY = windows.ACCESS_SYSTEM_SECURITY +) + +// BackupHeader represents a backup stream of a file. +type BackupHeader struct { + //revive:disable-next-line:var-naming ID, not Id + Id uint32 // The backup stream ID + Attributes uint32 // Stream attributes + Size int64 // The size of the stream in bytes + Name string // The name of the stream (for BackupAlternateData only). + Offset int64 // The offset of the stream in the file (for BackupSparseBlock only). +} + +type win32StreamID struct { + StreamID uint32 + Attributes uint32 + Size uint64 + NameSize uint32 +} + +// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series +// of BackupHeader values. +type BackupStreamReader struct { + r io.Reader + bytesLeft int64 +} + +// NewBackupStreamReader produces a BackupStreamReader from any io.Reader. 
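+// Callers typically alternate Next, to fetch the next stream header, with Read,
+// to drain that stream's payload.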
+func NewBackupStreamReader(r io.Reader) *BackupStreamReader { + return &BackupStreamReader{r, 0} +} + +// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if +// it was not completely read. +func (r *BackupStreamReader) Next() (*BackupHeader, error) { + if r.bytesLeft > 0 { //nolint:nestif // todo: flatten this + if s, ok := r.r.(io.Seeker); ok { + // Make sure Seek on io.SeekCurrent sometimes succeeds + // before trying the actual seek. + if _, err := s.Seek(0, io.SeekCurrent); err == nil { + if _, err = s.Seek(r.bytesLeft, io.SeekCurrent); err != nil { + return nil, err + } + r.bytesLeft = 0 + } + } + if _, err := io.Copy(io.Discard, r); err != nil { + return nil, err + } + } + var wsi win32StreamID + if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil { + return nil, err + } + hdr := &BackupHeader{ + Id: wsi.StreamID, + Attributes: wsi.Attributes, + Size: int64(wsi.Size), + } + if wsi.NameSize != 0 { + name := make([]uint16, int(wsi.NameSize/2)) + if err := binary.Read(r.r, binary.LittleEndian, name); err != nil { + return nil, err + } + hdr.Name = syscall.UTF16ToString(name) + } + if wsi.StreamID == BackupSparseBlock { + if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil { + return nil, err + } + hdr.Size -= 8 + } + r.bytesLeft = hdr.Size + return hdr, nil +} + +// Read reads from the current backup stream. +func (r *BackupStreamReader) Read(b []byte) (int, error) { + if r.bytesLeft == 0 { + return 0, io.EOF + } + if int64(len(b)) > r.bytesLeft { + b = b[:r.bytesLeft] + } + n, err := r.r.Read(b) + r.bytesLeft -= int64(n) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } else if r.bytesLeft == 0 && err == nil { + err = io.EOF + } + return n, err +} + +// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API. +type BackupStreamWriter struct { + w io.Writer + bytesLeft int64 +} + +// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer. +func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter { + return &BackupStreamWriter{w, 0} +} + +// WriteHeader writes the next backup stream header and prepares for calls to Write(). +func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error { + if w.bytesLeft != 0 { + return fmt.Errorf("missing %d bytes", w.bytesLeft) + } + name := utf16.Encode([]rune(hdr.Name)) + wsi := win32StreamID{ + StreamID: hdr.Id, + Attributes: hdr.Attributes, + Size: uint64(hdr.Size), + NameSize: uint32(len(name) * 2), + } + if hdr.Id == BackupSparseBlock { + // Include space for the int64 block offset + wsi.Size += 8 + } + if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil { + return err + } + if len(name) != 0 { + if err := binary.Write(w.w, binary.LittleEndian, name); err != nil { + return err + } + } + if hdr.Id == BackupSparseBlock { + if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil { + return err + } + } + w.bytesLeft = hdr.Size + return nil +} + +// Write writes to the current backup stream. +func (w *BackupStreamWriter) Write(b []byte) (int, error) { + if w.bytesLeft < int64(len(b)) { + return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft) + } + n, err := w.w.Write(b) + w.bytesLeft -= int64(n) + return n, err +} + +// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API. 
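+// The ctx field carries the Win32 BackupRead context pointer across successive Read calls.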
+type BackupFileReader struct { + f *os.File + includeSecurity bool + ctx uintptr +} + +// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true, +// Read will attempt to read the security descriptor of the file. +func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader { + r := &BackupFileReader{f, includeSecurity, 0} + return r +} + +// Read reads a backup stream from the file by calling the Win32 API BackupRead(). +func (r *BackupFileReader) Read(b []byte) (int, error) { + var bytesRead uint32 + err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx) + if err != nil { + return 0, &os.PathError{Op: "BackupRead", Path: r.f.Name(), Err: err} + } + runtime.KeepAlive(r.f) + if bytesRead == 0 { + return 0, io.EOF + } + return int(bytesRead), nil +} + +// Close frees Win32 resources associated with the BackupFileReader. It does not close +// the underlying file. +func (r *BackupFileReader) Close() error { + if r.ctx != 0 { + _ = backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx) + runtime.KeepAlive(r.f) + r.ctx = 0 + } + return nil +} + +// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API. +type BackupFileWriter struct { + f *os.File + includeSecurity bool + ctx uintptr +} + +// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true, +// Write() will attempt to restore the security descriptor from the stream. +func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter { + w := &BackupFileWriter{f, includeSecurity, 0} + return w +} + +// Write restores a portion of the file using the provided backup stream. +func (w *BackupFileWriter) Write(b []byte) (int, error) { + var bytesWritten uint32 + err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx) + if err != nil { + return 0, &os.PathError{Op: "BackupWrite", Path: w.f.Name(), Err: err} + } + runtime.KeepAlive(w.f) + if int(bytesWritten) != len(b) { + return int(bytesWritten), errors.New("not all bytes could be written") + } + return len(b), nil +} + +// Close frees Win32 resources associated with the BackupFileWriter. It does not +// close the underlying file. +func (w *BackupFileWriter) Close() error { + if w.ctx != 0 { + _ = backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx) + runtime.KeepAlive(w.f) + w.ctx = 0 + } + return nil +} + +// OpenForBackup opens a file or directory, potentially skipping access checks if the backup +// or restore privileges have been acquired. +// +// If the file opened was a directory, it cannot be used with Readdir(). 
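+// (FILE_FLAG_BACKUP_SEMANTICS, passed below, is also what allows a directory handle to be opened at all.)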
+func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) { + winPath, err := syscall.UTF16FromString(path) + if err != nil { + return nil, err + } + h, err := syscall.CreateFile(&winPath[0], + access, + share, + nil, + createmode, + syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, + 0) + if err != nil { + err = &os.PathError{Op: "open", Path: path, Err: err} + return nil, err + } + return os.NewFile(uintptr(h), path), nil +} diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/doc.go b/vendor/github.com/Microsoft/go-winio/backuptar/doc.go new file mode 100644 index 0000000000..965d52ab04 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/backuptar/doc.go @@ -0,0 +1,3 @@ +// This file only exists to allow go get on non-Windows platforms. + +package backuptar diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/strconv.go b/vendor/github.com/Microsoft/go-winio/backuptar/strconv.go new file mode 100644 index 0000000000..455fd798eb --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/backuptar/strconv.go @@ -0,0 +1,70 @@ +//go:build windows + +package backuptar + +import ( + "archive/tar" + "fmt" + "strconv" + "strings" + "time" +) + +// Functions copied from https://github.com/golang/go/blob/master/src/archive/tar/strconv.go +// as we need to manage the LIBARCHIVE.creationtime PAXRecord manually. +// Idea taken from containerd which did the same thing. + +// parsePAXTime takes a string of the form %d.%d as described in the PAX +// specification. Note that this implementation allows for negative timestamps, +// which is allowed for by the PAX specification, but not always portable. +func parsePAXTime(s string) (time.Time, error) { + const maxNanoSecondDigits = 9 + + // Split string into seconds and sub-seconds parts. + ss, sn := s, "" + if pos := strings.IndexByte(s, '.'); pos >= 0 { + ss, sn = s[:pos], s[pos+1:] + } + + // Parse the seconds. + secs, err := strconv.ParseInt(ss, 10, 64) + if err != nil { + return time.Time{}, tar.ErrHeader + } + if len(sn) == 0 { + return time.Unix(secs, 0), nil // No sub-second values + } + + // Parse the nanoseconds. + if strings.Trim(sn, "0123456789") != "" { + return time.Time{}, tar.ErrHeader + } + if len(sn) < maxNanoSecondDigits { + sn += strings.Repeat("0", maxNanoSecondDigits-len(sn)) // Right pad + } else { + sn = sn[:maxNanoSecondDigits] // Right truncate + } + nsecs, _ := strconv.ParseInt(sn, 10, 64) // Must succeed + if len(ss) > 0 && ss[0] == '-' { + return time.Unix(secs, -1*nsecs), nil // Negative correction + } + return time.Unix(secs, nsecs), nil +} + +// formatPAXTime converts ts into a time of the form %d.%d as described in the +// PAX specification. This function is capable of negative timestamps. +func formatPAXTime(ts time.Time) (s string) { + secs, nsecs := ts.Unix(), ts.Nanosecond() + if nsecs == 0 { + return strconv.FormatInt(secs, 10) + } + + // If seconds is negative, then perform correction. 
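+	// Example: -1.3s is stored as Unix() == -2 with Nanosecond() == 7e8;
+	// the correction below yields secs = 1, nsecs = 3e8, printed as "-1.3".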
+ sign := "" + if secs < 0 { + sign = "-" // Remember sign + secs = -(secs + 1) // Add a second to secs + nsecs = -(nsecs - 1e9) // Take that second away from nsecs + } + return strings.TrimRight(fmt.Sprintf("%s%d.%09d", sign, secs, nsecs), "0") +} diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/tar.go b/vendor/github.com/Microsoft/go-winio/backuptar/tar.go new file mode 100644 index 0000000000..6b3b0cd519 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/backuptar/tar.go @@ -0,0 +1,509 @@ +//go:build windows +// +build windows + +package backuptar + +import ( + "archive/tar" + "encoding/base64" + "fmt" + "io" + "path/filepath" + "strconv" + "strings" + "syscall" + "time" + + "github.com/Microsoft/go-winio" + "golang.org/x/sys/windows" +) + +//nolint:deadcode,varcheck // keep unused constants for potential future use +const ( + cISUID = 0004000 // Set uid + cISGID = 0002000 // Set gid + cISVTX = 0001000 // Save text (sticky bit) + cISDIR = 0040000 // Directory + cISFIFO = 0010000 // FIFO + cISREG = 0100000 // Regular file + cISLNK = 0120000 // Symbolic link + cISBLK = 0060000 // Block special file + cISCHR = 0020000 // Character special file + cISSOCK = 0140000 // Socket +) + +const ( + hdrFileAttributes = "MSWINDOWS.fileattr" + hdrSecurityDescriptor = "MSWINDOWS.sd" + hdrRawSecurityDescriptor = "MSWINDOWS.rawsd" + hdrMountPoint = "MSWINDOWS.mountpoint" + hdrEaPrefix = "MSWINDOWS.xattr." + + hdrCreationTime = "LIBARCHIVE.creationtime" +) + +// zeroReader is an io.Reader that always returns 0s. +type zeroReader struct{} + +func (zeroReader) Read(b []byte) (int, error) { + for i := range b { + b[i] = 0 + } + return len(b), nil +} + +func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error { + curOffset := int64(0) + for { + bhdr, err := br.Next() + if err == io.EOF { //nolint:errorlint + err = io.ErrUnexpectedEOF + } + if err != nil { + return err + } + if bhdr.Id != winio.BackupSparseBlock { + return fmt.Errorf("unexpected stream %d", bhdr.Id) + } + + // We can't seek backwards, since we have already written that data to the tar.Writer. + if bhdr.Offset < curOffset { + return fmt.Errorf("cannot seek back from %d to %d", curOffset, bhdr.Offset) + } + // archive/tar does not support writing sparse files + // so just write zeroes to catch up to the current offset. + if _, err = io.CopyN(t, zeroReader{}, bhdr.Offset-curOffset); err != nil { + return fmt.Errorf("seek to offset %d: %w", bhdr.Offset, err) + } + if bhdr.Size == 0 { + // A sparse block with size = 0 is used to mark the end of the sparse blocks. + break + } + n, err := io.Copy(t, br) + if err != nil { + return err + } + if n != bhdr.Size { + return fmt.Errorf("copied %d bytes instead of %d at offset %d", n, bhdr.Size, bhdr.Offset) + } + curOffset = bhdr.Offset + n + } + return nil +} + +// BasicInfoHeader creates a tar header from basic file information. 
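+//
+// A rough sketch of the intended pairing with winio.GetFileBasicInfo
+// (error handling elided; f is an *os.File opened by the caller):
+//
+//	fi, _ := f.Stat()
+//	bi, _ := winio.GetFileBasicInfo(f)
+//	hdr := BasicInfoHeader(fi.Name(), fi.Size(), bi)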
+func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *tar.Header { + hdr := &tar.Header{ + Format: tar.FormatPAX, + Name: filepath.ToSlash(name), + Size: size, + Typeflag: tar.TypeReg, + ModTime: time.Unix(0, fileInfo.LastWriteTime.Nanoseconds()), + ChangeTime: time.Unix(0, fileInfo.ChangeTime.Nanoseconds()), + AccessTime: time.Unix(0, fileInfo.LastAccessTime.Nanoseconds()), + PAXRecords: make(map[string]string), + } + hdr.PAXRecords[hdrFileAttributes] = fmt.Sprintf("%d", fileInfo.FileAttributes) + hdr.PAXRecords[hdrCreationTime] = formatPAXTime(time.Unix(0, fileInfo.CreationTime.Nanoseconds())) + + if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { + hdr.Mode |= cISDIR + hdr.Size = 0 + hdr.Typeflag = tar.TypeDir + } + return hdr +} + +// SecurityDescriptorFromTarHeader reads the SDDL associated with the header of the current file +// from the tar header and returns the security descriptor into a byte slice. +func SecurityDescriptorFromTarHeader(hdr *tar.Header) ([]byte, error) { + if sdraw, ok := hdr.PAXRecords[hdrRawSecurityDescriptor]; ok { + sd, err := base64.StdEncoding.DecodeString(sdraw) + if err != nil { + // Not returning sd as-is in the error-case, as base64.DecodeString + // may return partially decoded data (not nil or empty slice) in case + // of a failure: https://github.com/golang/go/blob/go1.17.7/src/encoding/base64/base64.go#L382-L387 + return nil, err + } + return sd, nil + } + // Maintaining old SDDL-based behavior for backward compatibility. All new + // tar headers written by this library will have raw binary for the security + // descriptor. + if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok { + return winio.SddlToSecurityDescriptor(sddl) + } + return nil, nil +} + +// ExtendedAttributesFromTarHeader reads the EAs associated with the header of the +// current file from the tar header and returns it as a byte slice. +func ExtendedAttributesFromTarHeader(hdr *tar.Header) ([]byte, error) { + var eas []winio.ExtendedAttribute //nolint:prealloc // len(eas) <= len(hdr.PAXRecords); prealloc is wasteful + for k, v := range hdr.PAXRecords { + if !strings.HasPrefix(k, hdrEaPrefix) { + continue + } + data, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return nil, err + } + eas = append(eas, winio.ExtendedAttribute{ + Name: k[len(hdrEaPrefix):], + Value: data, + }) + } + var eaData []byte + var err error + if len(eas) != 0 { + eaData, err = winio.EncodeExtendedAttributes(eas) + if err != nil { + return nil, err + } + } + return eaData, nil +} + +// EncodeReparsePointFromTarHeader reads the ReparsePoint structure from the tar header +// and encodes it into a byte slice. The file for which this function is called must be a +// symlink. +func EncodeReparsePointFromTarHeader(hdr *tar.Header) []byte { + _, isMountPoint := hdr.PAXRecords[hdrMountPoint] + rp := winio.ReparsePoint{ + Target: filepath.FromSlash(hdr.Linkname), + IsMountPoint: isMountPoint, + } + return winio.EncodeReparsePoint(&rp) +} + +// WriteTarFileFromBackupStream writes a file to a tar writer using data from a Win32 backup stream. +// +// This encodes Win32 metadata as tar pax vendor extensions starting with MSWINDOWS. 
+//
+// The additional Win32 metadata is:
+//
+// - MSWINDOWS.fileattr: The Win32 file attributes, as a decimal value
+// - MSWINDOWS.rawsd: The Win32 security descriptor, in raw binary format
+// - MSWINDOWS.mountpoint: If present, this is a mount point and not a symlink, even though the type is '2' (symlink)
+func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size int64, fileInfo *winio.FileBasicInfo) error {
+ name = filepath.ToSlash(name)
+ hdr := BasicInfoHeader(name, size, fileInfo)
+
+ // If r is seekable, then this function is two-pass: pass 1 collects the
+ // tar header data, and pass 2 copies the data stream. If r is not
+ // seekable, then some header data (in particular EAs) will be silently lost.
+ var (
+ restartPos int64
+ err error
+ )
+ sr, readTwice := r.(io.Seeker)
+ if readTwice {
+ if restartPos, err = sr.Seek(0, io.SeekCurrent); err != nil {
+ readTwice = false
+ }
+ }
+
+ br := winio.NewBackupStreamReader(r)
+ var dataHdr *winio.BackupHeader
+ for dataHdr == nil {
+ bhdr, err := br.Next()
+ if err == io.EOF { //nolint:errorlint
+ break
+ }
+ if err != nil {
+ return err
+ }
+ switch bhdr.Id {
+ case winio.BackupData:
+ hdr.Mode |= cISREG
+ if !readTwice {
+ dataHdr = bhdr
+ }
+ case winio.BackupSecurity:
+ sd, err := io.ReadAll(br)
+ if err != nil {
+ return err
+ }
+ hdr.PAXRecords[hdrRawSecurityDescriptor] = base64.StdEncoding.EncodeToString(sd)
+
+ case winio.BackupReparseData:
+ hdr.Mode |= cISLNK
+ hdr.Typeflag = tar.TypeSymlink
+ reparseBuffer, _ := io.ReadAll(br)
+ rp, err := winio.DecodeReparsePoint(reparseBuffer)
+ if err != nil {
+ return err
+ }
+ if rp.IsMountPoint {
+ hdr.PAXRecords[hdrMountPoint] = "1"
+ }
+ hdr.Linkname = rp.Target
+
+ case winio.BackupEaData:
+ eab, err := io.ReadAll(br)
+ if err != nil {
+ return err
+ }
+ eas, err := winio.DecodeExtendedAttributes(eab)
+ if err != nil {
+ return err
+ }
+ for _, ea := range eas {
+ // Use base64 encoding for the binary value. Note that there
+ // is no way to encode the EA's flags, since their use doesn't
+ // make any sense for persisted EAs.
+ hdr.PAXRecords[hdrEaPrefix+ea.Name] = base64.StdEncoding.EncodeToString(ea.Value)
+ }
+
+ case winio.BackupAlternateData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
+ // ignore these streams
+ default:
+ return fmt.Errorf("%s: unknown stream ID %d", name, bhdr.Id)
+ }
+ }
+
+ err = t.WriteHeader(hdr)
+ if err != nil {
+ return err
+ }
+
+ if readTwice {
+ // Get back to the data stream.
+ if _, err = sr.Seek(restartPos, io.SeekStart); err != nil {
+ return err
+ }
+ for dataHdr == nil {
+ bhdr, err := br.Next()
+ if err == io.EOF { //nolint:errorlint
+ break
+ }
+ if err != nil {
+ return err
+ }
+ if bhdr.Id == winio.BackupData {
+ dataHdr = bhdr
+ }
+ }
+ }
+
+ // The logic for copying file contents is fairly complicated due to the need for handling sparse files,
+ // and the weird ways they are represented by BackupRead. A normal file will always either have a data stream
+ // with size and content, or no data stream at all (if empty). However, for a sparse file, the content can also
+ // be represented using a series of sparse block streams following the data stream. Additionally, the way sparse
+ // files are handled by BackupRead has changed in the OS recently. The specifics of the representation are described
+ // in the list at the bottom of this block comment.
+ //
+ // Sparse files can be represented in four different ways, based on the specifics of the file.
+ // - Size = 0:
+ // Previously: BackupRead yields no data stream and no sparse block streams.
+ // Recently: BackupRead yields a data stream with size = 0. There are no following sparse block streams.
+ // - Size > 0, no allocated ranges:
+ // BackupRead yields a data stream with size = 0. Following is a single sparse block stream with
+ // size = 0 and offset = <file size>.
+ // - Size > 0, one allocated range:
+ // BackupRead yields a data stream with size = <file size> containing the file contents. There are no
+ // sparse block streams. This is the case if you take a normal file with contents and simply set the
+ // sparse flag on it.
+ // - Size > 0, multiple allocated ranges:
+ // BackupRead yields a data stream with size = 0. Following are sparse block streams for each allocated
+ // range of the file containing the range contents. Finally there is a sparse block stream with
+ // size = 0 and offset = <file size>.
+
+ if dataHdr != nil { //nolint:nestif // todo: reduce nesting complexity
+ // A data stream was found. Copy the data.
+ // We assume that we will either have a data stream size > 0 XOR have sparse block streams.
+ if dataHdr.Size > 0 || (dataHdr.Attributes&winio.StreamSparseAttributes) == 0 {
+ if size != dataHdr.Size {
+ return fmt.Errorf("%s: mismatch between file size %d and header size %d", name, size, dataHdr.Size)
+ }
+ if _, err = io.Copy(t, br); err != nil {
+ return fmt.Errorf("%s: copying contents from data stream: %w", name, err)
+ }
+ } else if size > 0 {
+ // As of a recent OS change, BackupRead now returns a data stream for empty sparse files.
+ // These files have no sparse block streams, so skip the copySparse call if file size = 0.
+ if err = copySparse(t, br); err != nil {
+ return fmt.Errorf("%s: copying contents from sparse block stream: %w", name, err)
+ }
+ }
+ }
+
+ // Look for streams after the data stream. The only ones we handle are alternate data streams.
+ // Other streams may have metadata that could be serialized, but the tar header has already
+ // been written. In practice, this means that we don't get EA or TXF metadata.
+ for {
+ bhdr, err := br.Next()
+ if err == io.EOF { //nolint:errorlint
+ break
+ }
+ if err != nil {
+ return err
+ }
+ switch bhdr.Id {
+ case winio.BackupAlternateData:
+ if (bhdr.Attributes & winio.StreamSparseAttributes) != 0 {
+ // Unsupported for now, since the size of the alternate stream is not present
+ // in the backup stream until after the data has been read.
+ return fmt.Errorf("%s: tar of sparse alternate data streams is unsupported", name)
+ }
+ altName := strings.TrimSuffix(bhdr.Name, ":$DATA")
+ hdr = &tar.Header{
+ Format: hdr.Format,
+ Name: name + altName,
+ Mode: hdr.Mode,
+ Typeflag: tar.TypeReg,
+ Size: bhdr.Size,
+ ModTime: hdr.ModTime,
+ AccessTime: hdr.AccessTime,
+ ChangeTime: hdr.ChangeTime,
+ }
+ err = t.WriteHeader(hdr)
+ if err != nil {
+ return err
+ }
+ _, err = io.Copy(t, br)
+ if err != nil {
+ return err
+ }
+ case winio.BackupEaData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
+ // ignore these streams
+ default:
+ return fmt.Errorf("%s: unknown stream ID %d after data", name, bhdr.Id)
+ }
+ }
+ return nil
+}
+
+// FileInfoFromHeader retrieves basic Win32 file information from a tar header, using the additional metadata written by
+// WriteTarFileFromBackupStream.
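+//
+// A rough sketch (error handling elided): while restoring entries from a
+// tar.Reader tr, recover the Win32 metadata for each header:
+//
+//	hdr, _ := tr.Next()
+//	name, size, bi, _ := FileInfoFromHeader(hdr)
+//	// name and size describe the entry; bi carries timestamps and attributes.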
+func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *winio.FileBasicInfo, err error) {
+ name = hdr.Name
+ if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
+ size = hdr.Size
+ }
+ fileInfo = &winio.FileBasicInfo{
+ LastAccessTime: windows.NsecToFiletime(hdr.AccessTime.UnixNano()),
+ LastWriteTime: windows.NsecToFiletime(hdr.ModTime.UnixNano()),
+ ChangeTime: windows.NsecToFiletime(hdr.ChangeTime.UnixNano()),
+ // Default to ModTime, we'll pull hdrCreationTime below if present
+ CreationTime: windows.NsecToFiletime(hdr.ModTime.UnixNano()),
+ }
+ if attrStr, ok := hdr.PAXRecords[hdrFileAttributes]; ok {
+ attr, err := strconv.ParseUint(attrStr, 10, 32)
+ if err != nil {
+ return "", 0, nil, err
+ }
+ fileInfo.FileAttributes = uint32(attr)
+ } else {
+ if hdr.Typeflag == tar.TypeDir {
+ fileInfo.FileAttributes |= syscall.FILE_ATTRIBUTE_DIRECTORY
+ }
+ }
+ if creationTimeStr, ok := hdr.PAXRecords[hdrCreationTime]; ok {
+ creationTime, err := parsePAXTime(creationTimeStr)
+ if err != nil {
+ return "", 0, nil, err
+ }
+ fileInfo.CreationTime = windows.NsecToFiletime(creationTime.UnixNano())
+ }
+ return name, size, fileInfo, err
+}
+
+// WriteBackupStreamFromTarFile writes a Win32 backup stream from the current tar file. Since this function may process multiple
+// tar file entries in order to collect all the alternate data streams for the file, it returns the next
+// tar file that was not processed, or io.EOF if there are no more.
+func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) {
+ bw := winio.NewBackupStreamWriter(w)
+
+ sd, err := SecurityDescriptorFromTarHeader(hdr)
+ if err != nil {
+ return nil, err
+ }
+ if len(sd) != 0 {
+ bhdr := winio.BackupHeader{
+ Id: winio.BackupSecurity,
+ Size: int64(len(sd)),
+ }
+ err := bw.WriteHeader(&bhdr)
+ if err != nil {
+ return nil, err
+ }
+ _, err = bw.Write(sd)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ eadata, err := ExtendedAttributesFromTarHeader(hdr)
+ if err != nil {
+ return nil, err
+ }
+ if len(eadata) != 0 {
+ bhdr := winio.BackupHeader{
+ Id: winio.BackupEaData,
+ Size: int64(len(eadata)),
+ }
+ err = bw.WriteHeader(&bhdr)
+ if err != nil {
+ return nil, err
+ }
+ _, err = bw.Write(eadata)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if hdr.Typeflag == tar.TypeSymlink {
+ reparse := EncodeReparsePointFromTarHeader(hdr)
+ bhdr := winio.BackupHeader{
+ Id: winio.BackupReparseData,
+ Size: int64(len(reparse)),
+ }
+ err := bw.WriteHeader(&bhdr)
+ if err != nil {
+ return nil, err
+ }
+ _, err = bw.Write(reparse)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
+ bhdr := winio.BackupHeader{
+ Id: winio.BackupData,
+ Size: hdr.Size,
+ }
+ err := bw.WriteHeader(&bhdr)
+ if err != nil {
+ return nil, err
+ }
+ _, err = io.Copy(bw, t)
+ if err != nil {
+ return nil, err
+ }
+ }
+ // Copy all the alternate data streams and return the next non-ADS header.
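+ // An alternate data stream for "file" appears as a regular tar entry named
+ // "file:<stream>" immediately after "file" (as written by
+ // WriteTarFileFromBackupStream); the first entry that does not match this
+ // pattern ends the group and is returned to the caller.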
+ for {
+ ahdr, err := t.Next()
+ if err != nil {
+ return nil, err
+ }
+ if ahdr.Typeflag != tar.TypeReg || !strings.HasPrefix(ahdr.Name, hdr.Name+":") {
+ return ahdr, nil
+ }
+ bhdr := winio.BackupHeader{
+ Id: winio.BackupAlternateData,
+ Size: ahdr.Size,
+ Name: ahdr.Name[len(hdr.Name):] + ":$DATA",
+ }
+ err = bw.WriteHeader(&bhdr)
+ if err != nil {
+ return nil, err
+ }
+ _, err = io.Copy(bw, t)
+ if err != nil {
+ return nil, err
+ }
+ }
+}
diff --git a/vendor/github.com/Microsoft/go-winio/doc.go b/vendor/github.com/Microsoft/go-winio/doc.go
new file mode 100644
index 0000000000..1f5bfe2d54
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/doc.go
@@ -0,0 +1,22 @@
+// This package provides utilities for efficiently performing Win32 IO operations in Go.
+// Currently, this package provides support for general IO and management of
+// - named pipes
+// - files
+// - [Hyper-V sockets]
+//
+// This code is similar to Go's [net] package, and uses IO completion ports to avoid
+// blocking IO on system threads, allowing Go to reuse the thread to schedule other goroutines.
+//
+// This limits support to Windows Vista and newer operating systems.
+//
+// Additionally, this package provides support for:
+// - creating and managing GUIDs
+// - writing to [ETW]
+// - opening and managing VHDs
+// - parsing [Windows Image files]
+// - auto-generating Win32 API code
+//
+// [Hyper-V sockets]: https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service
+// [ETW]: https://docs.microsoft.com/en-us/windows-hardware/drivers/devtest/event-tracing-for-windows--etw-
+// [Windows Image files]: https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/work-with-windows-images
+package winio
diff --git a/vendor/github.com/Microsoft/go-winio/ea.go b/vendor/github.com/Microsoft/go-winio/ea.go
new file mode 100644
index 0000000000..e104dbdfdf
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/ea.go
@@ -0,0 +1,137 @@
+package winio
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+)
+
+type fileFullEaInformation struct {
+ NextEntryOffset uint32
+ Flags uint8
+ NameLength uint8
+ ValueLength uint16
+}
+
+var (
+ fileFullEaInformationSize = binary.Size(&fileFullEaInformation{})
+
+ errInvalidEaBuffer = errors.New("invalid extended attribute buffer")
+ errEaNameTooLarge = errors.New("extended attribute name too large")
+ errEaValueTooLarge = errors.New("extended attribute value too large")
+)
+
+// ExtendedAttribute represents a single Windows EA.
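+// Name must fit in a uint8 length (at most 255 bytes) and Value in a uint16
+// length (at most 65535 bytes); Flags corresponds to the Flags field of
+// FILE_FULL_EA_INFORMATION.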
+type ExtendedAttribute struct { + Name string + Value []byte + Flags uint8 +} + +func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) { + var info fileFullEaInformation + err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info) + if err != nil { + err = errInvalidEaBuffer + return ea, nb, err + } + + nameOffset := fileFullEaInformationSize + nameLen := int(info.NameLength) + valueOffset := nameOffset + int(info.NameLength) + 1 + valueLen := int(info.ValueLength) + nextOffset := int(info.NextEntryOffset) + if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) { + err = errInvalidEaBuffer + return ea, nb, err + } + + ea.Name = string(b[nameOffset : nameOffset+nameLen]) + ea.Value = b[valueOffset : valueOffset+valueLen] + ea.Flags = info.Flags + if info.NextEntryOffset != 0 { + nb = b[info.NextEntryOffset:] + } + return ea, nb, err +} + +// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION +// buffer retrieved from BackupRead, ZwQueryEaFile, etc. +func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) { + for len(b) != 0 { + ea, nb, err := parseEa(b) + if err != nil { + return nil, err + } + + eas = append(eas, ea) + b = nb + } + return eas, err +} + +func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error { + if int(uint8(len(ea.Name))) != len(ea.Name) { + return errEaNameTooLarge + } + if int(uint16(len(ea.Value))) != len(ea.Value) { + return errEaValueTooLarge + } + entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value)) + withPadding := (entrySize + 3) &^ 3 + nextOffset := uint32(0) + if !last { + nextOffset = withPadding + } + info := fileFullEaInformation{ + NextEntryOffset: nextOffset, + Flags: ea.Flags, + NameLength: uint8(len(ea.Name)), + ValueLength: uint16(len(ea.Value)), + } + + err := binary.Write(buf, binary.LittleEndian, &info) + if err != nil { + return err + } + + _, err = buf.Write([]byte(ea.Name)) + if err != nil { + return err + } + + err = buf.WriteByte(0) + if err != nil { + return err + } + + _, err = buf.Write(ea.Value) + if err != nil { + return err + } + + _, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize]) + if err != nil { + return err + } + + return nil +} + +// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION +// buffer for use with BackupWrite, ZwSetEaFile, etc. 
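+//
+// A rough round-trip sketch (error handling elided; the EA name shown is
+// illustrative):
+//
+//	buf, _ := EncodeExtendedAttributes([]ExtendedAttribute{
+//		{Name: "USER.COMMENT", Value: []byte("hello")},
+//	})
+//	eas, _ := DecodeExtendedAttributes(buf) // eas mirrors the input slice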
+func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) { + var buf bytes.Buffer + for i := range eas { + last := false + if i == len(eas)-1 { + last = true + } + + err := writeEa(&buf, &eas[i], last) + if err != nil { + return nil, err + } + } + return buf.Bytes(), nil +} diff --git a/vendor/github.com/Microsoft/go-winio/file.go b/vendor/github.com/Microsoft/go-winio/file.go new file mode 100644 index 0000000000..175a99d3f4 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/file.go @@ -0,0 +1,331 @@ +//go:build windows +// +build windows + +package winio + +import ( + "errors" + "io" + "runtime" + "sync" + "sync/atomic" + "syscall" + "time" + + "golang.org/x/sys/windows" +) + +//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx +//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort +//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus +//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes +//sys wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult + +type atomicBool int32 + +func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 } +func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) } +func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) } + +//revive:disable-next-line:predeclared Keep "new" to maintain consistency with "atomic" pkg +func (b *atomicBool) swap(new bool) bool { + var newInt int32 + if new { + newInt = 1 + } + return atomic.SwapInt32((*int32)(b), newInt) == 1 +} + +var ( + ErrFileClosed = errors.New("file has already been closed") + ErrTimeout = &timeoutError{} +) + +type timeoutError struct{} + +func (*timeoutError) Error() string { return "i/o timeout" } +func (*timeoutError) Timeout() bool { return true } +func (*timeoutError) Temporary() bool { return true } + +type timeoutChan chan struct{} + +var ioInitOnce sync.Once +var ioCompletionPort syscall.Handle + +// ioResult contains the result of an asynchronous IO operation. +type ioResult struct { + bytes uint32 + err error +} + +// ioOperation represents an outstanding asynchronous Win32 IO. +type ioOperation struct { + o syscall.Overlapped + ch chan ioResult +} + +func initIO() { + h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff) + if err != nil { + panic(err) + } + ioCompletionPort = h + go ioCompletionProcessor(h) +} + +// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall. +// It takes ownership of this handle and will close it if it is garbage collected. +type win32File struct { + handle syscall.Handle + wg sync.WaitGroup + wgLock sync.RWMutex + closing atomicBool + socket bool + readDeadline deadlineHandler + writeDeadline deadlineHandler +} + +type deadlineHandler struct { + setLock sync.Mutex + channel timeoutChan + channelLock sync.RWMutex + timer *time.Timer + timedout atomicBool +} + +// makeWin32File makes a new win32File from an existing file handle. 
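+// The handle is associated with the package's shared IO completion port, and
+// completion notifications are skipped for operations that finish
+// synchronously, so only genuinely pending IO round-trips through the port.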
+func makeWin32File(h syscall.Handle) (*win32File, error) { + f := &win32File{handle: h} + ioInitOnce.Do(initIO) + _, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff) + if err != nil { + return nil, err + } + err = setFileCompletionNotificationModes(h, windows.FILE_SKIP_COMPLETION_PORT_ON_SUCCESS|windows.FILE_SKIP_SET_EVENT_ON_HANDLE) + if err != nil { + return nil, err + } + f.readDeadline.channel = make(timeoutChan) + f.writeDeadline.channel = make(timeoutChan) + return f, nil +} + +func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { + // If we return the result of makeWin32File directly, it can result in an + // interface-wrapped nil, rather than a nil interface value. + f, err := makeWin32File(h) + if err != nil { + return nil, err + } + return f, nil +} + +// closeHandle closes the resources associated with a Win32 handle. +func (f *win32File) closeHandle() { + f.wgLock.Lock() + // Atomically set that we are closing, releasing the resources only once. + if !f.closing.swap(true) { + f.wgLock.Unlock() + // cancel all IO and wait for it to complete + _ = cancelIoEx(f.handle, nil) + f.wg.Wait() + // at this point, no new IO can start + syscall.Close(f.handle) + f.handle = 0 + } else { + f.wgLock.Unlock() + } +} + +// Close closes a win32File. +func (f *win32File) Close() error { + f.closeHandle() + return nil +} + +// IsClosed checks if the file has been closed. +func (f *win32File) IsClosed() bool { + return f.closing.isSet() +} + +// prepareIO prepares for a new IO operation. +// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning. +func (f *win32File) prepareIO() (*ioOperation, error) { + f.wgLock.RLock() + if f.closing.isSet() { + f.wgLock.RUnlock() + return nil, ErrFileClosed + } + f.wg.Add(1) + f.wgLock.RUnlock() + c := &ioOperation{} + c.ch = make(chan ioResult) + return c, nil +} + +// ioCompletionProcessor processes completed async IOs forever. +func ioCompletionProcessor(h syscall.Handle) { + for { + var bytes uint32 + var key uintptr + var op *ioOperation + err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE) + if op == nil { + panic(err) + } + op.ch <- ioResult{bytes, err} + } +} + +// todo: helsaawy - create an asyncIO version that takes a context + +// asyncIO processes the return value from ReadFile or WriteFile, blocking until +// the operation has actually completed. +func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) { + if err != syscall.ERROR_IO_PENDING { //nolint:errorlint // err is Errno + return int(bytes), err + } + + if f.closing.isSet() { + _ = cancelIoEx(f.handle, &c.o) + } + + var timeout timeoutChan + if d != nil { + d.channelLock.Lock() + timeout = d.channel + d.channelLock.Unlock() + } + + var r ioResult + select { + case r = <-c.ch: + err = r.err + if err == syscall.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno + if f.closing.isSet() { + err = ErrFileClosed + } + } else if err != nil && f.socket { + // err is from Win32. Query the overlapped structure to get the winsock error. 
+ var bytes, flags uint32 + err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags) + } + case <-timeout: + _ = cancelIoEx(f.handle, &c.o) + r = <-c.ch + err = r.err + if err == syscall.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno + err = ErrTimeout + } + } + + // runtime.KeepAlive is needed, as c is passed via native + // code to ioCompletionProcessor, c must remain alive + // until the channel read is complete. + // todo: (de)allocate *ioOperation via win32 heap functions, instead of needing to KeepAlive? + runtime.KeepAlive(c) + return int(r.bytes), err +} + +// Read reads from a file handle. +func (f *win32File) Read(b []byte) (int, error) { + c, err := f.prepareIO() + if err != nil { + return 0, err + } + defer f.wg.Done() + + if f.readDeadline.timedout.isSet() { + return 0, ErrTimeout + } + + var bytes uint32 + err = syscall.ReadFile(f.handle, b, &bytes, &c.o) + n, err := f.asyncIO(c, &f.readDeadline, bytes, err) + runtime.KeepAlive(b) + + // Handle EOF conditions. + if err == nil && n == 0 && len(b) != 0 { + return 0, io.EOF + } else if err == syscall.ERROR_BROKEN_PIPE { //nolint:errorlint // err is Errno + return 0, io.EOF + } else { + return n, err + } +} + +// Write writes to a file handle. +func (f *win32File) Write(b []byte) (int, error) { + c, err := f.prepareIO() + if err != nil { + return 0, err + } + defer f.wg.Done() + + if f.writeDeadline.timedout.isSet() { + return 0, ErrTimeout + } + + var bytes uint32 + err = syscall.WriteFile(f.handle, b, &bytes, &c.o) + n, err := f.asyncIO(c, &f.writeDeadline, bytes, err) + runtime.KeepAlive(b) + return n, err +} + +func (f *win32File) SetReadDeadline(deadline time.Time) error { + return f.readDeadline.set(deadline) +} + +func (f *win32File) SetWriteDeadline(deadline time.Time) error { + return f.writeDeadline.set(deadline) +} + +func (f *win32File) Flush() error { + return syscall.FlushFileBuffers(f.handle) +} + +func (f *win32File) Fd() uintptr { + return uintptr(f.handle) +} + +func (d *deadlineHandler) set(deadline time.Time) error { + d.setLock.Lock() + defer d.setLock.Unlock() + + if d.timer != nil { + if !d.timer.Stop() { + <-d.channel + } + d.timer = nil + } + d.timedout.setFalse() + + select { + case <-d.channel: + d.channelLock.Lock() + d.channel = make(chan struct{}) + d.channelLock.Unlock() + default: + } + + if deadline.IsZero() { + return nil + } + + timeoutIO := func() { + d.timedout.setTrue() + close(d.channel) + } + + now := time.Now() + duration := deadline.Sub(now) + if deadline.After(now) { + // Deadline is in the future, set a timer to wait + d.timer = time.AfterFunc(duration, timeoutIO) + } else { + // Deadline is in the past. Cancel all pending IO now. + timeoutIO() + } + return nil +} diff --git a/vendor/github.com/Microsoft/go-winio/fileinfo.go b/vendor/github.com/Microsoft/go-winio/fileinfo.go new file mode 100644 index 0000000000..702950e72a --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/fileinfo.go @@ -0,0 +1,92 @@ +//go:build windows +// +build windows + +package winio + +import ( + "os" + "runtime" + "unsafe" + + "golang.org/x/sys/windows" +) + +// FileBasicInfo contains file access time and file attributes information. +type FileBasicInfo struct { + CreationTime, LastAccessTime, LastWriteTime, ChangeTime windows.Filetime + FileAttributes uint32 + _ uint32 // padding +} + +// GetFileBasicInfo retrieves times and attributes for a file. 
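+//
+// A rough sketch (error handling elided): set the read-only attribute by
+// round-tripping through SetFileBasicInfo:
+//
+//	bi, _ := GetFileBasicInfo(f)
+//	bi.FileAttributes |= windows.FILE_ATTRIBUTE_READONLY
+//	_ = SetFileBasicInfo(f, bi)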
+func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
+ bi := &FileBasicInfo{}
+ if err := windows.GetFileInformationByHandleEx(
+ windows.Handle(f.Fd()),
+ windows.FileBasicInfo,
+ (*byte)(unsafe.Pointer(bi)),
+ uint32(unsafe.Sizeof(*bi)),
+ ); err != nil {
+ return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
+ }
+ runtime.KeepAlive(f)
+ return bi, nil
+}
+
+// SetFileBasicInfo sets times and attributes for a file.
+func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error {
+ if err := windows.SetFileInformationByHandle(
+ windows.Handle(f.Fd()),
+ windows.FileBasicInfo,
+ (*byte)(unsafe.Pointer(bi)),
+ uint32(unsafe.Sizeof(*bi)),
+ ); err != nil {
+ return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err}
+ }
+ runtime.KeepAlive(f)
+ return nil
+}
+
+// FileStandardInfo contains extended information for the file.
+// FILE_STANDARD_INFO in WinBase.h
+// https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_standard_info
+type FileStandardInfo struct {
+ AllocationSize, EndOfFile int64
+ NumberOfLinks uint32
+ DeletePending, Directory bool
+}
+
+// GetFileStandardInfo retrieves extended information for the file.
+func GetFileStandardInfo(f *os.File) (*FileStandardInfo, error) {
+ si := &FileStandardInfo{}
+ if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()),
+ windows.FileStandardInfo,
+ (*byte)(unsafe.Pointer(si)),
+ uint32(unsafe.Sizeof(*si))); err != nil {
+ return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
+ }
+ runtime.KeepAlive(f)
+ return si, nil
+}
+
+// FileIDInfo contains the volume serial number and file ID for a file. This pair should be
+// unique on a system.
+type FileIDInfo struct {
+ VolumeSerialNumber uint64
+ FileID [16]byte
+}
+
+// GetFileID retrieves the unique (volume, file ID) pair for a file.
+func GetFileID(f *os.File) (*FileIDInfo, error) {
+ fileID := &FileIDInfo{}
+ if err := windows.GetFileInformationByHandleEx(
+ windows.Handle(f.Fd()),
+ windows.FileIdInfo,
+ (*byte)(unsafe.Pointer(fileID)),
+ uint32(unsafe.Sizeof(*fileID)),
+ ); err != nil {
+ return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
+ }
+ runtime.KeepAlive(f)
+ return fileID, nil
+}
diff --git a/vendor/github.com/Microsoft/go-winio/hvsock.go b/vendor/github.com/Microsoft/go-winio/hvsock.go
new file mode 100644
index 0000000000..c881916583
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/hvsock.go
@@ -0,0 +1,575 @@
+//go:build windows
+// +build windows
+
+package winio
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "syscall"
+ "time"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+
+ "github.com/Microsoft/go-winio/internal/socket"
+ "github.com/Microsoft/go-winio/pkg/guid"
+)
+
+const afHVSock = 34 // AF_HYPERV
+
+// Well known Service and VM IDs
+// https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service#vmid-wildcards
+
+// HvsockGUIDWildcard is the wildcard VmId for accepting connections from all partitions.
+func HvsockGUIDWildcard() guid.GUID { // 00000000-0000-0000-0000-000000000000
+ return guid.GUID{}
+}
+
+// HvsockGUIDBroadcast is the wildcard VmId for broadcasting sends to all partitions.
+func HvsockGUIDBroadcast() guid.GUID { // ffffffff-ffff-ffff-ffff-ffffffffffff
+ return guid.GUID{
+ Data1: 0xffffffff,
+ Data2: 0xffff,
+ Data3: 0xffff,
+ Data4: [8]uint8{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ }
+}
+
+// HvsockGUIDLoopback is the Loopback VmId for accepting connections to the same partition as the connector.
+func HvsockGUIDLoopback() guid.GUID { // e0e16197-dd56-4a10-9195-5ee7a155a838
+ return guid.GUID{
+ Data1: 0xe0e16197,
+ Data2: 0xdd56,
+ Data3: 0x4a10,
+ Data4: [8]uint8{0x91, 0x95, 0x5e, 0xe7, 0xa1, 0x55, 0xa8, 0x38},
+ }
+}
+
+// HvsockGUIDSiloHost is the address of a silo's host partition:
+// - The silo host of a hosted silo is the utility VM.
+// - The silo host of a silo on a physical host is the physical host.
+func HvsockGUIDSiloHost() guid.GUID { // 36bd0c5c-7276-4223-88ba-7d03b654c568
+ return guid.GUID{
+ Data1: 0x36bd0c5c,
+ Data2: 0x7276,
+ Data3: 0x4223,
+ Data4: [8]byte{0x88, 0xba, 0x7d, 0x03, 0xb6, 0x54, 0xc5, 0x68},
+ }
+}
+
+// HvsockGUIDChildren is the wildcard VmId for accepting connections from the connector's child partitions.
+func HvsockGUIDChildren() guid.GUID { // 90db8b89-0d35-4f79-8ce9-49ea0ac8b7cd
+ return guid.GUID{
+ Data1: 0x90db8b89,
+ Data2: 0xd35,
+ Data3: 0x4f79,
+ Data4: [8]uint8{0x8c, 0xe9, 0x49, 0xea, 0xa, 0xc8, 0xb7, 0xcd},
+ }
+}
+
+// HvsockGUIDParent is the wildcard VmId for accepting connections from the connector's parent partition.
+// Listening on this VmId accepts connection from:
+// - Inside silos: silo host partition.
+// - Inside hosted silo: host of the VM.
+// - Inside VM: VM host.
+// - Physical host: Not supported.
+func HvsockGUIDParent() guid.GUID { // a42e7cda-d03f-480c-9cc2-a4de20abb878
+ return guid.GUID{
+ Data1: 0xa42e7cda,
+ Data2: 0xd03f,
+ Data3: 0x480c,
+ Data4: [8]uint8{0x9c, 0xc2, 0xa4, 0xde, 0x20, 0xab, 0xb8, 0x78},
+ }
+}
+
+// hvsockVsockServiceTemplate is the Service GUID used for the VSOCK protocol.
+func hvsockVsockServiceTemplate() guid.GUID { // 00000000-facb-11e6-bd58-64006a7986d3
+ return guid.GUID{
+ Data2: 0xfacb,
+ Data3: 0x11e6,
+ Data4: [8]uint8{0xbd, 0x58, 0x64, 0x00, 0x6a, 0x79, 0x86, 0xd3},
+ }
+}
+
+// An HvsockAddr is an address for an AF_HYPERV socket.
+type HvsockAddr struct {
+ VMID guid.GUID
+ ServiceID guid.GUID
+}
+
+type rawHvsockAddr struct {
+ Family uint16
+ _ uint16
+ VMID guid.GUID
+ ServiceID guid.GUID
+}
+
+var _ socket.RawSockaddr = &rawHvsockAddr{}
+
+// Network returns the address's network name, "hvsock".
+func (*HvsockAddr) Network() string {
+ return "hvsock"
+}
+
+func (addr *HvsockAddr) String() string {
+ return fmt.Sprintf("%s:%s", &addr.VMID, &addr.ServiceID)
+}
+
+// VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port.
+func VsockServiceID(port uint32) guid.GUID {
+ g := hvsockVsockServiceTemplate() // make a copy
+ g.Data1 = port
+ return g
+}
+
+func (addr *HvsockAddr) raw() rawHvsockAddr {
+ return rawHvsockAddr{
+ Family: afHVSock,
+ VMID: addr.VMID,
+ ServiceID: addr.ServiceID,
+ }
+}
+
+func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) {
+ addr.VMID = raw.VMID
+ addr.ServiceID = raw.ServiceID
+}
+
+// Sockaddr returns a pointer to and the size of this struct.
+//
+// Implements the [socket.RawSockaddr] interface, and allows use in
+// [socket.Bind] and [socket.ConnectEx].
+func (r *rawHvsockAddr) Sockaddr() (unsafe.Pointer, int32, error) {
+ return unsafe.Pointer(r), int32(unsafe.Sizeof(rawHvsockAddr{})), nil
+}
+
+// FromBytes fills the address from the raw sockaddr bytes returned by calls such as getsockname.
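+// The buffer must be at least unsafe.Sizeof(rawHvsockAddr{}) bytes long and
+// must hold an AF_HYPERV address; otherwise an error wrapping
+// socket.ErrBufferSize or socket.ErrAddrFamily is returned.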
+func (r *rawHvsockAddr) FromBytes(b []byte) error {
+ n := int(unsafe.Sizeof(rawHvsockAddr{}))
+
+ if len(b) < n {
+ return fmt.Errorf("got %d, want %d: %w", len(b), n, socket.ErrBufferSize)
+ }
+
+ copy(unsafe.Slice((*byte)(unsafe.Pointer(r)), n), b[:n])
+ if r.Family != afHVSock {
+ return fmt.Errorf("got %d, want %d: %w", r.Family, afHVSock, socket.ErrAddrFamily)
+ }
+
+ return nil
+}
+
+// HvsockListener is a socket listener for the AF_HYPERV address family.
+type HvsockListener struct {
+ sock *win32File
+ addr HvsockAddr
+}
+
+var _ net.Listener = &HvsockListener{}
+
+// HvsockConn is a connected socket of the AF_HYPERV address family.
+type HvsockConn struct {
+ sock *win32File
+ local, remote HvsockAddr
+}
+
+var _ net.Conn = &HvsockConn{}
+
+func newHVSocket() (*win32File, error) {
+ fd, err := syscall.Socket(afHVSock, syscall.SOCK_STREAM, 1)
+ if err != nil {
+ return nil, os.NewSyscallError("socket", err)
+ }
+ f, err := makeWin32File(fd)
+ if err != nil {
+ syscall.Close(fd)
+ return nil, err
+ }
+ f.socket = true
+ return f, nil
+}
+
+// ListenHvsock listens for connections on the specified hvsock address.
+func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) {
+ l := &HvsockListener{addr: *addr}
+ sock, err := newHVSocket()
+ if err != nil {
+ return nil, l.opErr("listen", err)
+ }
+ sa := addr.raw()
+ err = socket.Bind(windows.Handle(sock.handle), &sa)
+ if err != nil {
+ return nil, l.opErr("listen", os.NewSyscallError("bind", err))
+ }
+ err = syscall.Listen(sock.handle, 16)
+ if err != nil {
+ return nil, l.opErr("listen", os.NewSyscallError("listen", err))
+ }
+ return &HvsockListener{sock: sock, addr: *addr}, nil
+}
+
+func (l *HvsockListener) opErr(op string, err error) error {
+ return &net.OpError{Op: op, Net: "hvsock", Addr: &l.addr, Err: err}
+}
+
+// Addr returns the listener's network address.
+func (l *HvsockListener) Addr() net.Addr {
+ return &l.addr
+}
+
+// Accept waits for the next connection and returns it.
+func (l *HvsockListener) Accept() (_ net.Conn, err error) {
+ sock, err := newHVSocket()
+ if err != nil {
+ return nil, l.opErr("accept", err)
+ }
+ defer func() {
+ if sock != nil {
+ sock.Close()
+ }
+ }()
+ c, err := l.sock.prepareIO()
+ if err != nil {
+ return nil, l.opErr("accept", err)
+ }
+ defer l.sock.wg.Done()
+
+ // AcceptEx, per documentation, requires an extra 16 bytes per address.
+ //
+ // https://docs.microsoft.com/en-us/windows/win32/api/mswsock/nf-mswsock-acceptex
+ const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{}))
+ var addrbuf [addrlen * 2]byte
+
+ var bytes uint32
+ err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /* rxdatalen */, addrlen, addrlen, &bytes, &c.o)
+ if _, err = l.sock.asyncIO(c, nil, bytes, err); err != nil {
+ return nil, l.opErr("accept", os.NewSyscallError("acceptex", err))
+ }
+
+ conn := &HvsockConn{
+ sock: sock,
+ }
+ // The local address returned in the AcceptEx buffer is the same as the Listener socket's
+ // address. However, the service GUID reported by GetSockName is different from the listener's
+ // socket, and is sometimes the same as the local address of the socket that dialed the
+ // address, with the service GUID.Data1 incremented, but other times is different.
+ // todo: does the local address matter? is the listener's address or the actual address appropriate?
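+ // AcceptEx lays the two addresses out back to back in addrbuf, each in a
+ // slot of addrlen bytes: the local address first, then the remote address.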
+ conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0]))) + conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen]))) + + // initialize the accepted socket and update its properties with those of the listening socket + if err = windows.Setsockopt(windows.Handle(sock.handle), + windows.SOL_SOCKET, windows.SO_UPDATE_ACCEPT_CONTEXT, + (*byte)(unsafe.Pointer(&l.sock.handle)), int32(unsafe.Sizeof(l.sock.handle))); err != nil { + return nil, conn.opErr("accept", os.NewSyscallError("setsockopt", err)) + } + + sock = nil + return conn, nil +} + +// Close closes the listener, causing any pending Accept calls to fail. +func (l *HvsockListener) Close() error { + return l.sock.Close() +} + +// HvsockDialer configures and dials a Hyper-V Socket (ie, [HvsockConn]). +type HvsockDialer struct { + // Deadline is the time the Dial operation must connect before erroring. + Deadline time.Time + + // Retries is the number of additional connects to try if the connection times out, is refused, + // or the host is unreachable + Retries uint + + // RetryWait is the time to wait after a connection error to retry + RetryWait time.Duration + + rt *time.Timer // redial wait timer +} + +// Dial the Hyper-V socket at addr. +// +// See [HvsockDialer.Dial] for more information. +func Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) { + return (&HvsockDialer{}).Dial(ctx, addr) +} + +// Dial attempts to connect to the Hyper-V socket at addr, and returns a connection if successful. +// Will attempt (HvsockDialer).Retries if dialing fails, waiting (HvsockDialer).RetryWait between +// retries. +// +// Dialing can be cancelled either by providing (HvsockDialer).Deadline, or cancelling ctx. +func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) { + op := "dial" + // create the conn early to use opErr() + conn = &HvsockConn{ + remote: *addr, + } + + if !d.Deadline.IsZero() { + var cancel context.CancelFunc + ctx, cancel = context.WithDeadline(ctx, d.Deadline) + defer cancel() + } + + // preemptive timeout/cancellation check + if err = ctx.Err(); err != nil { + return nil, conn.opErr(op, err) + } + + sock, err := newHVSocket() + if err != nil { + return nil, conn.opErr(op, err) + } + defer func() { + if sock != nil { + sock.Close() + } + }() + + sa := addr.raw() + err = socket.Bind(windows.Handle(sock.handle), &sa) + if err != nil { + return nil, conn.opErr(op, os.NewSyscallError("bind", err)) + } + + c, err := sock.prepareIO() + if err != nil { + return nil, conn.opErr(op, err) + } + defer sock.wg.Done() + var bytes uint32 + for i := uint(0); i <= d.Retries; i++ { + err = socket.ConnectEx( + windows.Handle(sock.handle), + &sa, + nil, // sendBuf + 0, // sendDataLen + &bytes, + (*windows.Overlapped)(unsafe.Pointer(&c.o))) + _, err = sock.asyncIO(c, nil, bytes, err) + if i < d.Retries && canRedial(err) { + if err = d.redialWait(ctx); err == nil { + continue + } + } + break + } + if err != nil { + return nil, conn.opErr(op, os.NewSyscallError("connectex", err)) + } + + // update the connection properties, so shutdown can be used + if err = windows.Setsockopt( + windows.Handle(sock.handle), + windows.SOL_SOCKET, + windows.SO_UPDATE_CONNECT_CONTEXT, + nil, // optvalue + 0, // optlen + ); err != nil { + return nil, conn.opErr(op, os.NewSyscallError("setsockopt", err)) + } + + // get the local name + var sal rawHvsockAddr + err = socket.GetSockName(windows.Handle(sock.handle), &sal) + if err != nil { + return nil, conn.opErr(op, 
os.NewSyscallError("getsockname", err)) + } + conn.local.fromRaw(&sal) + + // one last check for timeout, since asyncIO doesn't check the context + if err = ctx.Err(); err != nil { + return nil, conn.opErr(op, err) + } + + conn.sock = sock + sock = nil + + return conn, nil +} + +// redialWait waits before attempting to redial, resetting the timer as appropriate. +func (d *HvsockDialer) redialWait(ctx context.Context) (err error) { + if d.RetryWait == 0 { + return nil + } + + if d.rt == nil { + d.rt = time.NewTimer(d.RetryWait) + } else { + // should already be stopped and drained + d.rt.Reset(d.RetryWait) + } + + select { + case <-ctx.Done(): + case <-d.rt.C: + return nil + } + + // stop and drain the timer + if !d.rt.Stop() { + <-d.rt.C + } + return ctx.Err() +} + +// assumes error is a plain, unwrapped syscall.Errno provided by direct syscall. +func canRedial(err error) bool { + //nolint:errorlint // guaranteed to be an Errno + switch err { + case windows.WSAECONNREFUSED, windows.WSAENETUNREACH, windows.WSAETIMEDOUT, + windows.ERROR_CONNECTION_REFUSED, windows.ERROR_CONNECTION_UNAVAIL: + return true + default: + return false + } +} + +func (conn *HvsockConn) opErr(op string, err error) error { + // translate from "file closed" to "socket closed" + if errors.Is(err, ErrFileClosed) { + err = socket.ErrSocketClosed + } + return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err} +} + +func (conn *HvsockConn) Read(b []byte) (int, error) { + c, err := conn.sock.prepareIO() + if err != nil { + return 0, conn.opErr("read", err) + } + defer conn.sock.wg.Done() + buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} + var flags, bytes uint32 + err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil) + n, err := conn.sock.asyncIO(c, &conn.sock.readDeadline, bytes, err) + if err != nil { + var eno windows.Errno + if errors.As(err, &eno) { + err = os.NewSyscallError("wsarecv", eno) + } + return 0, conn.opErr("read", err) + } else if n == 0 { + err = io.EOF + } + return n, err +} + +func (conn *HvsockConn) Write(b []byte) (int, error) { + t := 0 + for len(b) != 0 { + n, err := conn.write(b) + if err != nil { + return t + n, err + } + t += n + b = b[n:] + } + return t, nil +} + +func (conn *HvsockConn) write(b []byte) (int, error) { + c, err := conn.sock.prepareIO() + if err != nil { + return 0, conn.opErr("write", err) + } + defer conn.sock.wg.Done() + buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} + var bytes uint32 + err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil) + n, err := conn.sock.asyncIO(c, &conn.sock.writeDeadline, bytes, err) + if err != nil { + var eno windows.Errno + if errors.As(err, &eno) { + err = os.NewSyscallError("wsasend", eno) + } + return 0, conn.opErr("write", err) + } + return n, err +} + +// Close closes the socket connection, failing any pending read or write calls. +func (conn *HvsockConn) Close() error { + return conn.sock.Close() +} + +func (conn *HvsockConn) IsClosed() bool { + return conn.sock.IsClosed() +} + +// shutdown disables sending or receiving on a socket. 
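+// how is a shutdown constant such as syscall.SHUT_RD or syscall.SHUT_WR.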
+func (conn *HvsockConn) shutdown(how int) error { + if conn.IsClosed() { + return socket.ErrSocketClosed + } + + err := syscall.Shutdown(conn.sock.handle, how) + if err != nil { + // If the connection was closed, shutdowns fail with "not connected" + if errors.Is(err, windows.WSAENOTCONN) || + errors.Is(err, windows.WSAESHUTDOWN) { + err = socket.ErrSocketClosed + } + return os.NewSyscallError("shutdown", err) + } + return nil +} + +// CloseRead shuts down the read end of the socket, preventing future read operations. +func (conn *HvsockConn) CloseRead() error { + err := conn.shutdown(syscall.SHUT_RD) + if err != nil { + return conn.opErr("closeread", err) + } + return nil +} + +// CloseWrite shuts down the write end of the socket, preventing future write operations and +// notifying the other endpoint that no more data will be written. +func (conn *HvsockConn) CloseWrite() error { + err := conn.shutdown(syscall.SHUT_WR) + if err != nil { + return conn.opErr("closewrite", err) + } + return nil +} + +// LocalAddr returns the local address of the connection. +func (conn *HvsockConn) LocalAddr() net.Addr { + return &conn.local +} + +// RemoteAddr returns the remote address of the connection. +func (conn *HvsockConn) RemoteAddr() net.Addr { + return &conn.remote +} + +// SetDeadline implements the net.Conn SetDeadline method. +func (conn *HvsockConn) SetDeadline(t time.Time) error { + // todo: implement `SetDeadline` for `win32File` + if err := conn.SetReadDeadline(t); err != nil { + return fmt.Errorf("set read deadline: %w", err) + } + if err := conn.SetWriteDeadline(t); err != nil { + return fmt.Errorf("set write deadline: %w", err) + } + return nil +} + +// SetReadDeadline implements the net.Conn SetReadDeadline method. +func (conn *HvsockConn) SetReadDeadline(t time.Time) error { + return conn.sock.SetReadDeadline(t) +} + +// SetWriteDeadline implements the net.Conn SetWriteDeadline method. +func (conn *HvsockConn) SetWriteDeadline(t time.Time) error { + return conn.sock.SetWriteDeadline(t) +} diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go b/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go new file mode 100644 index 0000000000..1f65388178 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go @@ -0,0 +1,2 @@ +// This package contains Win32 filesystem functionality. +package fs diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go b/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go new file mode 100644 index 0000000000..509b3ec641 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go @@ -0,0 +1,202 @@ +//go:build windows + +package fs + +import ( + "golang.org/x/sys/windows" + + "github.com/Microsoft/go-winio/internal/stringbuffer" +) + +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go fs.go + +// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew +//sys CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateFileW + +const NullHandle windows.Handle = 0 + +// AccessMask defines standard, specific, and generic rights. 
+//
+// Bitmask:
+// 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+// 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+// +---------------+---------------+-------------------------------+
+// |G|G|G|G|Resvd|A| StandardRights| SpecificRights |
+// |R|W|E|A| |S| | |
+// +-+-------------+---------------+-------------------------------+
+//
+// GR Generic Read
+// GW Generic Write
+// GE Generic Execute
+// GA Generic All
+// Resvd Reserved
+// AS Access Security System
+//
+// https://learn.microsoft.com/en-us/windows/win32/secauthz/access-mask
+//
+// https://learn.microsoft.com/en-us/windows/win32/secauthz/generic-access-rights
+//
+// https://learn.microsoft.com/en-us/windows/win32/fileio/file-access-rights-constants
+type AccessMask = windows.ACCESS_MASK
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const (
+ // Not actually any.
+ //
+ // For CreateFile: "query certain metadata such as file, directory, or device attributes without accessing that file or device"
+ // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew#parameters
+ FILE_ANY_ACCESS AccessMask = 0
+
+ // Specific Object Access
+ // from ntioapi.h
+
+ FILE_READ_DATA AccessMask = (0x0001) // file & pipe
+ FILE_LIST_DIRECTORY AccessMask = (0x0001) // directory
+
+ FILE_WRITE_DATA AccessMask = (0x0002) // file & pipe
+ FILE_ADD_FILE AccessMask = (0x0002) // directory
+
+ FILE_APPEND_DATA AccessMask = (0x0004) // file
+ FILE_ADD_SUBDIRECTORY AccessMask = (0x0004) // directory
+ FILE_CREATE_PIPE_INSTANCE AccessMask = (0x0004) // named pipe
+
+ FILE_READ_EA AccessMask = (0x0008) // file & directory
+ FILE_READ_PROPERTIES AccessMask = FILE_READ_EA
+
+ FILE_WRITE_EA AccessMask = (0x0010) // file & directory
+ FILE_WRITE_PROPERTIES AccessMask = FILE_WRITE_EA
+
+ FILE_EXECUTE AccessMask = (0x0020) // file
+ FILE_TRAVERSE AccessMask = (0x0020) // directory
+
+ FILE_DELETE_CHILD AccessMask = (0x0040) // directory
+
+ FILE_READ_ATTRIBUTES AccessMask = (0x0080) // all
+
+ FILE_WRITE_ATTRIBUTES AccessMask = (0x0100) // all
+
+ FILE_ALL_ACCESS AccessMask = (STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x1FF)
+ FILE_GENERIC_READ AccessMask = (STANDARD_RIGHTS_READ | FILE_READ_DATA | FILE_READ_ATTRIBUTES | FILE_READ_EA | SYNCHRONIZE)
+ FILE_GENERIC_WRITE AccessMask = (STANDARD_RIGHTS_WRITE | FILE_WRITE_DATA | FILE_WRITE_ATTRIBUTES | FILE_WRITE_EA | FILE_APPEND_DATA | SYNCHRONIZE)
+ FILE_GENERIC_EXECUTE AccessMask = (STANDARD_RIGHTS_EXECUTE | FILE_READ_ATTRIBUTES | FILE_EXECUTE | SYNCHRONIZE)
+
+ SPECIFIC_RIGHTS_ALL AccessMask = 0x0000FFFF
+
+ // Standard Access
+ // from ntseapi.h
+
+ DELETE AccessMask = 0x0001_0000
+ READ_CONTROL AccessMask = 0x0002_0000
+ WRITE_DAC AccessMask = 0x0004_0000
+ WRITE_OWNER AccessMask = 0x0008_0000
+ SYNCHRONIZE AccessMask = 0x0010_0000
+
+ STANDARD_RIGHTS_REQUIRED AccessMask = 0x000F_0000
+
+ STANDARD_RIGHTS_READ AccessMask = READ_CONTROL
+ STANDARD_RIGHTS_WRITE AccessMask = READ_CONTROL
+ STANDARD_RIGHTS_EXECUTE AccessMask = READ_CONTROL
+
+ STANDARD_RIGHTS_ALL AccessMask = 0x001F_0000
+)
+
+type FileShareMode uint32
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const ( + FILE_SHARE_NONE FileShareMode = 0x00 + FILE_SHARE_READ FileShareMode = 0x01 + FILE_SHARE_WRITE FileShareMode = 0x02 + FILE_SHARE_DELETE FileShareMode = 0x04 + FILE_SHARE_VALID_FLAGS FileShareMode = 0x07 +) + +type FileCreationDisposition uint32 + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( + // from winbase.h + + CREATE_NEW FileCreationDisposition = 0x01 + CREATE_ALWAYS FileCreationDisposition = 0x02 + OPEN_EXISTING FileCreationDisposition = 0x03 + OPEN_ALWAYS FileCreationDisposition = 0x04 + TRUNCATE_EXISTING FileCreationDisposition = 0x05 +) + +// CreateFile and co. take flags or attributes together as one parameter. +// Define alias until we can use generics to allow both + +// https://learn.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants +type FileFlagOrAttribute uint32 + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( // from winnt.h + FILE_FLAG_WRITE_THROUGH FileFlagOrAttribute = 0x8000_0000 + FILE_FLAG_OVERLAPPED FileFlagOrAttribute = 0x4000_0000 + FILE_FLAG_NO_BUFFERING FileFlagOrAttribute = 0x2000_0000 + FILE_FLAG_RANDOM_ACCESS FileFlagOrAttribute = 0x1000_0000 + FILE_FLAG_SEQUENTIAL_SCAN FileFlagOrAttribute = 0x0800_0000 + FILE_FLAG_DELETE_ON_CLOSE FileFlagOrAttribute = 0x0400_0000 + FILE_FLAG_BACKUP_SEMANTICS FileFlagOrAttribute = 0x0200_0000 + FILE_FLAG_POSIX_SEMANTICS FileFlagOrAttribute = 0x0100_0000 + FILE_FLAG_OPEN_REPARSE_POINT FileFlagOrAttribute = 0x0020_0000 + FILE_FLAG_OPEN_NO_RECALL FileFlagOrAttribute = 0x0010_0000 + FILE_FLAG_FIRST_PIPE_INSTANCE FileFlagOrAttribute = 0x0008_0000 +) + +type FileSQSFlag = FileFlagOrAttribute + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( // from winbase.h + SECURITY_ANONYMOUS FileSQSFlag = FileSQSFlag(SecurityAnonymous << 16) + SECURITY_IDENTIFICATION FileSQSFlag = FileSQSFlag(SecurityIdentification << 16) + SECURITY_IMPERSONATION FileSQSFlag = FileSQSFlag(SecurityImpersonation << 16) + SECURITY_DELEGATION FileSQSFlag = FileSQSFlag(SecurityDelegation << 16) + + SECURITY_SQOS_PRESENT FileSQSFlag = 0x00100000 + SECURITY_VALID_SQOS_FLAGS FileSQSFlag = 0x001F0000 +) + +// GetFinalPathNameByHandle flags +// +// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew#parameters +type GetFinalPathFlag uint32 + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( + GetFinalPathDefaultFlag GetFinalPathFlag = 0x0 + + FILE_NAME_NORMALIZED GetFinalPathFlag = 0x0 + FILE_NAME_OPENED GetFinalPathFlag = 0x8 + + VOLUME_NAME_DOS GetFinalPathFlag = 0x0 + VOLUME_NAME_GUID GetFinalPathFlag = 0x1 + VOLUME_NAME_NT GetFinalPathFlag = 0x2 + VOLUME_NAME_NONE GetFinalPathFlag = 0x4 +) + +// getFinalPathNameByHandle facilitates calling the Windows API GetFinalPathNameByHandle +// with the given handle and flags. It transparently takes care of creating a buffer of the +// correct size for the call. +// +// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew +func GetFinalPathNameByHandle(h windows.Handle, flags GetFinalPathFlag) (string, error) { + b := stringbuffer.NewWString() + //TODO: can loop infinitely if Win32 keeps returning the same (or a larger) n? 
+ for { + n, err := windows.GetFinalPathNameByHandle(h, b.Pointer(), b.Cap(), uint32(flags)) + if err != nil { + return "", err + } + // If the buffer wasn't large enough, n will be the total size needed (including null terminator). + // Resize and try again. + if n > b.Cap() { + b.ResizeTo(n) + continue + } + // If the buffer is large enough, n will be the size not including the null terminator. + // Convert to a Go string and return. + return b.String(), nil + } +} diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/security.go b/vendor/github.com/Microsoft/go-winio/internal/fs/security.go new file mode 100644 index 0000000000..81760ac67e --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/fs/security.go @@ -0,0 +1,12 @@ +package fs + +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ne-winnt-security_impersonation_level +type SecurityImpersonationLevel int32 // C default enums underlying type is `int`, which is Go `int32` + +// Impersonation levels +const ( + SecurityAnonymous SecurityImpersonationLevel = 0 + SecurityIdentification SecurityImpersonationLevel = 1 + SecurityImpersonation SecurityImpersonationLevel = 2 + SecurityDelegation SecurityImpersonationLevel = 3 +) diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go new file mode 100644 index 0000000000..e2f7bb24e5 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go @@ -0,0 +1,64 @@ +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. + +package fs + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
+	return e
+}
+
+var (
+	modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
+
+	procCreateFileW = modkernel32.NewProc("CreateFileW")
+)
+
+func CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) {
+	var _p0 *uint16
+	_p0, err = syscall.UTF16PtrFromString(name)
+	if err != nil {
+		return
+	}
+	return _CreateFile(_p0, access, mode, sa, createmode, attrs, templatefile)
+}
+
+func _CreateFile(name *uint16, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) {
+	r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
+	handle = windows.Handle(r0)
+	if handle == windows.InvalidHandle {
+		err = errnoErr(e1)
+	}
+	return
+}
diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go b/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go
new file mode 100644
index 0000000000..7e82f9afa9
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go
@@ -0,0 +1,20 @@
+package socket
+
+import (
+	"unsafe"
+)
+
+// RawSockaddr allows structs to be used with [Bind] and [ConnectEx]. The
+// struct must meet the Win32 sockaddr requirements specified here:
+// https://docs.microsoft.com/en-us/windows/win32/winsock/sockaddr-2
+//
+// Specifically, the struct size must be at least larger than an int16 (unsigned short)
+// for the address family.
+type RawSockaddr interface {
+	// Sockaddr returns a pointer to the RawSockaddr and its struct size, allowing
+	// for the RawSockaddr's data to be overwritten by syscalls (if necessary).
+	//
+	// It is the caller's responsibility to validate that the values are valid; invalid
+	// pointers or sizes can cause a panic.
+	Sockaddr() (unsafe.Pointer, int32, error)
+}
diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go b/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go
new file mode 100644
index 0000000000..aeb7b7250f
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go
@@ -0,0 +1,179 @@
+//go:build windows
+
+package socket
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"sync"
+	"syscall"
+	"unsafe"
+
+	"github.com/Microsoft/go-winio/pkg/guid"
+	"golang.org/x/sys/windows"
+)
+
+//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go socket.go
+
+//sys getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getsockname
+//sys getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getpeername
+//sys bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind
+
+const socketError = uintptr(^uint32(0))
+
+var (
+	// todo(helsaawy): create custom error types to store the desired vs actual size and addr family?
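+	// These sentinels are meant to be wrapped (fmt.Errorf("...: %w", ...)) and
+	// matched with errors.Is, e.g. errors.Is(err, ErrBufferSize).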
+ + ErrBufferSize = errors.New("buffer size") + ErrAddrFamily = errors.New("address family") + ErrInvalidPointer = errors.New("invalid pointer") + ErrSocketClosed = fmt.Errorf("socket closed: %w", net.ErrClosed) +) + +// todo(helsaawy): replace these with generics, ie: GetSockName[S RawSockaddr](s windows.Handle) (S, error) + +// GetSockName writes the local address of socket s to the [RawSockaddr] rsa. +// If rsa is not large enough, the [windows.WSAEFAULT] is returned. +func GetSockName(s windows.Handle, rsa RawSockaddr) error { + ptr, l, err := rsa.Sockaddr() + if err != nil { + return fmt.Errorf("could not retrieve socket pointer and size: %w", err) + } + + // although getsockname returns WSAEFAULT if the buffer is too small, it does not set + // &l to the correct size, so--apart from doubling the buffer repeatedly--there is no remedy + return getsockname(s, ptr, &l) +} + +// GetPeerName returns the remote address the socket is connected to. +// +// See [GetSockName] for more information. +func GetPeerName(s windows.Handle, rsa RawSockaddr) error { + ptr, l, err := rsa.Sockaddr() + if err != nil { + return fmt.Errorf("could not retrieve socket pointer and size: %w", err) + } + + return getpeername(s, ptr, &l) +} + +func Bind(s windows.Handle, rsa RawSockaddr) (err error) { + ptr, l, err := rsa.Sockaddr() + if err != nil { + return fmt.Errorf("could not retrieve socket pointer and size: %w", err) + } + + return bind(s, ptr, l) +} + +// "golang.org/x/sys/windows".ConnectEx and .Bind only accept internal implementations of the +// their sockaddr interface, so they cannot be used with HvsockAddr +// Replicate functionality here from +// https://cs.opensource.google/go/x/sys/+/master:windows/syscall_windows.go + +// The function pointers to `AcceptEx`, `ConnectEx` and `GetAcceptExSockaddrs` must be loaded at +// runtime via a WSAIoctl call: +// https://docs.microsoft.com/en-us/windows/win32/api/Mswsock/nc-mswsock-lpfn_connectex#remarks + +type runtimeFunc struct { + id guid.GUID + once sync.Once + addr uintptr + err error +} + +func (f *runtimeFunc) Load() error { + f.once.Do(func() { + var s windows.Handle + s, f.err = windows.Socket(windows.AF_INET, windows.SOCK_STREAM, windows.IPPROTO_TCP) + if f.err != nil { + return + } + defer windows.CloseHandle(s) //nolint:errcheck + + var n uint32 + f.err = windows.WSAIoctl(s, + windows.SIO_GET_EXTENSION_FUNCTION_POINTER, + (*byte)(unsafe.Pointer(&f.id)), + uint32(unsafe.Sizeof(f.id)), + (*byte)(unsafe.Pointer(&f.addr)), + uint32(unsafe.Sizeof(f.addr)), + &n, + nil, // overlapped + 0, // completionRoutine + ) + }) + return f.err +} + +var ( + // todo: add `AcceptEx` and `GetAcceptExSockaddrs` + WSAID_CONNECTEX = guid.GUID{ //revive:disable-line:var-naming ALL_CAPS + Data1: 0x25a207b9, + Data2: 0xddf3, + Data3: 0x4660, + Data4: [8]byte{0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e}, + } + + connectExFunc = runtimeFunc{id: WSAID_CONNECTEX} +) + +func ConnectEx( + fd windows.Handle, + rsa RawSockaddr, + sendBuf *byte, + sendDataLen uint32, + bytesSent *uint32, + overlapped *windows.Overlapped, +) error { + if err := connectExFunc.Load(); err != nil { + return fmt.Errorf("failed to load ConnectEx function pointer: %w", err) + } + ptr, n, err := rsa.Sockaddr() + if err != nil { + return err + } + return connectEx(fd, ptr, n, sendBuf, sendDataLen, bytesSent, overlapped) +} + +// BOOL LpfnConnectex( +// [in] SOCKET s, +// [in] const sockaddr *name, +// [in] int namelen, +// [in, optional] PVOID lpSendBuffer, +// [in] DWORD dwSendDataLength, +// [out] 
LPDWORD lpdwBytesSent, +// [in] LPOVERLAPPED lpOverlapped +// ) + +func connectEx( + s windows.Handle, + name unsafe.Pointer, + namelen int32, + sendBuf *byte, + sendDataLen uint32, + bytesSent *uint32, + overlapped *windows.Overlapped, +) (err error) { + // todo: after upgrading to 1.18, switch from syscall.Syscall9 to syscall.SyscallN + r1, _, e1 := syscall.Syscall9(connectExFunc.addr, + 7, + uintptr(s), + uintptr(name), + uintptr(namelen), + uintptr(unsafe.Pointer(sendBuf)), + uintptr(sendDataLen), + uintptr(unsafe.Pointer(bytesSent)), + uintptr(unsafe.Pointer(overlapped)), + 0, + 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return err +} diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go new file mode 100644 index 0000000000..6d2e1a9e44 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go @@ -0,0 +1,72 @@ +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. + +package socket + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") + + procbind = modws2_32.NewProc("bind") + procgetpeername = modws2_32.NewProc("getpeername") + procgetsockname = modws2_32.NewProc("getsockname") +) + +func bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) { + r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + if r1 == socketError { + err = errnoErr(e1) + } + return +} + +func getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) { + r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen))) + if r1 == socketError { + err = errnoErr(e1) + } + return +} + +func getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) { + r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen))) + if r1 == socketError { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go b/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go new file mode 100644 index 0000000000..7ad5057024 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go @@ -0,0 +1,132 @@ +package stringbuffer + +import ( + "sync" + "unicode/utf16" +) + +// TODO: worth exporting and using in mkwinsyscall? 
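+// An illustrative call pattern for the pooled buffer below; someWin32Call is
+// hypothetical, standing in for any API that returns the required length:
+//
+//	b := NewWString()
+//	defer b.Free()
+//	n, err := someWin32Call(b.Pointer(), b.Cap())
+//	if n > b.Cap() {
+//		b.ResizeTo(n) // retry with the larger buffer
+//	}
+//	s := b.String()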
+
+// MinWStringCap is the buffer size in the pool, chosen somewhat arbitrarily to accommodate
+// large path strings:
+// MAX_PATH (260) + size of volume GUID prefix (49) + null terminator = 310.
+const MinWStringCap = 310
+
+// use *[]uint16 since []uint16 creates an extra allocation where the slice header
+// is copied to heap and then referenced via pointer in the interface header that sync.Pool
+// stores.
+var pathPool = sync.Pool{ // if go1.18+ adds Pool[T], use that to store []uint16 directly
+	New: func() interface{} {
+		b := make([]uint16, MinWStringCap)
+		return &b
+	},
+}
+
+func newBuffer() []uint16 { return *(pathPool.Get().(*[]uint16)) }
+
+// freeBuffer copies the slice header data, and puts a pointer to that in the pool.
+// This avoids taking a pointer to the slice header in WString, which can be set to nil.
+func freeBuffer(b []uint16) { pathPool.Put(&b) }
+
+// WString is a wide string buffer ([]uint16) meant for storing UTF-16 encoded strings
+// for interacting with Win32 APIs.
+// Sizes are specified as uint32 and not int.
+//
+// It is not thread safe.
+type WString struct {
+	// type-def allows casting to []uint16 directly, use struct to prevent that and allow adding fields in the future.
+
+	// raw buffer
+	b []uint16
+}
+
+// NewWString returns a [WString] allocated from a shared pool with an
+// initial capacity of at least [MinWStringCap].
+// Since the buffer may have been previously used, its contents are not guaranteed to be empty.
+//
+// The buffer should be freed via [WString.Free].
+func NewWString() *WString {
+	return &WString{
+		b: newBuffer(),
+	}
+}
+
+func (b *WString) Free() {
+	if b.empty() {
+		return
+	}
+	freeBuffer(b.b)
+	b.b = nil
+}
+
+// ResizeTo grows the buffer to at least c and returns the new capacity, freeing the
+// previous buffer back into the pool.
+func (b *WString) ResizeTo(c uint32) uint32 {
+	// already sufficient (or c is 0)
+	if c <= b.Cap() {
+		return b.Cap()
+	}
+
+	if c <= MinWStringCap {
+		c = MinWStringCap
+	}
+	// allocate at least double the buffer size, as is done in [bytes.Buffer] and other places
+	if c <= 2*b.Cap() {
+		c = 2 * b.Cap()
+	}
+
+	b2 := make([]uint16, c)
+	if !b.empty() {
+		copy(b2, b.b)
+		freeBuffer(b.b)
+	}
+	b.b = b2
+	return c
+}
+
+// Buffer returns the underlying []uint16 buffer.
+func (b *WString) Buffer() []uint16 {
+	if b.empty() {
+		return nil
+	}
+	return b.b
+}
+
+// Pointer returns a pointer to the first uint16 in the buffer.
+// If [WString.Free] has already been called, the pointer will be nil.
+func (b *WString) Pointer() *uint16 {
+	if b.empty() {
+		return nil
+	}
+	return &b.b[0]
+}
+
+// String returns the UTF-8 encoding of the UTF-16 string in the buffer.
+//
+// It assumes that the data is null-terminated.
+func (b *WString) String() string {
+	// Using [windows.UTF16ToString] would require importing "golang.org/x/sys/windows"
+	// and would make this code Windows-only, which makes no sense.
+	// So copy UTF16ToString code into here.
+	// If other windows-specific code is added, switch to [windows.UTF16ToString].
+
+	s := b.b
+	for i, v := range s {
+		if v == 0 {
+			s = s[:i]
+			break
+		}
+	}
+	return string(utf16.Decode(s))
+}
+
+// Cap returns the underlying buffer capacity.
+func (b *WString) Cap() uint32 { + if b.empty() { + return 0 + } + return b.cap() +} + +func (b *WString) cap() uint32 { return uint32(cap(b.b)) } +func (b *WString) empty() bool { return b == nil || b.cap() == 0 } diff --git a/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/Microsoft/go-winio/pipe.go new file mode 100644 index 0000000000..25cc811031 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pipe.go @@ -0,0 +1,525 @@ +//go:build windows +// +build windows + +package winio + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "os" + "runtime" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/windows" + + "github.com/Microsoft/go-winio/internal/fs" +) + +//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe +//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW +//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo +//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW +//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc +//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) = ntdll.NtCreateNamedPipeFile +//sys rtlNtStatusToDosError(status ntStatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb +//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) = ntdll.RtlDosPathNameToNtPathName_U +//sys rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) = ntdll.RtlDefaultNpAcl + +type ioStatusBlock struct { + Status, Information uintptr +} + +type objectAttributes struct { + Length uintptr + RootDirectory uintptr + ObjectName *unicodeString + Attributes uintptr + SecurityDescriptor *securityDescriptor + SecurityQoS uintptr +} + +type unicodeString struct { + Length uint16 + MaximumLength uint16 + Buffer uintptr +} + +type securityDescriptor struct { + Revision byte + Sbz1 byte + Control uint16 + Owner uintptr + Group uintptr + Sacl uintptr //revive:disable-line:var-naming SACL, not Sacl + Dacl uintptr //revive:disable-line:var-naming DACL, not Dacl +} + +type ntStatus int32 + +func (status ntStatus) Err() error { + if status >= 0 { + return nil + } + return rtlNtStatusToDosError(status) +} + +var ( + // ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed. 
+ ErrPipeListenerClosed = net.ErrClosed + + errPipeWriteClosed = errors.New("pipe has been closed for write") +) + +type win32Pipe struct { + *win32File + path string +} + +type win32MessageBytePipe struct { + win32Pipe + writeClosed bool + readEOF bool +} + +type pipeAddress string + +func (f *win32Pipe) LocalAddr() net.Addr { + return pipeAddress(f.path) +} + +func (f *win32Pipe) RemoteAddr() net.Addr { + return pipeAddress(f.path) +} + +func (f *win32Pipe) SetDeadline(t time.Time) error { + if err := f.SetReadDeadline(t); err != nil { + return err + } + return f.SetWriteDeadline(t) +} + +// CloseWrite closes the write side of a message pipe in byte mode. +func (f *win32MessageBytePipe) CloseWrite() error { + if f.writeClosed { + return errPipeWriteClosed + } + err := f.win32File.Flush() + if err != nil { + return err + } + _, err = f.win32File.Write(nil) + if err != nil { + return err + } + f.writeClosed = true + return nil +} + +// Write writes bytes to a message pipe in byte mode. Zero-byte writes are ignored, since +// they are used to implement CloseWrite(). +func (f *win32MessageBytePipe) Write(b []byte) (int, error) { + if f.writeClosed { + return 0, errPipeWriteClosed + } + if len(b) == 0 { + return 0, nil + } + return f.win32File.Write(b) +} + +// Read reads bytes from a message pipe in byte mode. A read of a zero-byte message on a message +// mode pipe will return io.EOF, as will all subsequent reads. +func (f *win32MessageBytePipe) Read(b []byte) (int, error) { + if f.readEOF { + return 0, io.EOF + } + n, err := f.win32File.Read(b) + if err == io.EOF { //nolint:errorlint + // If this was the result of a zero-byte read, then + // it is possible that the read was due to a zero-size + // message. Since we are simulating CloseWrite with a + // zero-byte message, ensure that all future Read() calls + // also return EOF. + f.readEOF = true + } else if err == syscall.ERROR_MORE_DATA { //nolint:errorlint // err is Errno + // ERROR_MORE_DATA indicates that the pipe's read mode is message mode + // and the message still has more bytes. Treat this as a success, since + // this package presents all named pipes as byte streams. + err = nil + } + return n, err +} + +func (pipeAddress) Network() string { + return "pipe" +} + +func (s pipeAddress) String() string { + return string(s) +} + +// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout. +func tryDialPipe(ctx context.Context, path *string, access fs.AccessMask) (syscall.Handle, error) { + for { + select { + case <-ctx.Done(): + return syscall.Handle(0), ctx.Err() + default: + wh, err := fs.CreateFile(*path, + access, + 0, // mode + nil, // security attributes + fs.OPEN_EXISTING, + fs.FILE_FLAG_OVERLAPPED|fs.SECURITY_SQOS_PRESENT|fs.SECURITY_ANONYMOUS, + 0, // template file handle + ) + h := syscall.Handle(wh) + if err == nil { + return h, nil + } + if err != windows.ERROR_PIPE_BUSY { //nolint:errorlint // err is Errno + return h, &os.PathError{Err: err, Op: "open", Path: *path} + } + // Wait 10 msec and try again. This is a rather simplistic + // view, as we always try each 10 milliseconds. + time.Sleep(10 * time.Millisecond) + } + } +} + +// DialPipe connects to a named pipe by path, timing out if the connection +// takes longer than the specified duration. If timeout is nil, then we use +// a default timeout of 2 seconds. (We do not use WaitNamedPipe.) 
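+//
+// An illustrative call (the pipe name is hypothetical):
+//
+//	timeout := time.Second
+//	conn, err := winio.DialPipe(`\\.\pipe\mypipe`, &timeout)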
+func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { + var absTimeout time.Time + if timeout != nil { + absTimeout = time.Now().Add(*timeout) + } else { + absTimeout = time.Now().Add(2 * time.Second) + } + ctx, cancel := context.WithDeadline(context.Background(), absTimeout) + defer cancel() + conn, err := DialPipeContext(ctx, path) + if errors.Is(err, context.DeadlineExceeded) { + return nil, ErrTimeout + } + return conn, err +} + +// DialPipeContext attempts to connect to a named pipe by `path` until `ctx` +// cancellation or timeout. +func DialPipeContext(ctx context.Context, path string) (net.Conn, error) { + return DialPipeAccess(ctx, path, syscall.GENERIC_READ|syscall.GENERIC_WRITE) +} + +// DialPipeAccess attempts to connect to a named pipe by `path` with `access` until `ctx` +// cancellation or timeout. +func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) { + var err error + var h syscall.Handle + h, err = tryDialPipe(ctx, &path, fs.AccessMask(access)) + if err != nil { + return nil, err + } + + var flags uint32 + err = getNamedPipeInfo(h, &flags, nil, nil, nil) + if err != nil { + return nil, err + } + + f, err := makeWin32File(h) + if err != nil { + syscall.Close(h) + return nil, err + } + + // If the pipe is in message mode, return a message byte pipe, which + // supports CloseWrite(). + if flags&windows.PIPE_TYPE_MESSAGE != 0 { + return &win32MessageBytePipe{ + win32Pipe: win32Pipe{win32File: f, path: path}, + }, nil + } + return &win32Pipe{win32File: f, path: path}, nil +} + +type acceptResponse struct { + f *win32File + err error +} + +type win32PipeListener struct { + firstHandle syscall.Handle + path string + config PipeConfig + acceptCh chan (chan acceptResponse) + closeCh chan int + doneCh chan int +} + +func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (syscall.Handle, error) { + path16, err := syscall.UTF16FromString(path) + if err != nil { + return 0, &os.PathError{Op: "open", Path: path, Err: err} + } + + var oa objectAttributes + oa.Length = unsafe.Sizeof(oa) + + var ntPath unicodeString + if err := rtlDosPathNameToNtPathName(&path16[0], + &ntPath, + 0, + 0, + ).Err(); err != nil { + return 0, &os.PathError{Op: "open", Path: path, Err: err} + } + defer localFree(ntPath.Buffer) + oa.ObjectName = &ntPath + oa.Attributes = windows.OBJ_CASE_INSENSITIVE + + // The security descriptor is only needed for the first pipe. + if first { + if sd != nil { + l := uint32(len(sd)) + sdb := localAlloc(0, l) + defer localFree(sdb) + copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd) + oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb)) + } else { + // Construct the default named pipe security descriptor. 
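+			// rtlDefaultNpAcl allocates the ACL buffer; it is released with
+			// localFree below once the descriptor has been built.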
+ var dacl uintptr + if err := rtlDefaultNpAcl(&dacl).Err(); err != nil { + return 0, fmt.Errorf("getting default named pipe ACL: %w", err) + } + defer localFree(dacl) + + sdb := &securityDescriptor{ + Revision: 1, + Control: windows.SE_DACL_PRESENT, + Dacl: dacl, + } + oa.SecurityDescriptor = sdb + } + } + + typ := uint32(windows.FILE_PIPE_REJECT_REMOTE_CLIENTS) + if c.MessageMode { + typ |= windows.FILE_PIPE_MESSAGE_TYPE + } + + disposition := uint32(windows.FILE_OPEN) + access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE) + if first { + disposition = windows.FILE_CREATE + // By not asking for read or write access, the named pipe file system + // will put this pipe into an initially disconnected state, blocking + // client connections until the next call with first == false. + access = syscall.SYNCHRONIZE + } + + timeout := int64(-50 * 10000) // 50ms + + var ( + h syscall.Handle + iosb ioStatusBlock + ) + err = ntCreateNamedPipeFile(&h, + access, + &oa, + &iosb, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, + disposition, + 0, + typ, + 0, + 0, + 0xffffffff, + uint32(c.InputBufferSize), + uint32(c.OutputBufferSize), + &timeout).Err() + if err != nil { + return 0, &os.PathError{Op: "open", Path: path, Err: err} + } + + runtime.KeepAlive(ntPath) + return h, nil +} + +func (l *win32PipeListener) makeServerPipe() (*win32File, error) { + h, err := makeServerPipeHandle(l.path, nil, &l.config, false) + if err != nil { + return nil, err + } + f, err := makeWin32File(h) + if err != nil { + syscall.Close(h) + return nil, err + } + return f, nil +} + +func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) { + p, err := l.makeServerPipe() + if err != nil { + return nil, err + } + + // Wait for the client to connect. + ch := make(chan error) + go func(p *win32File) { + ch <- connectPipe(p) + }(p) + + select { + case err = <-ch: + if err != nil { + p.Close() + p = nil + } + case <-l.closeCh: + // Abort the connect request by closing the handle. + p.Close() + p = nil + err = <-ch + if err == nil || err == ErrFileClosed { //nolint:errorlint // err is Errno + err = ErrPipeListenerClosed + } + } + return p, err +} + +func (l *win32PipeListener) listenerRoutine() { + closed := false + for !closed { + select { + case <-l.closeCh: + closed = true + case responseCh := <-l.acceptCh: + var ( + p *win32File + err error + ) + for { + p, err = l.makeConnectedServerPipe() + // If the connection was immediately closed by the client, try + // again. + if err != windows.ERROR_NO_DATA { //nolint:errorlint // err is Errno + break + } + } + responseCh <- acceptResponse{p, err} + closed = err == ErrPipeListenerClosed //nolint:errorlint // err is Errno + } + } + syscall.Close(l.firstHandle) + l.firstHandle = 0 + // Notify Close() and Accept() callers that the handle has been closed. + close(l.doneCh) +} + +// PipeConfig contain configuration for the pipe listener. +type PipeConfig struct { + // SecurityDescriptor contains a Windows security descriptor in SDDL format. + SecurityDescriptor string + + // MessageMode determines whether the pipe is in byte or message mode. In either + // case the pipe is read in byte mode by default. The only practical difference in + // this implementation is that CloseWrite() is only supported for message mode pipes; + // CloseWrite() is implemented as a zero-byte write, but zero-byte writes are only + // transferred to the reader (and returned as io.EOF in this implementation) + // when the pipe is in message mode. 
+ MessageMode bool + + // InputBufferSize specifies the size of the input buffer, in bytes. + InputBufferSize int32 + + // OutputBufferSize specifies the size of the output buffer, in bytes. + OutputBufferSize int32 +} + +// ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe. +// The pipe must not already exist. +func ListenPipe(path string, c *PipeConfig) (net.Listener, error) { + var ( + sd []byte + err error + ) + if c == nil { + c = &PipeConfig{} + } + if c.SecurityDescriptor != "" { + sd, err = SddlToSecurityDescriptor(c.SecurityDescriptor) + if err != nil { + return nil, err + } + } + h, err := makeServerPipeHandle(path, sd, c, true) + if err != nil { + return nil, err + } + l := &win32PipeListener{ + firstHandle: h, + path: path, + config: *c, + acceptCh: make(chan (chan acceptResponse)), + closeCh: make(chan int), + doneCh: make(chan int), + } + go l.listenerRoutine() + return l, nil +} + +func connectPipe(p *win32File) error { + c, err := p.prepareIO() + if err != nil { + return err + } + defer p.wg.Done() + + err = connectNamedPipe(p.handle, &c.o) + _, err = p.asyncIO(c, nil, 0, err) + if err != nil && err != windows.ERROR_PIPE_CONNECTED { //nolint:errorlint // err is Errno + return err + } + return nil +} + +func (l *win32PipeListener) Accept() (net.Conn, error) { + ch := make(chan acceptResponse) + select { + case l.acceptCh <- ch: + response := <-ch + err := response.err + if err != nil { + return nil, err + } + if l.config.MessageMode { + return &win32MessageBytePipe{ + win32Pipe: win32Pipe{win32File: response.f, path: l.path}, + }, nil + } + return &win32Pipe{win32File: response.f, path: l.path}, nil + case <-l.doneCh: + return nil, ErrPipeListenerClosed + } +} + +func (l *win32PipeListener) Close() error { + select { + case l.closeCh <- 1: + <-l.doneCh + case <-l.doneCh: + } + return nil +} + +func (l *win32PipeListener) Addr() net.Addr { + return pipeAddress(l.path) +} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go new file mode 100644 index 0000000000..48ce4e9243 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go @@ -0,0 +1,232 @@ +// Package guid provides a GUID type. The backing structure for a GUID is +// identical to that used by the golang.org/x/sys/windows GUID type. +// There are two main binary encodings used for a GUID, the big-endian encoding, +// and the Windows (mixed-endian) encoding. See here for details: +// https://en.wikipedia.org/wiki/Universally_unique_identifier#Encoding +package guid + +import ( + "crypto/rand" + "crypto/sha1" //nolint:gosec // not used for secure application + "encoding" + "encoding/binary" + "fmt" + "strconv" +) + +//go:generate go run golang.org/x/tools/cmd/stringer -type=Variant -trimprefix=Variant -linecomment + +// Variant specifies which GUID variant (or "type") of the GUID. It determines +// how the entirety of the rest of the GUID is interpreted. +type Variant uint8 + +// The variants specified by RFC 4122 section 4.1.1. +const ( + // VariantUnknown specifies a GUID variant which does not conform to one of + // the variant encodings specified in RFC 4122. + VariantUnknown Variant = iota + VariantNCS + VariantRFC4122 // RFC 4122 + VariantMicrosoft + VariantFuture +) + +// Version specifies how the bits in the GUID were generated. For instance, a +// version 4 GUID is randomly generated, and a version 5 is generated from the +// hash of an input string. 
+type Version uint8 + +func (v Version) String() string { + return strconv.FormatUint(uint64(v), 10) +} + +var _ = (encoding.TextMarshaler)(GUID{}) +var _ = (encoding.TextUnmarshaler)(&GUID{}) + +// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122. +func NewV4() (GUID, error) { + var b [16]byte + if _, err := rand.Read(b[:]); err != nil { + return GUID{}, err + } + + g := FromArray(b) + g.setVersion(4) // Version 4 means randomly generated. + g.setVariant(VariantRFC4122) + + return g, nil +} + +// NewV5 returns a new version 5 (generated from a string via SHA-1 hashing) +// GUID, as defined by RFC 4122. The RFC is unclear on the encoding of the name, +// and the sample code treats it as a series of bytes, so we do the same here. +// +// Some implementations, such as those found on Windows, treat the name as a +// big-endian UTF16 stream of bytes. If that is desired, the string can be +// encoded as such before being passed to this function. +func NewV5(namespace GUID, name []byte) (GUID, error) { + b := sha1.New() //nolint:gosec // not used for secure application + namespaceBytes := namespace.ToArray() + b.Write(namespaceBytes[:]) + b.Write(name) + + a := [16]byte{} + copy(a[:], b.Sum(nil)) + + g := FromArray(a) + g.setVersion(5) // Version 5 means generated from a string. + g.setVariant(VariantRFC4122) + + return g, nil +} + +func fromArray(b [16]byte, order binary.ByteOrder) GUID { + var g GUID + g.Data1 = order.Uint32(b[0:4]) + g.Data2 = order.Uint16(b[4:6]) + g.Data3 = order.Uint16(b[6:8]) + copy(g.Data4[:], b[8:16]) + return g +} + +func (g GUID) toArray(order binary.ByteOrder) [16]byte { + b := [16]byte{} + order.PutUint32(b[0:4], g.Data1) + order.PutUint16(b[4:6], g.Data2) + order.PutUint16(b[6:8], g.Data3) + copy(b[8:16], g.Data4[:]) + return b +} + +// FromArray constructs a GUID from a big-endian encoding array of 16 bytes. +func FromArray(b [16]byte) GUID { + return fromArray(b, binary.BigEndian) +} + +// ToArray returns an array of 16 bytes representing the GUID in big-endian +// encoding. +func (g GUID) ToArray() [16]byte { + return g.toArray(binary.BigEndian) +} + +// FromWindowsArray constructs a GUID from a Windows encoding array of bytes. +func FromWindowsArray(b [16]byte) GUID { + return fromArray(b, binary.LittleEndian) +} + +// ToWindowsArray returns an array of 16 bytes representing the GUID in Windows +// encoding. +func (g GUID) ToWindowsArray() [16]byte { + return g.toArray(binary.LittleEndian) +} + +func (g GUID) String() string { + return fmt.Sprintf( + "%08x-%04x-%04x-%04x-%012x", + g.Data1, + g.Data2, + g.Data3, + g.Data4[:2], + g.Data4[2:]) +} + +// FromString parses a string containing a GUID and returns the GUID. The only +// format currently supported is the `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` +// format. 
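+//
+// An illustrative call:
+//
+//	g, err := guid.FromString("25a207b9-ddf3-4660-8ee9-76e58c74063e")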
+func FromString(s string) (GUID, error) { + if len(s) != 36 { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + + var g GUID + + data1, err := strconv.ParseUint(s[0:8], 16, 32) + if err != nil { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + g.Data1 = uint32(data1) + + data2, err := strconv.ParseUint(s[9:13], 16, 16) + if err != nil { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + g.Data2 = uint16(data2) + + data3, err := strconv.ParseUint(s[14:18], 16, 16) + if err != nil { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + g.Data3 = uint16(data3) + + for i, x := range []int{19, 21, 24, 26, 28, 30, 32, 34} { + v, err := strconv.ParseUint(s[x:x+2], 16, 8) + if err != nil { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + g.Data4[i] = uint8(v) + } + + return g, nil +} + +func (g *GUID) setVariant(v Variant) { + d := g.Data4[0] + switch v { + case VariantNCS: + d = (d & 0x7f) + case VariantRFC4122: + d = (d & 0x3f) | 0x80 + case VariantMicrosoft: + d = (d & 0x1f) | 0xc0 + case VariantFuture: + d = (d & 0x0f) | 0xe0 + case VariantUnknown: + fallthrough + default: + panic(fmt.Sprintf("invalid variant: %d", v)) + } + g.Data4[0] = d +} + +// Variant returns the GUID variant, as defined in RFC 4122. +func (g GUID) Variant() Variant { + b := g.Data4[0] + if b&0x80 == 0 { + return VariantNCS + } else if b&0xc0 == 0x80 { + return VariantRFC4122 + } else if b&0xe0 == 0xc0 { + return VariantMicrosoft + } else if b&0xe0 == 0xe0 { + return VariantFuture + } + return VariantUnknown +} + +func (g *GUID) setVersion(v Version) { + g.Data3 = (g.Data3 & 0x0fff) | (uint16(v) << 12) +} + +// Version returns the GUID version, as defined in RFC 4122. +func (g GUID) Version() Version { + return Version((g.Data3 & 0xF000) >> 12) +} + +// MarshalText returns the textual representation of the GUID. +func (g GUID) MarshalText() ([]byte, error) { + return []byte(g.String()), nil +} + +// UnmarshalText takes the textual representation of a GUID, and unmarhals it +// into this GUID. +func (g *GUID) UnmarshalText(text []byte) error { + g2, err := FromString(string(text)) + if err != nil { + return err + } + *g = g2 + return nil +} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go new file mode 100644 index 0000000000..805bd35484 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go @@ -0,0 +1,16 @@ +//go:build !windows +// +build !windows + +package guid + +// GUID represents a GUID/UUID. It has the same structure as +// golang.org/x/sys/windows.GUID so that it can be used with functions expecting +// that type. It is defined as its own type as that is only available to builds +// targeted at `windows`. The representation matches that used by native Windows +// code. +type GUID struct { + Data1 uint32 + Data2 uint16 + Data3 uint16 + Data4 [8]byte +} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go new file mode 100644 index 0000000000..27e45ee5cc --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go @@ -0,0 +1,13 @@ +//go:build windows +// +build windows + +package guid + +import "golang.org/x/sys/windows" + +// GUID represents a GUID/UUID. 
It has the same structure as +// golang.org/x/sys/windows.GUID so that it can be used with functions expecting +// that type. It is defined as its own type so that stringification and +// marshaling can be supported. The representation matches that used by native +// Windows code. +type GUID windows.GUID diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go new file mode 100644 index 0000000000..4076d3132f --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go @@ -0,0 +1,27 @@ +// Code generated by "stringer -type=Variant -trimprefix=Variant -linecomment"; DO NOT EDIT. + +package guid + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[VariantUnknown-0] + _ = x[VariantNCS-1] + _ = x[VariantRFC4122-2] + _ = x[VariantMicrosoft-3] + _ = x[VariantFuture-4] +} + +const _Variant_name = "UnknownNCSRFC 4122MicrosoftFuture" + +var _Variant_index = [...]uint8{0, 7, 10, 18, 27, 33} + +func (i Variant) String() string { + if i >= Variant(len(_Variant_index)-1) { + return "Variant(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Variant_name[_Variant_index[i]:_Variant_index[i+1]] +} diff --git a/vendor/github.com/Microsoft/go-winio/privilege.go b/vendor/github.com/Microsoft/go-winio/privilege.go new file mode 100644 index 0000000000..0ff9dac906 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/privilege.go @@ -0,0 +1,197 @@ +//go:build windows +// +build windows + +package winio + +import ( + "bytes" + "encoding/binary" + "fmt" + "runtime" + "sync" + "syscall" + "unicode/utf16" + + "golang.org/x/sys/windows" +) + +//sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges +//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf +//sys revertToSelf() (err error) = advapi32.RevertToSelf +//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken +//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread +//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW +//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW +//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW + +const ( + //revive:disable-next-line:var-naming ALL_CAPS + SE_PRIVILEGE_ENABLED = windows.SE_PRIVILEGE_ENABLED + + //revive:disable-next-line:var-naming ALL_CAPS + ERROR_NOT_ALL_ASSIGNED syscall.Errno = windows.ERROR_NOT_ALL_ASSIGNED + + SeBackupPrivilege = "SeBackupPrivilege" + SeRestorePrivilege = "SeRestorePrivilege" + SeSecurityPrivilege = "SeSecurityPrivilege" +) + +var ( + privNames = make(map[string]uint64) + privNameMutex sync.Mutex +) + +// PrivilegeError represents an error enabling privileges. 
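+// It is returned by the RunWith* and process-level helpers below when
+// AdjustTokenPrivileges reports ERROR_NOT_ALL_ASSIGNED. An illustrative call:
+//
+//	err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
+//		// open handles with FILE_FLAG_BACKUP_SEMANTICS here
+//		return nil
+//	})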
+type PrivilegeError struct {
+	privileges []uint64
+}
+
+func (e *PrivilegeError) Error() string {
+	s := "Could not enable privilege "
+	if len(e.privileges) > 1 {
+		s = "Could not enable privileges "
+	}
+	for i, p := range e.privileges {
+		if i != 0 {
+			s += ", "
+		}
+		s += `"`
+		s += getPrivilegeName(p)
+		s += `"`
+	}
+	return s
+}
+
+// RunWithPrivilege enables a single privilege for a function call.
+func RunWithPrivilege(name string, fn func() error) error {
+	return RunWithPrivileges([]string{name}, fn)
+}
+
+// RunWithPrivileges enables privileges for a function call.
+func RunWithPrivileges(names []string, fn func() error) error {
+	privileges, err := mapPrivileges(names)
+	if err != nil {
+		return err
+	}
+	runtime.LockOSThread()
+	defer runtime.UnlockOSThread()
+	token, err := newThreadToken()
+	if err != nil {
+		return err
+	}
+	defer releaseThreadToken(token)
+	err = adjustPrivileges(token, privileges, SE_PRIVILEGE_ENABLED)
+	if err != nil {
+		return err
+	}
+	return fn()
+}
+
+func mapPrivileges(names []string) ([]uint64, error) {
+	privileges := make([]uint64, 0, len(names))
+	privNameMutex.Lock()
+	defer privNameMutex.Unlock()
+	for _, name := range names {
+		p, ok := privNames[name]
+		if !ok {
+			err := lookupPrivilegeValue("", name, &p)
+			if err != nil {
+				return nil, err
+			}
+			privNames[name] = p
+		}
+		privileges = append(privileges, p)
+	}
+	return privileges, nil
+}
+
+// EnableProcessPrivileges enables privileges globally for the process.
+func EnableProcessPrivileges(names []string) error {
+	return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED)
+}
+
+// DisableProcessPrivileges disables privileges globally for the process.
+func DisableProcessPrivileges(names []string) error {
+	return enableDisableProcessPrivilege(names, 0)
+}
+
+func enableDisableProcessPrivilege(names []string, action uint32) error {
+	privileges, err := mapPrivileges(names)
+	if err != nil {
+		return err
+	}
+
+	p := windows.CurrentProcess()
+	var token windows.Token
+	err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token)
+	if err != nil {
+		return err
+	}
+
+	defer token.Close()
+	return adjustPrivileges(token, privileges, action)
+}
+
+func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error {
+	var b bytes.Buffer
+	_ = binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
+	for _, p := range privileges {
+		_ = binary.Write(&b, binary.LittleEndian, p)
+		_ = binary.Write(&b, binary.LittleEndian, action)
+	}
+	prevState := make([]byte, b.Len())
+	reqSize := uint32(0)
+	success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize)
+	if !success {
+		return err
+	}
+	if err == ERROR_NOT_ALL_ASSIGNED { //nolint:errorlint // err is Errno
+		return &PrivilegeError{privileges}
+	}
+	return nil
+}
+
+func getPrivilegeName(luid uint64) string {
+	var nameBuffer [256]uint16
+	bufSize := uint32(len(nameBuffer))
+	err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize)
+	if err != nil {
+		return fmt.Sprintf("<unknown privilege %d>", luid)
+	}
+
+	var displayNameBuffer [256]uint16
+	displayBufSize := uint32(len(displayNameBuffer))
+	var langID uint32
+	err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langID)
+	if err != nil {
+		return fmt.Sprintf("<unknown privilege %s>", string(utf16.Decode(nameBuffer[:bufSize])))
+	}
+
+	return string(utf16.Decode(displayNameBuffer[:displayBufSize]))
+}
+
+func newThreadToken() (windows.Token, error) {
+	err :=
impersonateSelf(windows.SecurityImpersonation) + if err != nil { + return 0, err + } + + var token windows.Token + err = openThreadToken(getCurrentThread(), syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token) + if err != nil { + rerr := revertToSelf() + if rerr != nil { + panic(rerr) + } + return 0, err + } + return token, nil +} + +func releaseThreadToken(h windows.Token) { + err := revertToSelf() + if err != nil { + panic(err) + } + h.Close() +} diff --git a/vendor/github.com/Microsoft/go-winio/reparse.go b/vendor/github.com/Microsoft/go-winio/reparse.go new file mode 100644 index 0000000000..67d1a104a6 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/reparse.go @@ -0,0 +1,131 @@ +//go:build windows +// +build windows + +package winio + +import ( + "bytes" + "encoding/binary" + "fmt" + "strings" + "unicode/utf16" + "unsafe" +) + +const ( + reparseTagMountPoint = 0xA0000003 + reparseTagSymlink = 0xA000000C +) + +type reparseDataBuffer struct { + ReparseTag uint32 + ReparseDataLength uint16 + Reserved uint16 + SubstituteNameOffset uint16 + SubstituteNameLength uint16 + PrintNameOffset uint16 + PrintNameLength uint16 +} + +// ReparsePoint describes a Win32 symlink or mount point. +type ReparsePoint struct { + Target string + IsMountPoint bool +} + +// UnsupportedReparsePointError is returned when trying to decode a non-symlink or +// mount point reparse point. +type UnsupportedReparsePointError struct { + Tag uint32 +} + +func (e *UnsupportedReparsePointError) Error() string { + return fmt.Sprintf("unsupported reparse point %x", e.Tag) +} + +// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink +// or a mount point. +func DecodeReparsePoint(b []byte) (*ReparsePoint, error) { + tag := binary.LittleEndian.Uint32(b[0:4]) + return DecodeReparsePointData(tag, b[8:]) +} + +func DecodeReparsePointData(tag uint32, b []byte) (*ReparsePoint, error) { + isMountPoint := false + switch tag { + case reparseTagMountPoint: + isMountPoint = true + case reparseTagSymlink: + default: + return nil, &UnsupportedReparsePointError{tag} + } + nameOffset := 8 + binary.LittleEndian.Uint16(b[4:6]) + if !isMountPoint { + nameOffset += 4 + } + nameLength := binary.LittleEndian.Uint16(b[6:8]) + name := make([]uint16, nameLength/2) + err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name) + if err != nil { + return nil, err + } + return &ReparsePoint{string(utf16.Decode(name)), isMountPoint}, nil +} + +func isDriveLetter(c byte) bool { + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') +} + +// EncodeReparsePoint encodes a Win32 REPARSE_DATA_BUFFER structure describing a symlink or +// mount point. +func EncodeReparsePoint(rp *ReparsePoint) []byte { + // Generate an NT path and determine if this is a relative path. + var ntTarget string + relative := false + if strings.HasPrefix(rp.Target, `\\?\`) { + ntTarget = `\??\` + rp.Target[4:] + } else if strings.HasPrefix(rp.Target, `\\`) { + ntTarget = `\??\UNC\` + rp.Target[2:] + } else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' { + ntTarget = `\??\` + rp.Target + } else { + ntTarget = rp.Target + relative = true + } + + // The paths must be NUL-terminated even though they are counted strings. 
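+	// Two counted strings are stored back to back: the NT-namespace
+	// "substitute name" that the kernel resolves, then the user-facing
+	// "print name"; the offsets and lengths below describe that layout.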
+ target16 := utf16.Encode([]rune(rp.Target + "\x00")) + ntTarget16 := utf16.Encode([]rune(ntTarget + "\x00")) + + size := int(unsafe.Sizeof(reparseDataBuffer{})) - 8 + size += len(ntTarget16)*2 + len(target16)*2 + + tag := uint32(reparseTagMountPoint) + if !rp.IsMountPoint { + tag = reparseTagSymlink + size += 4 // Add room for symlink flags + } + + data := reparseDataBuffer{ + ReparseTag: tag, + ReparseDataLength: uint16(size), + SubstituteNameOffset: 0, + SubstituteNameLength: uint16((len(ntTarget16) - 1) * 2), + PrintNameOffset: uint16(len(ntTarget16) * 2), + PrintNameLength: uint16((len(target16) - 1) * 2), + } + + var b bytes.Buffer + _ = binary.Write(&b, binary.LittleEndian, &data) + if !rp.IsMountPoint { + flags := uint32(0) + if relative { + flags |= 1 + } + _ = binary.Write(&b, binary.LittleEndian, flags) + } + + _ = binary.Write(&b, binary.LittleEndian, ntTarget16) + _ = binary.Write(&b, binary.LittleEndian, target16) + return b.Bytes() +} diff --git a/vendor/github.com/Microsoft/go-winio/sd.go b/vendor/github.com/Microsoft/go-winio/sd.go new file mode 100644 index 0000000000..5550ef6b61 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/sd.go @@ -0,0 +1,144 @@ +//go:build windows +// +build windows + +package winio + +import ( + "errors" + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW +//sys lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountSidW +//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW +//sys convertStringSidToSid(str *uint16, sid **byte) (err error) = advapi32.ConvertStringSidToSidW +//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW +//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW +//sys localFree(mem uintptr) = LocalFree +//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength + +type AccountLookupError struct { + Name string + Err error +} + +func (e *AccountLookupError) Error() string { + if e.Name == "" { + return "lookup account: empty account name specified" + } + var s string + switch { + case errors.Is(e.Err, windows.ERROR_INVALID_SID): + s = "the security ID structure is invalid" + case errors.Is(e.Err, windows.ERROR_NONE_MAPPED): + s = "not found" + default: + s = e.Err.Error() + } + return "lookup account " + e.Name + ": " + s +} + +func (e *AccountLookupError) Unwrap() error { return e.Err } + +type SddlConversionError struct { + Sddl string + Err error +} + +func (e *SddlConversionError) Error() string { + return "convert " + e.Sddl + ": " + e.Err.Error() +} + +func (e *SddlConversionError) Unwrap() error { return e.Err } + +// LookupSidByName looks up the SID of an account by name +// +//revive:disable-next-line:var-naming SID, not Sid +func LookupSidByName(name string) (sid string, err error) { + if name == "" { + return "", &AccountLookupError{name, windows.ERROR_NONE_MAPPED} + } + + var sidSize, 
sidNameUse, refDomainSize uint32 + err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse) + if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno + return "", &AccountLookupError{name, err} + } + sidBuffer := make([]byte, sidSize) + refDomainBuffer := make([]uint16, refDomainSize) + err = lookupAccountName(nil, name, &sidBuffer[0], &sidSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse) + if err != nil { + return "", &AccountLookupError{name, err} + } + var strBuffer *uint16 + err = convertSidToStringSid(&sidBuffer[0], &strBuffer) + if err != nil { + return "", &AccountLookupError{name, err} + } + sid = syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:]) + localFree(uintptr(unsafe.Pointer(strBuffer))) + return sid, nil +} + +// LookupNameBySid looks up the name of an account by SID +// +//revive:disable-next-line:var-naming SID, not Sid +func LookupNameBySid(sid string) (name string, err error) { + if sid == "" { + return "", &AccountLookupError{sid, windows.ERROR_NONE_MAPPED} + } + + sidBuffer, err := windows.UTF16PtrFromString(sid) + if err != nil { + return "", &AccountLookupError{sid, err} + } + + var sidPtr *byte + if err = convertStringSidToSid(sidBuffer, &sidPtr); err != nil { + return "", &AccountLookupError{sid, err} + } + defer localFree(uintptr(unsafe.Pointer(sidPtr))) + + var nameSize, refDomainSize, sidNameUse uint32 + err = lookupAccountSid(nil, sidPtr, nil, &nameSize, nil, &refDomainSize, &sidNameUse) + if err != nil && err != windows.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno + return "", &AccountLookupError{sid, err} + } + + nameBuffer := make([]uint16, nameSize) + refDomainBuffer := make([]uint16, refDomainSize) + err = lookupAccountSid(nil, sidPtr, &nameBuffer[0], &nameSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse) + if err != nil { + return "", &AccountLookupError{sid, err} + } + + name = windows.UTF16ToString(nameBuffer) + return name, nil +} + +func SddlToSecurityDescriptor(sddl string) ([]byte, error) { + var sdBuffer uintptr + err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil) + if err != nil { + return nil, &SddlConversionError{sddl, err} + } + defer localFree(sdBuffer) + sd := make([]byte, getSecurityDescriptorLength(sdBuffer)) + copy(sd, (*[0xffff]byte)(unsafe.Pointer(sdBuffer))[:len(sd)]) + return sd, nil +} + +func SecurityDescriptorToSddl(sd []byte) (string, error) { + var sddl *uint16 + // The returned string length seems to include an arbitrary number of terminating NULs. + // Don't use it. 
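+	// An illustrative round trip (the SDDL string is hypothetical):
+	// SddlToSecurityDescriptor("D:P(A;;GA;;;SY)") followed by
+	// SecurityDescriptorToSddl should return an equivalent string.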
+ err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil) + if err != nil { + return "", err + } + defer localFree(uintptr(unsafe.Pointer(sddl))) + return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(sddl))[:]), nil +} diff --git a/vendor/github.com/Microsoft/go-winio/syscall.go b/vendor/github.com/Microsoft/go-winio/syscall.go new file mode 100644 index 0000000000..a6ca111b39 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/syscall.go @@ -0,0 +1,5 @@ +//go:build windows + +package winio + +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go ./*.go diff --git a/vendor/github.com/Microsoft/go-winio/tools.go b/vendor/github.com/Microsoft/go-winio/tools.go new file mode 100644 index 0000000000..2aa045843e --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/tools.go @@ -0,0 +1,5 @@ +//go:build tools + +package winio + +import _ "golang.org/x/tools/cmd/stringer" diff --git a/vendor/github.com/Microsoft/go-winio/vhd/vhd.go b/vendor/github.com/Microsoft/go-winio/vhd/vhd.go new file mode 100644 index 0000000000..b54cad1127 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/vhd/vhd.go @@ -0,0 +1,377 @@ +//go:build windows +// +build windows + +package vhd + +import ( + "fmt" + "syscall" + + "github.com/Microsoft/go-winio/pkg/guid" + "golang.org/x/sys/windows" +) + +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zvhd_windows.go vhd.go + +//sys createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) = virtdisk.CreateVirtualDisk +//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) = virtdisk.OpenVirtualDisk +//sys attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (win32err error) = virtdisk.AttachVirtualDisk +//sys detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (win32err error) = virtdisk.DetachVirtualDisk +//sys getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (win32err error) = virtdisk.GetVirtualDiskPhysicalPath + +type ( + CreateVirtualDiskFlag uint32 + VirtualDiskFlag uint32 + AttachVirtualDiskFlag uint32 + DetachVirtualDiskFlag uint32 + VirtualDiskAccessMask uint32 +) + +type VirtualStorageType struct { + DeviceID uint32 + VendorID guid.GUID +} + +type CreateVersion2 struct { + UniqueID guid.GUID + MaximumSize uint64 + BlockSizeInBytes uint32 + SectorSizeInBytes uint32 + PhysicalSectorSizeInByte uint32 + ParentPath *uint16 // string + SourcePath *uint16 // string + OpenFlags uint32 + ParentVirtualStorageType VirtualStorageType + SourceVirtualStorageType VirtualStorageType + ResiliencyGUID guid.GUID +} + +type CreateVirtualDiskParameters struct { + Version uint32 // Must always be set to 2 + Version2 CreateVersion2 +} + +type OpenVersion2 struct { + GetInfoOnly bool + ReadOnly bool + ResiliencyGUID guid.GUID +} + 
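+// OpenVirtualDiskParameters is the public, version-2 parameter block for
+// opening a VHD; it mirrors the Win32 OPEN_VIRTUAL_DISK_PARAMETERS structure.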
+type OpenVirtualDiskParameters struct { + Version uint32 // Must always be set to 2 + Version2 OpenVersion2 +} + +// The higher level `OpenVersion2` struct uses `bool`s to refer to `GetInfoOnly` and `ReadOnly` for ease of use. However, +// the internal windows structure uses `BOOL`s aka int32s for these types. `openVersion2` is used for translating +// `OpenVersion2` fields to the correct windows internal field types on the `Open____` methods. +type openVersion2 struct { + getInfoOnly int32 + readOnly int32 + resiliencyGUID guid.GUID +} + +type openVirtualDiskParameters struct { + version uint32 + version2 openVersion2 +} + +type AttachVersion2 struct { + RestrictedOffset uint64 + RestrictedLength uint64 +} + +type AttachVirtualDiskParameters struct { + Version uint32 + Version2 AttachVersion2 +} + +const ( + //revive:disable-next-line:var-naming ALL_CAPS + VIRTUAL_STORAGE_TYPE_DEVICE_VHDX = 0x3 + + // Access Mask for opening a VHD. + VirtualDiskAccessNone VirtualDiskAccessMask = 0x00000000 + VirtualDiskAccessAttachRO VirtualDiskAccessMask = 0x00010000 + VirtualDiskAccessAttachRW VirtualDiskAccessMask = 0x00020000 + VirtualDiskAccessDetach VirtualDiskAccessMask = 0x00040000 + VirtualDiskAccessGetInfo VirtualDiskAccessMask = 0x00080000 + VirtualDiskAccessCreate VirtualDiskAccessMask = 0x00100000 + VirtualDiskAccessMetaOps VirtualDiskAccessMask = 0x00200000 + VirtualDiskAccessRead VirtualDiskAccessMask = 0x000d0000 + VirtualDiskAccessAll VirtualDiskAccessMask = 0x003f0000 + VirtualDiskAccessWritable VirtualDiskAccessMask = 0x00320000 + + // Flags for creating a VHD. + CreateVirtualDiskFlagNone CreateVirtualDiskFlag = 0x0 + CreateVirtualDiskFlagFullPhysicalAllocation CreateVirtualDiskFlag = 0x1 + CreateVirtualDiskFlagPreventWritesToSourceDisk CreateVirtualDiskFlag = 0x2 + CreateVirtualDiskFlagDoNotCopyMetadataFromParent CreateVirtualDiskFlag = 0x4 + CreateVirtualDiskFlagCreateBackingStorage CreateVirtualDiskFlag = 0x8 + CreateVirtualDiskFlagUseChangeTrackingSourceLimit CreateVirtualDiskFlag = 0x10 + CreateVirtualDiskFlagPreserveParentChangeTrackingState CreateVirtualDiskFlag = 0x20 + CreateVirtualDiskFlagVhdSetUseOriginalBackingStorage CreateVirtualDiskFlag = 0x40 //revive:disable-line:var-naming VHD, not Vhd + CreateVirtualDiskFlagSparseFile CreateVirtualDiskFlag = 0x80 + CreateVirtualDiskFlagPmemCompatible CreateVirtualDiskFlag = 0x100 //revive:disable-line:var-naming PMEM, not Pmem + CreateVirtualDiskFlagSupportCompressedVolumes CreateVirtualDiskFlag = 0x200 + + // Flags for opening a VHD. + OpenVirtualDiskFlagNone VirtualDiskFlag = 0x00000000 + OpenVirtualDiskFlagNoParents VirtualDiskFlag = 0x00000001 + OpenVirtualDiskFlagBlankFile VirtualDiskFlag = 0x00000002 + OpenVirtualDiskFlagBootDrive VirtualDiskFlag = 0x00000004 + OpenVirtualDiskFlagCachedIO VirtualDiskFlag = 0x00000008 + OpenVirtualDiskFlagCustomDiffChain VirtualDiskFlag = 0x00000010 + OpenVirtualDiskFlagParentCachedIO VirtualDiskFlag = 0x00000020 + OpenVirtualDiskFlagVhdsetFileOnly VirtualDiskFlag = 0x00000040 + OpenVirtualDiskFlagIgnoreRelativeParentLocator VirtualDiskFlag = 0x00000080 + OpenVirtualDiskFlagNoWriteHardening VirtualDiskFlag = 0x00000100 + OpenVirtualDiskFlagSupportCompressedVolumes VirtualDiskFlag = 0x00000200 + + // Flags for attaching a VHD. 
+ AttachVirtualDiskFlagNone AttachVirtualDiskFlag = 0x00000000 + AttachVirtualDiskFlagReadOnly AttachVirtualDiskFlag = 0x00000001 + AttachVirtualDiskFlagNoDriveLetter AttachVirtualDiskFlag = 0x00000002 + AttachVirtualDiskFlagPermanentLifetime AttachVirtualDiskFlag = 0x00000004 + AttachVirtualDiskFlagNoLocalHost AttachVirtualDiskFlag = 0x00000008 + AttachVirtualDiskFlagNoSecurityDescriptor AttachVirtualDiskFlag = 0x00000010 + AttachVirtualDiskFlagBypassDefaultEncryptionPolicy AttachVirtualDiskFlag = 0x00000020 + AttachVirtualDiskFlagNonPnp AttachVirtualDiskFlag = 0x00000040 + AttachVirtualDiskFlagRestrictedRange AttachVirtualDiskFlag = 0x00000080 + AttachVirtualDiskFlagSinglePartition AttachVirtualDiskFlag = 0x00000100 + AttachVirtualDiskFlagRegisterVolume AttachVirtualDiskFlag = 0x00000200 + + // Flags for detaching a VHD. + DetachVirtualDiskFlagNone DetachVirtualDiskFlag = 0x0 +) + +// CreateVhdx is a helper function to create a simple vhdx file at the given path using +// default values. +// +//revive:disable-next-line:var-naming VHDX, not Vhdx +func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error { + params := CreateVirtualDiskParameters{ + Version: 2, + Version2: CreateVersion2{ + MaximumSize: uint64(maxSizeInGb) * 1024 * 1024 * 1024, + BlockSizeInBytes: blockSizeInMb * 1024 * 1024, + }, + } + + handle, err := CreateVirtualDisk(path, VirtualDiskAccessNone, CreateVirtualDiskFlagNone, ¶ms) + if err != nil { + return err + } + + return syscall.CloseHandle(handle) +} + +// DetachVirtualDisk detaches a virtual hard disk by handle. +func DetachVirtualDisk(handle syscall.Handle) (err error) { + if err := detachVirtualDisk(handle, 0, 0); err != nil { + return fmt.Errorf("failed to detach virtual disk: %w", err) + } + return nil +} + +// DetachVhd detaches a vhd found at `path`. +// +//revive:disable-next-line:var-naming VHD, not Vhd +func DetachVhd(path string) error { + handle, err := OpenVirtualDisk( + path, + VirtualDiskAccessNone, + OpenVirtualDiskFlagCachedIO|OpenVirtualDiskFlagIgnoreRelativeParentLocator, + ) + if err != nil { + return err + } + defer syscall.CloseHandle(handle) //nolint:errcheck + return DetachVirtualDisk(handle) +} + +// AttachVirtualDisk attaches a virtual hard disk for use. +func AttachVirtualDisk( + handle syscall.Handle, + attachVirtualDiskFlag AttachVirtualDiskFlag, + parameters *AttachVirtualDiskParameters, +) (err error) { + // Supports both version 1 and 2 of the attach parameters as version 2 wasn't present in RS5. + if err := attachVirtualDisk( + handle, + nil, + uint32(attachVirtualDiskFlag), + 0, + parameters, + nil, + ); err != nil { + return fmt.Errorf("failed to attach virtual disk: %w", err) + } + return nil +} + +// AttachVhd attaches a virtual hard disk at `path` for use. Attaches using version 2 +// of the ATTACH_VIRTUAL_DISK_PARAMETERS. +// +//revive:disable-next-line:var-naming VHD, not Vhd +func AttachVhd(path string) (err error) { + handle, err := OpenVirtualDisk( + path, + VirtualDiskAccessNone, + OpenVirtualDiskFlagCachedIO|OpenVirtualDiskFlagIgnoreRelativeParentLocator, + ) + if err != nil { + return err + } + + defer syscall.CloseHandle(handle) //nolint:errcheck + params := AttachVirtualDiskParameters{Version: 2} + if err := AttachVirtualDisk( + handle, + AttachVirtualDiskFlagNone, + ¶ms, + ); err != nil { + return fmt.Errorf("failed to attach virtual disk: %w", err) + } + return nil +} + +// OpenVirtualDisk obtains a handle to a VHD opened with supplied access mask and flags. 
+func OpenVirtualDisk( + vhdPath string, + virtualDiskAccessMask VirtualDiskAccessMask, + openVirtualDiskFlags VirtualDiskFlag, +) (syscall.Handle, error) { + parameters := OpenVirtualDiskParameters{Version: 2} + handle, err := OpenVirtualDiskWithParameters( + vhdPath, + virtualDiskAccessMask, + openVirtualDiskFlags, + ¶meters, + ) + if err != nil { + return 0, err + } + return handle, nil +} + +// OpenVirtualDiskWithParameters obtains a handle to a VHD opened with supplied access mask, flags and parameters. +func OpenVirtualDiskWithParameters( + vhdPath string, + virtualDiskAccessMask VirtualDiskAccessMask, + openVirtualDiskFlags VirtualDiskFlag, + parameters *OpenVirtualDiskParameters, +) (syscall.Handle, error) { + var ( + handle syscall.Handle + defaultType VirtualStorageType + getInfoOnly int32 + readOnly int32 + ) + if parameters.Version != 2 { + return handle, fmt.Errorf("only version 2 VHDs are supported, found version: %d", parameters.Version) + } + if parameters.Version2.GetInfoOnly { + getInfoOnly = 1 + } + if parameters.Version2.ReadOnly { + readOnly = 1 + } + params := &openVirtualDiskParameters{ + version: parameters.Version, + version2: openVersion2{ + getInfoOnly, + readOnly, + parameters.Version2.ResiliencyGUID, + }, + } + if err := openVirtualDisk( + &defaultType, + vhdPath, + uint32(virtualDiskAccessMask), + uint32(openVirtualDiskFlags), + params, + &handle, + ); err != nil { + return 0, fmt.Errorf("failed to open virtual disk: %w", err) + } + return handle, nil +} + +// CreateVirtualDisk creates a virtual harddisk and returns a handle to the disk. +func CreateVirtualDisk( + path string, + virtualDiskAccessMask VirtualDiskAccessMask, + createVirtualDiskFlags CreateVirtualDiskFlag, + parameters *CreateVirtualDiskParameters, +) (syscall.Handle, error) { + var ( + handle syscall.Handle + defaultType VirtualStorageType + ) + if parameters.Version != 2 { + return handle, fmt.Errorf("only version 2 VHDs are supported, found version: %d", parameters.Version) + } + + if err := createVirtualDisk( + &defaultType, + path, + uint32(virtualDiskAccessMask), + nil, + uint32(createVirtualDiskFlags), + 0, + parameters, + nil, + &handle, + ); err != nil { + return handle, fmt.Errorf("failed to create virtual disk: %w", err) + } + return handle, nil +} + +// GetVirtualDiskPhysicalPath takes a handle to a virtual hard disk and returns the physical +// path of the disk on the machine. This path is in the form \\.\PhysicalDriveX where X is an integer +// that represents the particular enumeration of the physical disk on the caller's system. +func GetVirtualDiskPhysicalPath(handle syscall.Handle) (_ string, err error) { + var ( + diskPathSizeInBytes uint32 = 256 * 2 // max path length 256 wide chars + diskPhysicalPathBuf [256]uint16 + ) + if err := getVirtualDiskPhysicalPath( + handle, + &diskPathSizeInBytes, + &diskPhysicalPathBuf[0], + ); err != nil { + return "", fmt.Errorf("failed to get disk physical path: %w", err) + } + return windows.UTF16ToString(diskPhysicalPathBuf[:]), nil +} + +// CreateDiffVhd is a helper function to create a differencing virtual disk. +// +//revive:disable-next-line:var-naming VHD, not Vhd +func CreateDiffVhd(diffVhdPath, baseVhdPath string, blockSizeInMB uint32) error { + // Setting `ParentPath` is how to signal to create a differencing disk. 
+ createParams := &CreateVirtualDiskParameters{ + Version: 2, + Version2: CreateVersion2{ + ParentPath: windows.StringToUTF16Ptr(baseVhdPath), + BlockSizeInBytes: blockSizeInMB * 1024 * 1024, + OpenFlags: uint32(OpenVirtualDiskFlagCachedIO), + }, + } + + vhdHandle, err := CreateVirtualDisk( + diffVhdPath, + VirtualDiskAccessNone, + CreateVirtualDiskFlagNone, + createParams, + ) + if err != nil { + return fmt.Errorf("failed to create differencing vhd: %w", err) + } + if err := syscall.CloseHandle(vhdHandle); err != nil { + return fmt.Errorf("failed to close differencing vhd handle: %w", err) + } + return nil +} diff --git a/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go b/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go new file mode 100644 index 0000000000..d0e917d2be --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go @@ -0,0 +1,108 @@ +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. + +package vhd + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modvirtdisk = windows.NewLazySystemDLL("virtdisk.dll") + + procAttachVirtualDisk = modvirtdisk.NewProc("AttachVirtualDisk") + procCreateVirtualDisk = modvirtdisk.NewProc("CreateVirtualDisk") + procDetachVirtualDisk = modvirtdisk.NewProc("DetachVirtualDisk") + procGetVirtualDiskPhysicalPath = modvirtdisk.NewProc("GetVirtualDiskPhysicalPath") + procOpenVirtualDisk = modvirtdisk.NewProc("OpenVirtualDisk") +) + +func attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (win32err error) { + r0, _, _ := syscall.Syscall6(procAttachVirtualDisk.Addr(), 6, uintptr(handle), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(attachVirtualDiskFlag), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped))) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) { + var _p0 *uint16 + _p0, win32err = syscall.UTF16PtrFromString(path) + if win32err != nil { + return + } + return _createVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, securityDescriptor, createVirtualDiskFlags, providerSpecificFlags, parameters, overlapped, handle) +} + +func _createVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags 
uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) { + r0, _, _ := syscall.Syscall9(procCreateVirtualDisk.Addr(), 9, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(createVirtualDiskFlags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(handle))) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (win32err error) { + r0, _, _ := syscall.Syscall(procDetachVirtualDisk.Addr(), 3, uintptr(handle), uintptr(detachVirtualDiskFlags), uintptr(providerSpecificFlags)) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (win32err error) { + r0, _, _ := syscall.Syscall(procGetVirtualDiskPhysicalPath.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(diskPathSizeInBytes)), uintptr(unsafe.Pointer(buffer))) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) { + var _p0 *uint16 + _p0, win32err = syscall.UTF16PtrFromString(path) + if win32err != nil { + return + } + return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, openVirtualDiskFlags, parameters, handle) +} + +func _openVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) { + r0, _, _ := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(openVirtualDiskFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle))) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} diff --git a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go new file mode 100644 index 0000000000..469b16f639 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go @@ -0,0 +1,419 @@ +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. + +package winio + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
+ return e +} + +var ( + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + modntdll = windows.NewLazySystemDLL("ntdll.dll") + modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") + + procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") + procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") + procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") + procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") + procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW") + procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") + procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") + procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") + procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW") + procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") + procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") + procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") + procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") + procRevertToSelf = modadvapi32.NewProc("RevertToSelf") + procBackupRead = modkernel32.NewProc("BackupRead") + procBackupWrite = modkernel32.NewProc("BackupWrite") + procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") + procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") + procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") + procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") + procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") + procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") + procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") + procLocalAlloc = modkernel32.NewProc("LocalAlloc") + procLocalFree = modkernel32.NewProc("LocalFree") + procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") + procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") + procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl") + procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U") + procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") + procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") +) + +func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { + var _p0 uint32 + if releaseAll { + _p0 = 1 + } + r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) + success = r0 != 0 + if true { + err = errnoErr(e1) + } + return +} + +func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0) + if r1 == 0 { + err = 
errnoErr(e1) + } + return +} + +func convertSidToStringSid(sid *byte, str **uint16) (err error) { + r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(str) + if err != nil { + return + } + return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) +} + +func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func convertStringSidToSid(str *uint16, sid **byte) (err error) { + r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(sid)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func getSecurityDescriptorLength(sd uintptr) (len uint32) { + r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0) + len = uint32(r0) + return +} + +func impersonateSelf(level uint32) (err error) { + r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(accountName) + if err != nil { + return + } + return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse) +} + +func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId) +} + +func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { + r1, _, e1 := 
syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + return _lookupPrivilegeName(_p0, luid, buffer, size) +} + +func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + var _p1 *uint16 + _p1, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _lookupPrivilegeValue(_p0, _p1, luid) +} + +func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) { + r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) { + var _p0 uint32 + if openAsSelf { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func revertToSelf() (err error) { + r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 uint32 + if abort { + _p1 = 1 + } + var _p2 uint32 + if processSecurity { + _p2 = 1 + } + r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 uint32 + if abort { + _p1 = 1 + } + var _p2 uint32 + if processSecurity { + _p2 = 1 + } + r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) { + r1, _, e1 := 
syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0) + newport = syscall.Handle(r0) + if newport == 0 { + err = errnoErr(e1) + } + return +} + +func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa) +} + +func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = errnoErr(e1) + } + return +} + +func getCurrentThread() (h syscall.Handle) { + r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) + h = syscall.Handle(r0) + return +} + +func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func localAlloc(uFlags uint32, length uint32) (ptr uintptr) { + r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0) + ptr = uintptr(r0) + return +} + +func localFree(mem uintptr) { + syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0) + return +} + +func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + 
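For reference, every wrapper in this generated file follows the same shape: resolve the DLL lazily with `windows.NewLazySystemDLL`, bind each procedure once with `NewProc`, invoke it through `syscall.Syscall`, and translate the raw result into a Go error via `errnoErr`. The sketch below is an illustrative stand-alone reduction of that pattern, not part of the vendored file; it uses kernel32's `GetTickCount64` only because it is a real zero-argument API that makes the resolve/call/translate steps easy to see.

```go
//go:build windows

package main

import (
	"fmt"
	"syscall"

	"golang.org/x/sys/windows"
)

var (
	modkernel32        = windows.NewLazySystemDLL("kernel32.dll")
	procGetTickCount64 = modkernel32.NewProc("GetTickCount64")
)

// getTickCount64 mirrors the generated-wrapper shape above. GetTickCount64
// cannot fail, so no errnoErr-style translation is needed; wrappers for
// fallible APIs would check the return value and convert the Errno instead.
func getTickCount64() uint64 {
	r0, _, _ := syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0)
	return uint64(r0) // r0 holds the full 64-bit value on 64-bit Windows
}

func main() {
	fmt.Printf("milliseconds since boot: %d\n", getTickCount64())
}
```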
+func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) { + r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0) + status = ntStatus(r0) + return +} + +func rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) { + r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0) + status = ntStatus(r0) + return +} + +func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) { + r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0) + status = ntStatus(r0) + return +} + +func rtlNtStatusToDosError(status ntStatus) (winerr error) { + r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0) + if r0 != 0 { + winerr = syscall.Errno(r0) + } + return +} + +func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) { + var _p0 uint32 + if wait { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/.gitattributes b/vendor/github.com/Microsoft/hcsshim/.gitattributes new file mode 100644 index 0000000000..dd0d09faac --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/.gitattributes @@ -0,0 +1,3 @@ +* text=auto eol=lf +vendor/** -text +test/vendor/** -text \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/.gitignore b/vendor/github.com/Microsoft/hcsshim/.gitignore new file mode 100644 index 0000000000..74b68f0ad9 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/.gitignore @@ -0,0 +1,53 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Ignore vscode setting files +.vscode/ +.idea/ + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ + +# Ignore gcs bin directory +service/bin/ +service/pkg/ + +*.img +*.vhd +*.tar.gz +*.tar + +# Make stuff +.rootfs-done +bin/* +rootfs/* +rootfs-conv/* +*.o +/build/ + +deps/* +out/* + +# protobuf files +# only files at root of the repo, otherwise this will cause issues with vendoring +/protobuf/* + +# test results +test/results + +# go workspace files +go.work +go.work.sum + +# keys and related artifacts +*.pem +*.cose diff --git a/vendor/github.com/Microsoft/hcsshim/.golangci.yml b/vendor/github.com/Microsoft/hcsshim/.golangci.yml new file mode 100644 index 0000000000..abe77f57a0 --- /dev/null +++ 
b/vendor/github.com/Microsoft/hcsshim/.golangci.yml @@ -0,0 +1,167 @@ +run: + timeout: 8m + tests: true + build-tags: + - admin + - functional + - integration + skip-dirs: + # paths are relative to module root + - cri-containerd/test-images + +linters: + enable: + # defaults: + # - errcheck + # - gosimple + # - govet + # - ineffassign + # - staticcheck + # - typecheck + # - unused + + - gofmt # whether code was gofmt-ed + - govet # enabled by default, but just to be sure + - nolintlint # ill-formed or insufficient nolint directives + - stylecheck # golint replacement + - thelper # test helpers without t.Helper() + +linters-settings: + govet: + enable-all: true + disable: + # struct order is often for Win32 compat + # also, ignore pointer bytes/GC issues for now until performance becomes an issue + - fieldalignment + check-shadowing: true + + stylecheck: + # https://staticcheck.io/docs/checks + checks: ["all"] + +issues: + exclude-rules: + # err is very often shadowed in nested scopes + - linters: + - govet + text: '^shadow: declaration of "err" shadows declaration' + + # path is relative to module root, which is ./test/ + - path: cri-containerd + linters: + - stylecheck + text: "^ST1003: should not use underscores in package names$" + source: "^package cri_containerd$" + + # This repo has a LOT of generated schema files, operating system bindings, and other + # things that ST1003 from stylecheck won't like (screaming case Windows api constants for example). + # There's also some structs that we *could* change the initialisms to be Go friendly + # (Id -> ID) but they're exported and it would be a breaking change. + # This makes it so that most new code, code that isn't supposed to be a pretty faithful + # mapping to an OS call/constants, or non-generated code still checks if we're following idioms, + # while ignoring the things that are just noise or would be more of a hassle than it'd be worth to change. 
+    - path: layer.go
+      linters:
+        - stylecheck
+      text: "ST1003:"
+
+    - path: hcsshim.go
+      linters:
+        - stylecheck
+      text: "ST1003:"
+
+    - path: cmd\\ncproxy\\nodenetsvc\\
+      linters:
+        - stylecheck
+      text: "ST1003:"
+
+    - path: cmd\\ncproxy_mock\\
+      linters:
+        - stylecheck
+      text: "ST1003:"
+
+    - path: internal\\hcs\\schema2\\
+      linters:
+        - stylecheck
+        - gofmt
+
+    - path: internal\\wclayer\\
+      linters:
+        - stylecheck
+      text: "ST1003:"
+
+    - path: hcn\\
+      linters:
+        - stylecheck
+      text: "ST1003:"
+
+    - path: internal\\hcs\\schema1\\
+      linters:
+        - stylecheck
+      text: "ST1003:"
+
+    - path: internal\\hns\\
+      linters:
+        - stylecheck
+      text: "ST1003:"
+
+    - path: ext4\\internal\\compactext4\\
+      linters:
+        - stylecheck
+      text: "ST1003:"
+
+    - path: ext4\\internal\\format\\
+      linters:
+        - stylecheck
+      text: "ST1003:"
+
+    - path: internal\\guestrequest\\
+      linters:
+        - stylecheck
+      text: "ST1003:"
+
+    - path: internal\\guest\\prot\\
+      linters:
+        - stylecheck
+      text: "ST1003:"
+
+    - path: internal\\windevice\\
+      linters:
+        - stylecheck
+      text: "ST1003:"
+
+    - path: internal\\winapi\\
+      linters:
+        - stylecheck
+      text: "ST1003:"
+
+    - path: internal\\vmcompute\\
+      linters:
+        - stylecheck
+      text: "ST1003:"
+
+    - path: internal\\regstate\\
+      linters:
+        - stylecheck
+      text: "ST1003:"
+
+    - path: internal\\hcserror\\
+      linters:
+        - stylecheck
+      text: "ST1003:"
+
+    # v0 APIs are deprecated, but still retained for backwards compatibility
+    - path: cmd\\ncproxy\\
+      linters:
+        - staticcheck
+      text: "^SA1019: .*(ncproxygrpc|nodenetsvc)[/]?v0"
+
+    - path: internal\\tools\\networkagent
+      linters:
+        - staticcheck
+      text: "^SA1019: .*nodenetsvc[/]?v0"
+
+    - path: internal\\vhdx\\info
+      linters:
+        - stylecheck
+      text: "ST1003:"
diff --git a/vendor/github.com/Microsoft/hcsshim/CODEOWNERS b/vendor/github.com/Microsoft/hcsshim/CODEOWNERS
new file mode 100644
index 0000000000..f4c5a07d14
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/CODEOWNERS
@@ -0,0 +1 @@
+* @microsoft/containerplat
\ No newline at end of file
diff --git a/vendor/github.com/Microsoft/hcsshim/LICENSE b/vendor/github.com/Microsoft/hcsshim/LICENSE
new file mode 100644
index 0000000000..49d21669ae
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Microsoft
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/Makefile b/vendor/github.com/Microsoft/hcsshim/Makefile new file mode 100644 index 0000000000..d8eb30b863 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/Makefile @@ -0,0 +1,111 @@ +BASE:=base.tar.gz +DEV_BUILD:=0 + +GO:=go +GO_FLAGS:=-ldflags "-s -w" # strip Go binaries +CGO_ENABLED:=0 +GOMODVENDOR:= + +CFLAGS:=-O2 -Wall +LDFLAGS:=-static -s # strip C binaries + +GO_FLAGS_EXTRA:= +ifeq "$(GOMODVENDOR)" "1" +GO_FLAGS_EXTRA += -mod=vendor +endif +GO_BUILD_TAGS:= +ifneq ($(strip $(GO_BUILD_TAGS)),) +GO_FLAGS_EXTRA += -tags="$(GO_BUILD_TAGS)" +endif +GO_BUILD:=CGO_ENABLED=$(CGO_ENABLED) $(GO) build $(GO_FLAGS) $(GO_FLAGS_EXTRA) + +SRCROOT=$(dir $(abspath $(firstword $(MAKEFILE_LIST)))) +# additional directories to search for rule prerequisites and targets +VPATH=$(SRCROOT) + +DELTA_TARGET=out/delta.tar.gz + +ifeq "$(DEV_BUILD)" "1" +DELTA_TARGET=out/delta-dev.tar.gz +endif + +# The link aliases for gcstools +GCS_TOOLS=\ + generichook \ + install-drivers + +.PHONY: all always rootfs test + +.DEFAULT_GOAL := all + +all: out/initrd.img out/rootfs.tar.gz + +clean: + find -name '*.o' -print0 | xargs -0 -r rm + rm -rf bin deps rootfs out + +test: + cd $(SRCROOT) && $(GO) test -v ./internal/guest/... + +rootfs: out/rootfs.vhd + +out/rootfs.vhd: out/rootfs.tar.gz bin/cmd/tar2ext4 + gzip -f -d ./out/rootfs.tar.gz + bin/cmd/tar2ext4 -vhd -i ./out/rootfs.tar -o $@ + +out/rootfs.tar.gz: out/initrd.img + rm -rf rootfs-conv + mkdir rootfs-conv + gunzip -c out/initrd.img | (cd rootfs-conv && cpio -imd) + tar -zcf $@ -C rootfs-conv . + rm -rf rootfs-conv + +out/initrd.img: $(BASE) $(DELTA_TARGET) $(SRCROOT)/hack/catcpio.sh + $(SRCROOT)/hack/catcpio.sh "$(BASE)" $(DELTA_TARGET) > out/initrd.img.uncompressed + gzip -c out/initrd.img.uncompressed > $@ + rm out/initrd.img.uncompressed + +# This target includes utilities which may be useful for testing purposes. +out/delta-dev.tar.gz: out/delta.tar.gz bin/internal/tools/snp-report + rm -rf rootfs-dev + mkdir rootfs-dev + tar -xzf out/delta.tar.gz -C rootfs-dev + cp bin/internal/tools/snp-report rootfs-dev/bin/ + tar -zcf $@ -C rootfs-dev . + rm -rf rootfs-dev + +out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths Makefile + @mkdir -p out + rm -rf rootfs + mkdir -p rootfs/bin/ + mkdir -p rootfs/info/ + cp bin/init rootfs/ + cp bin/vsockexec rootfs/bin/ + cp bin/cmd/gcs rootfs/bin/ + cp bin/cmd/gcstools rootfs/bin/ + cp bin/cmd/hooks/wait-paths rootfs/bin/ + for tool in $(GCS_TOOLS); do ln -s gcstools rootfs/bin/$$tool; done + git -C $(SRCROOT) rev-parse HEAD > rootfs/info/gcs.commit && \ + git -C $(SRCROOT) rev-parse --abbrev-ref HEAD > rootfs/info/gcs.branch && \ + date --iso-8601=minute --utc > rootfs/info/tar.date + $(if $(and $(realpath $(subst .tar,.testdata.json,$(BASE))), $(shell which jq)), \ + jq -r '.IMAGE_NAME' $(subst .tar,.testdata.json,$(BASE)) 2>/dev/null > rootfs/info/image.name && \ + jq -r '.DATETIME' $(subst .tar,.testdata.json,$(BASE)) 2>/dev/null > rootfs/info/build.date) + tar -zcf $@ -C rootfs . 
+	rm -rf rootfs
+
+bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths bin/cmd/tar2ext4 bin/internal/tools/snp-report:
+	@mkdir -p $(dir $@)
+	GOOS=linux $(GO_BUILD) -o $@ $(SRCROOT)/$(@:bin/%=%)
+
+bin/vsockexec: vsockexec/vsockexec.o vsockexec/vsock.o
+	@mkdir -p bin
+	$(CC) $(LDFLAGS) -o $@ $^
+
+bin/init: init/init.o vsockexec/vsock.o
+	@mkdir -p bin
+	$(CC) $(LDFLAGS) -o $@ $^
+
+%.o: %.c
+	@mkdir -p $(dir $@)
+	$(CC) $(CFLAGS) $(CPPFLAGS) -c -o $@ $<
diff --git a/vendor/github.com/Microsoft/hcsshim/Protobuild.toml b/vendor/github.com/Microsoft/hcsshim/Protobuild.toml
new file mode 100644
index 0000000000..17145bb258
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/Protobuild.toml
@@ -0,0 +1,25 @@
+version = "2"
+generators = ["go", "go-grpc"]
+
+# Control protoc include paths.
+[includes]
+  before = ["./protobuf"]
+
+  # defaults are "/usr/local/include" and "/usr/include", which don't exist on Windows.
+  # override defaults to suppress errors about non-existent directories.
+  after = []
+
+# This section maps protobuf imports to Go packages.
+[packages]
+  # github.com/containerd/cgroups proto files still list their go path as "github.com/containerd/cgroups/cgroup1/stats"
+  "github.com/containerd/cgroups/v3/cgroup1/stats/metrics.proto" = "github.com/containerd/cgroups/v3/cgroup1/stats"
+
+[[overrides]]
+prefixes = [
+    "github.com/Microsoft/hcsshim/internal/shimdiag",
+    "github.com/Microsoft/hcsshim/internal/extendedtask",
+    "github.com/Microsoft/hcsshim/internal/computeagent",
+    "github.com/Microsoft/hcsshim/internal/ncproxyttrpc",
+    "github.com/Microsoft/hcsshim/internal/vmservice",
+]
+generators = ["go", "go-ttrpc"]
diff --git a/vendor/github.com/Microsoft/hcsshim/README.md b/vendor/github.com/Microsoft/hcsshim/README.md
new file mode 100644
index 0000000000..5a1361539b
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/README.md
@@ -0,0 +1,102 @@
+# hcsshim
+
+[![Build status](https://github.com/microsoft/hcsshim/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/microsoft/hcsshim/actions?query=branch%3Amaster)
+
+This package contains the Golang interface for using the Windows [Host Compute Service](https://techcommunity.microsoft.com/t5/containers/introducing-the-host-compute-service-hcs/ba-p/382332) (HCS) to launch and manage [Windows Containers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/about/). It also contains other helpers and functions for managing Windows Containers such as the Golang interface for the Host Network Service (HNS), as well as code for the [guest agent](./internal/guest/README.md) (commonly referred to as the GCS or Guest Compute Service in the codebase) used to support running Linux Hyper-V containers.
+
+It is primarily used in the [Moby](https://github.com/moby/moby) and [Containerd](https://github.com/containerd/containerd) projects, but it can be freely used by other projects as well.
+
+## Building
+
+While this repository can be used as a library of sorts to call the HCS apis, there are a couple of binaries built out of the repository as well. The main ones are the Linux guest agent and an implementation of the [runtime v2 containerd shim api](https://github.com/containerd/containerd/blob/master/runtime/v2/README.md).
+### Linux Hyper-V Container Guest Agent
+
+To build the Linux guest agent itself, all that's needed is to set your GOOS to "linux" and build out of ./cmd/gcs.
+```powershell
+C:\> $env:GOOS="linux"
+C:\> go build .\cmd\gcs\
+```
+
+or on a Linux machine
+```sh
+> go build ./cmd/gcs
+```
+
+If you want it packaged inside of a rootfs to boot alongside all of the other tools, then you'll need to provide a rootfs for it to be packaged into. An easy way is to export the rootfs of a container.
+
+```sh
+docker pull busybox
+docker run --name base_image_container busybox
+docker export base_image_container | gzip > base.tar.gz
+BASE=./base.tar.gz
+make all
+```
+
+If the build is successful, in the `./out` folder you should see:
+```sh
+> ls ./out/
+delta.tar.gz initrd.img rootfs.tar.gz
+```
+
+### Containerd Shim
+For info on the Runtime V2 API: https://github.com/containerd/containerd/blob/master/runtime/v2/README.md.
+
+Contrary to the typical Linux architecture of shim -> runc, the runhcs shim is used both to launch and manage the lifetime of containers.
+
+```powershell
+C:\> $env:GOOS="windows"
+C:\> go build .\cmd\containerd-shim-runhcs-v1
+```
+
+Then place the binary in the same directory where Containerd is located in your environment. A default Containerd configuration file can be generated by running:
+```powershell
+.\containerd.exe config default | Out-File "C:\Program Files\containerd\config.toml" -Encoding ascii
+```
+
+This config file will already have the shim set as the default runtime for cri interactions.
+
+To try the shim out with ctr.exe:
+```powershell
+C:\> ctr.exe run --runtime io.containerd.runhcs.v1 --rm mcr.microsoft.com/windows/nanoserver:2004 windows-test cmd /c "echo Hello World!"
+```
+
+## Contributing
+
+This project welcomes contributions and suggestions. Most contributions require you to agree to a
+Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
+the rights to use your contribution. For details, visit https://cla.microsoft.com.
+
+When you submit a pull request, a CLA-bot will automatically determine whether you need to provide
+a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions
+provided by the bot. You will only need to do this once across all repos using our CLA.
+
+We also require that contributors [sign their commits](https://git-scm.com/docs/git-commit) using `git commit -s` or `git commit --signoff` to
+certify they either authored the work themselves or otherwise have permission to use it in this project. Please see https://developercertificate.org/ for
+more info, as well as to make sure that you can attest to the rules listed. Our CI uses the [DCO Github app](https://github.com/apps/dco) to ensure
+that all commits in a given PR are signed-off.
+
+## Code of Conduct
+
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
+For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
+contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
+
+## Dependencies
+
+This project requires Golang 1.17 or newer to build.
+
+For system requirements to run this project, see the Microsoft docs on [Windows Container requirements](https://docs.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/system-requirements).
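Beyond building the binaries, the repository is consumed as a library. The following is a minimal, illustrative sketch (not taken from the README) of calling one of the storage helpers this patch vendors, `computestorage.DestroyLayer` from `computestorage/destroy.go` later in this diff; the layer path is a placeholder.

```go
//go:build windows

package main

import (
	"context"
	"log"

	"github.com/Microsoft/hcsshim/computestorage"
)

func main() {
	// Placeholder path; DestroyLayer removes the on-disk container layer at layerPath.
	layerPath := `C:\layers\scratch`
	if err := computestorage.DestroyLayer(context.Background(), layerPath); err != nil {
		log.Fatalf("destroy layer: %v", err)
	}
}
```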
+
+## Reporting Security Issues
+
+Security issues and bugs should be reported privately, via email, to the Microsoft Security
+Response Center (MSRC) at [secure@microsoft.com](mailto:secure@microsoft.com). You should
+receive a response within 24 hours. If for some reason you do not, please follow up via
+email to ensure we received your original message. Further information, including the
+[MSRC PGP](https://technet.microsoft.com/en-us/security/dn606155) key, can be found in
+the [Security TechCenter](https://technet.microsoft.com/en-us/security/default).
+
+For additional details, see [Report a Computer Security Vulnerability](https://technet.microsoft.com/en-us/security/ff852094.aspx) on Technet.
+
+---------------
+Copyright (c) 2018 Microsoft Corp. All rights reserved.
diff --git a/vendor/github.com/Microsoft/hcsshim/SECURITY.md b/vendor/github.com/Microsoft/hcsshim/SECURITY.md
new file mode 100644
index 0000000000..869fdfe2b2
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/SECURITY.md
@@ -0,0 +1,41 @@
+
+
+## Security
+
+Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
+
+If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below.
+
+## Reporting Security Issues
+
+**Please do not report security vulnerabilities through public GitHub issues.**
+
+Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report).
+
+If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey).
+
+You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc).
+
+Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
+
+  * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
+  * Full paths of source file(s) related to the manifestation of the issue
+  * The location of the affected source code (tag/branch/commit or direct URL)
+  * Any special configuration required to reproduce the issue
+  * Step-by-step instructions to reproduce the issue
+  * Proof-of-concept or exploit code (if possible)
+  * Impact of the issue, including how an attacker might exploit the issue
+
+This information will help us triage your report more quickly.
+
+If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award.
Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs.
+
+## Preferred Languages
+
+We prefer all communications to be in English.
+
+## Policy
+
+Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd).
+
+
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go b/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go
new file mode 100644
index 0000000000..54c4b3bc4a
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go
@@ -0,0 +1,40 @@
+//go:build windows
+
+package computestorage
+
+import (
+	"context"
+	"encoding/json"
+
+	"github.com/Microsoft/hcsshim/internal/oc"
+	"github.com/pkg/errors"
+	"go.opencensus.io/trace"
+)
+
+// AttachLayerStorageFilter sets up the layer storage filter on a writable
+// container layer.
+//
+// `layerPath` is a path to the directory where the writable layer is mounted. If the
+// path does not end in a `\`, the platform will append it automatically.
+//
+// `layerData` is the parent read-only layer data.
+func AttachLayerStorageFilter(ctx context.Context, layerPath string, layerData LayerData) (err error) {
+	title := "hcsshim::AttachLayerStorageFilter"
+	ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+	defer span.End()
+	defer func() { oc.SetSpanStatus(span, err) }()
+	span.AddAttributes(
+		trace.StringAttribute("layerPath", layerPath),
+	)
+
+	bytes, err := json.Marshal(layerData)
+	if err != nil {
+		return err
+	}
+
+	err = hcsAttachLayerStorageFilter(layerPath, string(bytes))
+	if err != nil {
+		return errors.Wrap(err, "failed to attach layer storage filter")
+	}
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go b/vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go
new file mode 100644
index 0000000000..5058d3b55e
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go
@@ -0,0 +1,28 @@
+//go:build windows
+
+package computestorage
+
+import (
+	"context"
+
+	"github.com/Microsoft/hcsshim/internal/oc"
+	"github.com/pkg/errors"
+	"go.opencensus.io/trace"
+)
+
+// DestroyLayer deletes a container layer.
+//
+// `layerPath` is a path to a directory containing the layer to destroy.
+func DestroyLayer(ctx context.Context, layerPath string) (err error) {
+	title := "hcsshim::DestroyLayer"
+	ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+	defer span.End()
+	defer func() { oc.SetSpanStatus(span, err) }()
+	span.AddAttributes(trace.StringAttribute("layerPath", layerPath))
+
+	err = hcsDestroyLayer(layerPath)
+	if err != nil {
+		return errors.Wrap(err, "failed to destroy layer")
+	}
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go b/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go
new file mode 100644
index 0000000000..daf1bfff20
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go
@@ -0,0 +1,28 @@
+//go:build windows
+
+package computestorage
+
+import (
+	"context"
+
+	"github.com/Microsoft/hcsshim/internal/oc"
+	"github.com/pkg/errors"
+	"go.opencensus.io/trace"
+)
+
+// DetachLayerStorageFilter detaches the layer storage filter on a writable container layer.
+//
+// `layerPath` is a path to a directory containing the layer whose storage filter should be detached.
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go b/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go
new file mode 100644
index 0000000000..daf1bfff20
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go
@@ -0,0 +1,28 @@
+//go:build windows
+
+package computestorage
+
+import (
+	"context"
+
+	"github.com/Microsoft/hcsshim/internal/oc"
+	"github.com/pkg/errors"
+	"go.opencensus.io/trace"
+)
+
+// DetachLayerStorageFilter detaches the layer storage filter on a writable container layer.
+//
+// `layerPath` is a path to a directory containing the writable layer.
+func DetachLayerStorageFilter(ctx context.Context, layerPath string) (err error) {
+	title := "hcsshim::DetachLayerStorageFilter"
+	ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+	defer span.End()
+	defer func() { oc.SetSpanStatus(span, err) }()
+	span.AddAttributes(trace.StringAttribute("layerPath", layerPath))
+
+	err = hcsDetachLayerStorageFilter(layerPath)
+	if err != nil {
+		return errors.Wrap(err, "failed to detach layer storage filter")
+	}
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/export.go b/vendor/github.com/Microsoft/hcsshim/computestorage/export.go
new file mode 100644
index 0000000000..c6370a5c9a
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/export.go
@@ -0,0 +1,48 @@
+//go:build windows
+
+package computestorage
+
+import (
+	"context"
+	"encoding/json"
+
+	"github.com/Microsoft/hcsshim/internal/oc"
+	"github.com/pkg/errors"
+	"go.opencensus.io/trace"
+)
+
+// ExportLayer exports a container layer.
+//
+// `layerPath` is a path to a directory containing the layer to export.
+//
+// `exportFolderPath` is a pre-existing folder to export the layer to.
+//
+// `layerData` is the parent layer data.
+//
+// `options` are the export options applied to the exported layer.
+func ExportLayer(ctx context.Context, layerPath, exportFolderPath string, layerData LayerData, options ExportLayerOptions) (err error) {
+	title := "hcsshim::ExportLayer"
+	ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+	defer span.End()
+	defer func() { oc.SetSpanStatus(span, err) }()
+	span.AddAttributes(
+		trace.StringAttribute("layerPath", layerPath),
+		trace.StringAttribute("exportFolderPath", exportFolderPath),
+	)
+
+	ldBytes, err := json.Marshal(layerData)
+	if err != nil {
+		return err
+	}
+
+	oBytes, err := json.Marshal(options)
+	if err != nil {
+		return err
+	}
+
+	err = hcsExportLayer(layerPath, exportFolderPath, string(ldBytes), string(oBytes))
+	if err != nil {
+		return errors.Wrap(err, "failed to export layer")
+	}
+	return nil
+}
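FormatWritableLayerVhd, vendored in the next file, takes a VHD handle rather than a path. A hedged sketch of obtaining such a handle with go-winio's virtdisk wrappers on builds newer than ws2019; the vhdx path is hypothetical, and the syscall.Handle to windows.Handle conversion mirrors what helpers.go in this package does.

```go
package main

import (
	"context"
	"log"
	"syscall"

	"github.com/Microsoft/go-winio/vhd"
	"github.com/Microsoft/hcsshim/computestorage"
	"golang.org/x/sys/windows"
)

func main() {
	ctx := context.Background()

	// Open an existing scratch vhdx (hypothetical path) via the virtdisk APIs.
	handle, err := vhd.OpenVirtualDisk(`C:\layers\scratch\sandbox.vhdx`,
		vhd.VirtualDiskAccessNone, vhd.OpenVirtualDiskFlagNone)
	if err != nil {
		log.Fatal(err)
	}
	defer syscall.CloseHandle(handle)

	// Formats the disk for use as a writable layer; the platform mounts it
	// temporarily if it is not already mounted.
	if err := computestorage.FormatWritableLayerVhd(ctx, windows.Handle(handle)); err != nil {
		log.Fatal(err)
	}
}
```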
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/format.go b/vendor/github.com/Microsoft/hcsshim/computestorage/format.go
new file mode 100644
index 0000000000..2140e5c9fc
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/format.go
@@ -0,0 +1,32 @@
+//go:build windows
+
+package computestorage
+
+import (
+	"context"
+
+	"github.com/Microsoft/hcsshim/internal/oc"
+	"github.com/pkg/errors"
+	"golang.org/x/sys/windows"
+)
+
+// FormatWritableLayerVhd formats a virtual disk for use as a writable container layer.
+//
+// If the VHD is not mounted it will be temporarily mounted.
+//
+// NOTE: This API had a breaking change in the operating system after Windows Server 2019.
+// On ws2019 the API expects to get passed a file handle from CreateFile for the vhd that
+// the caller wants to format. On > ws2019, it's expected that the caller passes a vhd handle
+// that can be obtained from the virtdisk APIs.
+func FormatWritableLayerVhd(ctx context.Context, vhdHandle windows.Handle) (err error) {
+	title := "hcsshim::FormatWritableLayerVhd"
+	ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+	defer span.End()
+	defer func() { oc.SetSpanStatus(span, err) }()
+
+	err = hcsFormatWritableLayerVhd(vhdHandle)
+	if err != nil {
+		return errors.Wrap(err, "failed to format writable layer vhd")
+	}
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go b/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go
new file mode 100644
index 0000000000..858c84601c
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go
@@ -0,0 +1,199 @@
+//go:build windows
+
+package computestorage
+
+import (
+	"context"
+	"os"
+	"path/filepath"
+	"syscall"
+
+	"github.com/Microsoft/go-winio/vhd"
+	"github.com/Microsoft/hcsshim/internal/memory"
+	"github.com/pkg/errors"
+	"golang.org/x/sys/windows"
+
+	"github.com/Microsoft/hcsshim/internal/security"
+)
+
+const (
+	defaultVHDXBlockSizeInMB = 1
+)
+
+// SetupContainerBaseLayer is a helper to set up a container's scratch space. It
+// will create and format the vhdx files inside, and the size is configurable with the sizeInGB
+// parameter.
+//
+// `layerPath` is the path to the base container layer on disk.
+//
+// `baseVhdPath` is the path to where the base vhdx for the base layer should be created.
+//
+// `diffVhdPath` is the path where the differencing disk for the base layer should be created.
+//
+// `sizeInGB` is the size in gigabytes to make the base vhdx.
+func SetupContainerBaseLayer(ctx context.Context, layerPath, baseVhdPath, diffVhdPath string, sizeInGB uint64) (err error) {
+	var (
+		hivesPath  = filepath.Join(layerPath, "Hives")
+		layoutPath = filepath.Join(layerPath, "Layout")
+	)
+
+	// We need to remove the hives directory and layout file as `SetupBaseOSLayer` fails if these files
+	// already exist. `SetupBaseOSLayer` will create these files internally. We also remove the base and
+	// differencing disks if they exist in case we're asking for a different size.
+	if _, err := os.Stat(hivesPath); err == nil {
+		if err := os.RemoveAll(hivesPath); err != nil {
+			return errors.Wrap(err, "failed to remove preexisting hives directory")
+		}
+	}
+	if _, err := os.Stat(layoutPath); err == nil {
+		if err := os.RemoveAll(layoutPath); err != nil {
+			return errors.Wrap(err, "failed to remove preexisting layout file")
+		}
+	}
+
+	if _, err := os.Stat(baseVhdPath); err == nil {
+		if err := os.RemoveAll(baseVhdPath); err != nil {
+			return errors.Wrap(err, "failed to remove base vhdx path")
+		}
+	}
+	if _, err := os.Stat(diffVhdPath); err == nil {
+		if err := os.RemoveAll(diffVhdPath); err != nil {
+			return errors.Wrap(err, "failed to remove differencing vhdx")
+		}
+	}
+
+	createParams := &vhd.CreateVirtualDiskParameters{
+		Version: 2,
+		Version2: vhd.CreateVersion2{
+			MaximumSize:      sizeInGB * memory.GiB,
+			BlockSizeInBytes: defaultVHDXBlockSizeInMB * memory.MiB,
+		},
+	}
+	handle, err := vhd.CreateVirtualDisk(baseVhdPath, vhd.VirtualDiskAccessNone, vhd.CreateVirtualDiskFlagNone, createParams)
+	if err != nil {
+		return errors.Wrap(err, "failed to create vhdx")
+	}
+
+	defer func() {
+		if err != nil {
+			_ = syscall.CloseHandle(handle)
+			os.RemoveAll(baseVhdPath)
+			os.RemoveAll(diffVhdPath)
+		}
+	}()
+
+	if err = FormatWritableLayerVhd(ctx, windows.Handle(handle)); err != nil {
+		return err
+	}
+	// Base vhd handle must be closed before calling SetupBaseLayer in case of Container layer
+	if err = syscall.CloseHandle(handle); err != nil {
+		return errors.Wrap(err, "failed to close vhdx handle")
+	}
+
+	options := OsLayerOptions{
+		Type: OsLayerTypeContainer,
+	}
+
+	// SetupBaseOSLayer expects an empty vhd handle for a container layer and will
+	// error out otherwise.
+	if err = SetupBaseOSLayer(ctx, layerPath, 0, options); err != nil {
+		return err
+	}
+	// Create the differencing disk that will be what's copied for the final rw layer
+	// for a container.
+	if err = vhd.CreateDiffVhd(diffVhdPath, baseVhdPath, defaultVHDXBlockSizeInMB); err != nil {
+		return errors.Wrap(err, "failed to create differencing disk")
+	}
+
+	if err = security.GrantVmGroupAccess(baseVhdPath); err != nil {
+		return errors.Wrapf(err, "failed to grant vm group access to %s", baseVhdPath)
+	}
+	if err = security.GrantVmGroupAccess(diffVhdPath); err != nil {
+		return errors.Wrapf(err, "failed to grant vm group access to %s", diffVhdPath)
+	}
+	return nil
+}
+
+// SetupUtilityVMBaseLayer is a helper to set up a UVM's scratch space. It will create and format
+// the vhdx inside, and the size is configurable by the sizeInGB parameter.
+//
+// `uvmPath` is the path to the UtilityVM filesystem.
+//
+// `baseVhdPath` is the path to where the base vhdx for the UVM should be created.
+//
+// `diffVhdPath` is the path where the differencing disk for the UVM should be created.
+//
+// `sizeInGB` specifies the size in gigabytes to make the base vhdx.
+func SetupUtilityVMBaseLayer(ctx context.Context, uvmPath, baseVhdPath, diffVhdPath string, sizeInGB uint64) (err error) {
+	// Remove the base and differencing disks if they exist in case we're asking for a different size.
+	if _, err := os.Stat(baseVhdPath); err == nil {
+		if err := os.RemoveAll(baseVhdPath); err != nil {
+			return errors.Wrap(err, "failed to remove base vhdx")
+		}
+	}
+	if _, err := os.Stat(diffVhdPath); err == nil {
+		if err := os.RemoveAll(diffVhdPath); err != nil {
+			return errors.Wrap(err, "failed to remove differencing vhdx")
+		}
+	}
+
+	// Just create the vhdx for the utility VM layer; no need to format it.
+	createParams := &vhd.CreateVirtualDiskParameters{
+		Version: 2,
+		Version2: vhd.CreateVersion2{
+			MaximumSize:      sizeInGB * memory.GiB,
+			BlockSizeInBytes: defaultVHDXBlockSizeInMB * memory.MiB,
+		},
+	}
+	handle, err := vhd.CreateVirtualDisk(baseVhdPath, vhd.VirtualDiskAccessNone, vhd.CreateVirtualDiskFlagNone, createParams)
+	if err != nil {
+		return errors.Wrap(err, "failed to create vhdx")
+	}
+
+	defer func() {
+		if err != nil {
+			_ = syscall.CloseHandle(handle)
+			os.RemoveAll(baseVhdPath)
+			os.RemoveAll(diffVhdPath)
+		}
+	}()
+
+	// If it is a UtilityVM layer then the base vhdx must be attached when calling
+	// `SetupBaseOSLayer`
+	attachParams := &vhd.AttachVirtualDiskParameters{
+		Version: 2,
+	}
+	if err := vhd.AttachVirtualDisk(handle, vhd.AttachVirtualDiskFlagNone, attachParams); err != nil {
+		return errors.Wrapf(err, "failed to attach virtual disk")
+	}
+
+	options := OsLayerOptions{
+		Type: OsLayerTypeVM,
+	}
+	if err := SetupBaseOSLayer(ctx, uvmPath, windows.Handle(handle), options); err != nil {
+		return err
+	}
+
+	// Detach and close the handle after setting up the layer as we don't need the handle
+	// for anything else and we no longer need to be attached either.
+	if err = vhd.DetachVirtualDisk(handle); err != nil {
+		return errors.Wrap(err, "failed to detach vhdx")
+	}
+	if err = syscall.CloseHandle(handle); err != nil {
+		return errors.Wrap(err, "failed to close vhdx handle")
+	}
+
+	// Create the differencing disk that will be what's copied for the final rw layer
+	// for a container.
+	if err = vhd.CreateDiffVhd(diffVhdPath, baseVhdPath, defaultVHDXBlockSizeInMB); err != nil {
+		return errors.Wrap(err, "failed to create differencing disk")
+	}
+
+	if err := security.GrantVmGroupAccess(baseVhdPath); err != nil {
+		return errors.Wrapf(err, "failed to grant vm group access to %s", baseVhdPath)
+	}
+	if err := security.GrantVmGroupAccess(diffVhdPath); err != nil {
+		return errors.Wrapf(err, "failed to grant vm group access to %s", diffVhdPath)
+	}
+	return nil
+}
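A quick sketch of calling the scratch-space helper defined above. All paths and the 20 GiB size are illustrative only, and errors are simply fatal here.

```go
package main

import (
	"context"
	"log"

	"github.com/Microsoft/hcsshim/computestorage"
)

func main() {
	ctx := context.Background()

	// Creates and formats the base vhdx plus its differencing disk for a
	// container scratch space; file names are hypothetical.
	err := computestorage.SetupContainerBaseLayer(ctx,
		`C:\layers\base`,                 // extracted base layer on disk
		`C:\layers\base\blank-base.vhdx`, // base vhdx to create
		`C:\layers\base\blank.vhdx`,      // differencing disk to create
		20,                               // size in GiB
	)
	if err != nil {
		log.Fatal(err)
	}
}
```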
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/import.go b/vendor/github.com/Microsoft/hcsshim/computestorage/import.go
new file mode 100644
index 0000000000..e1c87416a3
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/import.go
@@ -0,0 +1,43 @@
+//go:build windows
+
+package computestorage
+
+import (
+	"context"
+	"encoding/json"
+
+	"github.com/Microsoft/hcsshim/internal/oc"
+	"github.com/pkg/errors"
+	"go.opencensus.io/trace"
+)
+
+// ImportLayer imports a container layer.
+//
+// `layerPath` is a path to a directory to import the layer to. If the directory
+// does not exist it will be automatically created.
+//
+// `sourceFolderPath` is a pre-existing folder that contains the layer to
+// import.
+//
+// `layerData` is the parent layer data.
+func ImportLayer(ctx context.Context, layerPath, sourceFolderPath string, layerData LayerData) (err error) {
+	title := "hcsshim::ImportLayer"
+	ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+	defer span.End()
+	defer func() { oc.SetSpanStatus(span, err) }()
+	span.AddAttributes(
+		trace.StringAttribute("layerPath", layerPath),
+		trace.StringAttribute("sourceFolderPath", sourceFolderPath),
+	)
+
+	bytes, err := json.Marshal(layerData)
+	if err != nil {
+		return err
+	}
+
+	err = hcsImportLayer(layerPath, sourceFolderPath, string(bytes))
+	if err != nil {
+		return errors.Wrap(err, "failed to import layer")
+	}
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go b/vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go
new file mode 100644
index 0000000000..d0c6216056
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go
@@ -0,0 +1,40 @@
+//go:build windows
+
+package computestorage
+
+import (
+	"context"
+	"encoding/json"
+
+	"github.com/Microsoft/hcsshim/internal/oc"
+	"github.com/pkg/errors"
+	"go.opencensus.io/trace"
+)
+
+// InitializeWritableLayer initializes a writable layer for a container.
+//
+// `layerPath` is a path to a directory where the layer is mounted. If the
+// path does not end in a `\` the platform will append it automatically.
+//
+// `layerData` is the parent read-only layer data.
+func InitializeWritableLayer(ctx context.Context, layerPath string, layerData LayerData) (err error) {
+	title := "hcsshim::InitializeWritableLayer"
+	ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+	defer span.End()
+	defer func() { oc.SetSpanStatus(span, err) }()
+	span.AddAttributes(
+		trace.StringAttribute("layerPath", layerPath),
+	)
+
+	bytes, err := json.Marshal(layerData)
+	if err != nil {
+		return err
+	}
+
+	// Options are not used in the platform as of RS5
+	err = hcsInitializeWritableLayer(layerPath, string(bytes), "")
+	if err != nil {
+		return errors.Wrap(err, "failed to initialize container layer")
+	}
+	return nil
+}
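The next file exposes GetLayerVhdMountPath, which resolves a volume path from a VHD handle. A sketch under the assumption that the disk needs to be attached before querying (the attach parameters mirror those used in helpers.go above); the vhdx path is hypothetical.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"syscall"

	"github.com/Microsoft/go-winio/vhd"
	"github.com/Microsoft/hcsshim/computestorage"
	"golang.org/x/sys/windows"
)

func main() {
	ctx := context.Background()

	handle, err := vhd.OpenVirtualDisk(`C:\layers\scratch\sandbox.vhdx`,
		vhd.VirtualDiskAccessNone, vhd.OpenVirtualDiskFlagNone)
	if err != nil {
		log.Fatal(err)
	}
	defer syscall.CloseHandle(handle)

	// Attach so the platform has a mounted disk to resolve.
	if err := vhd.AttachVirtualDisk(handle, vhd.AttachVirtualDiskFlagNone,
		&vhd.AttachVirtualDiskParameters{Version: 2}); err != nil {
		log.Fatal(err)
	}
	defer vhd.DetachVirtualDisk(handle)

	path, err := computestorage.GetLayerVhdMountPath(ctx, windows.Handle(handle))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("volume path:", path)
}
```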
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/mount.go b/vendor/github.com/Microsoft/hcsshim/computestorage/mount.go
new file mode 100644
index 0000000000..4f4d8ebf2f
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/mount.go
@@ -0,0 +1,28 @@
+//go:build windows
+
+package computestorage
+
+import (
+	"context"
+
+	"github.com/Microsoft/hcsshim/internal/interop"
+	"github.com/Microsoft/hcsshim/internal/oc"
+	"github.com/pkg/errors"
+	"golang.org/x/sys/windows"
+)
+
+// GetLayerVhdMountPath returns the volume path for a virtual disk of a writable container layer.
+func GetLayerVhdMountPath(ctx context.Context, vhdHandle windows.Handle) (path string, err error) {
+	title := "hcsshim::GetLayerVhdMountPath"
+	ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+	defer span.End()
+	defer func() { oc.SetSpanStatus(span, err) }()
+
+	var mountPath *uint16
+	err = hcsGetLayerVhdMountPath(vhdHandle, &mountPath)
+	if err != nil {
+		return "", errors.Wrap(err, "failed to get vhd mount path")
+	}
+	path = interop.ConvertAndFreeCoTaskMemString(mountPath)
+	return path, nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/setup.go b/vendor/github.com/Microsoft/hcsshim/computestorage/setup.go
new file mode 100644
index 0000000000..1c685aed0a
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/setup.go
@@ -0,0 +1,80 @@
+//go:build windows
+
+package computestorage
+
+import (
+	"context"
+	"encoding/json"
+
+	"github.com/Microsoft/hcsshim/internal/oc"
+	"github.com/Microsoft/hcsshim/osversion"
+	"github.com/pkg/errors"
+	"go.opencensus.io/trace"
+	"golang.org/x/sys/windows"
+)
+
+// SetupBaseOSLayer sets up a layer that contains a base OS for a container.
+//
+// `layerPath` is a path to a directory containing the layer.
+//
+// `vhdHandle` is an empty file handle if `options.Type == OsLayerTypeContainer`,
+// or else it is a file handle to the 'SystemTemplateBase.vhdx' if `options.Type
+// == OsLayerTypeVM`.
+//
+// `options` are the options applied while processing the layer.
+func SetupBaseOSLayer(ctx context.Context, layerPath string, vhdHandle windows.Handle, options OsLayerOptions) (err error) {
+	title := "hcsshim::SetupBaseOSLayer"
+	ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+	defer span.End()
+	defer func() { oc.SetSpanStatus(span, err) }()
+	span.AddAttributes(
+		trace.StringAttribute("layerPath", layerPath),
+	)
+
+	bytes, err := json.Marshal(options)
+	if err != nil {
+		return err
+	}
+
+	err = hcsSetupBaseOSLayer(layerPath, vhdHandle, string(bytes))
+	if err != nil {
+		return errors.Wrap(err, "failed to setup base OS layer")
+	}
+	return nil
+}
+
+// SetupBaseOSVolume sets up a volume that contains a base OS for a container.
+//
+// `layerPath` is a path to a directory containing the layer.
+//
+// `volumePath` is the path to the volume to be used for setup.
+//
+// `options` are the options applied while processing the layer.
+//
+// NOTE: This API is only available on builds of Windows greater than 19645. Inside we
+// check if the host's build has the API available by using 'GetVersion' which requires
+// the calling application to be manifested. https://docs.microsoft.com/en-us/windows/win32/sbscs/manifests
+func SetupBaseOSVolume(ctx context.Context, layerPath, volumePath string, options OsLayerOptions) (err error) {
+	if osversion.Build() < 19645 {
+		return errors.New("SetupBaseOSVolume is not present on builds older than 19645")
+	}
+	title := "hcsshim::SetupBaseOSVolume"
+	ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+	defer span.End()
+	defer func() { oc.SetSpanStatus(span, err) }()
+	span.AddAttributes(
+		trace.StringAttribute("layerPath", layerPath),
+		trace.StringAttribute("volumePath", volumePath),
+	)
+
+	bytes, err := json.Marshal(options)
+	if err != nil {
+		return err
+	}
+
+	err = hcsSetupBaseOSVolume(layerPath, volumePath, string(bytes))
+	if err != nil {
+		return errors.Wrap(err, "failed to setup base OS volume")
+	}
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go b/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go
new file mode 100644
index 0000000000..c38d3aa5a9
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go
@@ -0,0 +1,53 @@
+// Package computestorage is a wrapper around the HCS storage APIs. These are new storage APIs introduced
+// separate from the original graphdriver calls intended to give more freedom around creating
+// and managing container layers and scratch spaces.
+package computestorage
+
+import (
+	hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
+)
+
+//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go storage.go
+
+//sys hcsImportLayer(layerPath string, sourceFolderPath string, layerData string) (hr error) = computestorage.HcsImportLayer?
+//sys hcsExportLayer(layerPath string, exportFolderPath string, layerData string, options string) (hr error) = computestorage.HcsExportLayer?
+//sys hcsDestroyLayer(layerPath string) (hr error) = computestorage.HcsDestroyLayer?
+//sys hcsSetupBaseOSLayer(layerPath string, handle windows.Handle, options string) (hr error) = computestorage.HcsSetupBaseOSLayer?
+//sys hcsInitializeWritableLayer(writableLayerPath string, layerData string, options string) (hr error) = computestorage.HcsInitializeWritableLayer?
+//sys hcsAttachLayerStorageFilter(layerPath string, layerData string) (hr error) = computestorage.HcsAttachLayerStorageFilter?
+//sys hcsDetachLayerStorageFilter(layerPath string) (hr error) = computestorage.HcsDetachLayerStorageFilter?
+//sys hcsFormatWritableLayerVhd(handle windows.Handle) (hr error) = computestorage.HcsFormatWritableLayerVhd?
+//sys hcsGetLayerVhdMountPath(vhdHandle windows.Handle, mountPath **uint16) (hr error) = computestorage.HcsGetLayerVhdMountPath?
+//sys hcsSetupBaseOSVolume(layerPath string, volumePath string, options string) (hr error) = computestorage.HcsSetupBaseOSVolume?
+
+type Version = hcsschema.Version
+type Layer = hcsschema.Layer
+
+// LayerData is the data used to describe parent layer information.
+type LayerData struct {
+	SchemaVersion Version `json:"SchemaVersion,omitempty"`
+	Layers        []Layer `json:"Layers,omitempty"`
+}
+
+// ExportLayerOptions are the set of options that are used with the `computestorage.HcsExportLayer` syscall.
+type ExportLayerOptions struct {
+	IsWritableLayer bool `json:"IsWritableLayer,omitempty"`
+}
+
+// OsLayerType is the type of layer being operated on.
+type OsLayerType string
+
+const (
+	// OsLayerTypeContainer is a container layer.
+ OsLayerTypeContainer OsLayerType = "Container" + // OsLayerTypeVM is a virtual machine layer. + OsLayerTypeVM OsLayerType = "Vm" +) + +// OsLayerOptions are the set of options that are used with the `SetupBaseOSLayer` and +// `SetupBaseOSVolume` calls. +type OsLayerOptions struct { + Type OsLayerType `json:"Type,omitempty"` + DisableCiCacheOptimization bool `json:"DisableCiCacheOptimization,omitempty"` + SkipUpdateBcdForBoot bool `json:"SkipUpdateBcdForBoot,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go new file mode 100644 index 0000000000..b996b35e6b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go @@ -0,0 +1,332 @@ +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. + +package computestorage + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modcomputestorage = windows.NewLazySystemDLL("computestorage.dll") + + procHcsAttachLayerStorageFilter = modcomputestorage.NewProc("HcsAttachLayerStorageFilter") + procHcsDestroyLayer = modcomputestorage.NewProc("HcsDestroyLayer") + procHcsDetachLayerStorageFilter = modcomputestorage.NewProc("HcsDetachLayerStorageFilter") + procHcsExportLayer = modcomputestorage.NewProc("HcsExportLayer") + procHcsFormatWritableLayerVhd = modcomputestorage.NewProc("HcsFormatWritableLayerVhd") + procHcsGetLayerVhdMountPath = modcomputestorage.NewProc("HcsGetLayerVhdMountPath") + procHcsImportLayer = modcomputestorage.NewProc("HcsImportLayer") + procHcsInitializeWritableLayer = modcomputestorage.NewProc("HcsInitializeWritableLayer") + procHcsSetupBaseOSLayer = modcomputestorage.NewProc("HcsSetupBaseOSLayer") + procHcsSetupBaseOSVolume = modcomputestorage.NewProc("HcsSetupBaseOSVolume") +) + +func hcsAttachLayerStorageFilter(layerPath string, layerData string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(layerData) + if hr != nil { + return + } + return _hcsAttachLayerStorageFilter(_p0, _p1) +} + +func _hcsAttachLayerStorageFilter(layerPath *uint16, layerData *uint16) (hr error) { + hr = procHcsAttachLayerStorageFilter.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsAttachLayerStorageFilter.Addr(), 2, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(layerData)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsDestroyLayer(layerPath string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + return _hcsDestroyLayer(_p0) +} + +func 
_hcsDestroyLayer(layerPath *uint16) (hr error) { + hr = procHcsDestroyLayer.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsDestroyLayer.Addr(), 1, uintptr(unsafe.Pointer(layerPath)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsDetachLayerStorageFilter(layerPath string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + return _hcsDetachLayerStorageFilter(_p0) +} + +func _hcsDetachLayerStorageFilter(layerPath *uint16) (hr error) { + hr = procHcsDetachLayerStorageFilter.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsDetachLayerStorageFilter.Addr(), 1, uintptr(unsafe.Pointer(layerPath)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsExportLayer(layerPath string, exportFolderPath string, layerData string, options string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(exportFolderPath) + if hr != nil { + return + } + var _p2 *uint16 + _p2, hr = syscall.UTF16PtrFromString(layerData) + if hr != nil { + return + } + var _p3 *uint16 + _p3, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsExportLayer(_p0, _p1, _p2, _p3) +} + +func _hcsExportLayer(layerPath *uint16, exportFolderPath *uint16, layerData *uint16, options *uint16) (hr error) { + hr = procHcsExportLayer.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsExportLayer.Addr(), 4, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(exportFolderPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsFormatWritableLayerVhd(handle windows.Handle) (hr error) { + hr = procHcsFormatWritableLayerVhd.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsFormatWritableLayerVhd.Addr(), 1, uintptr(handle), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsGetLayerVhdMountPath(vhdHandle windows.Handle, mountPath **uint16) (hr error) { + hr = procHcsGetLayerVhdMountPath.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsGetLayerVhdMountPath.Addr(), 2, uintptr(vhdHandle), uintptr(unsafe.Pointer(mountPath)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsImportLayer(layerPath string, sourceFolderPath string, layerData string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(sourceFolderPath) + if hr != nil { + return + } + var _p2 *uint16 + _p2, hr = syscall.UTF16PtrFromString(layerData) + if hr != nil { + return + } + return _hcsImportLayer(_p0, _p1, _p2) +} + +func _hcsImportLayer(layerPath *uint16, sourceFolderPath *uint16, layerData *uint16) (hr error) { + hr = procHcsImportLayer.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsImportLayer.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(sourceFolderPath)), uintptr(unsafe.Pointer(layerData))) + if int32(r0) < 0 { + 
if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsInitializeWritableLayer(writableLayerPath string, layerData string, options string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(writableLayerPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(layerData) + if hr != nil { + return + } + var _p2 *uint16 + _p2, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsInitializeWritableLayer(_p0, _p1, _p2) +} + +func _hcsInitializeWritableLayer(writableLayerPath *uint16, layerData *uint16, options *uint16) (hr error) { + hr = procHcsInitializeWritableLayer.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsInitializeWritableLayer.Addr(), 3, uintptr(unsafe.Pointer(writableLayerPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsSetupBaseOSLayer(layerPath string, handle windows.Handle, options string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsSetupBaseOSLayer(_p0, handle, _p1) +} + +func _hcsSetupBaseOSLayer(layerPath *uint16, handle windows.Handle, options *uint16) (hr error) { + hr = procHcsSetupBaseOSLayer.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsSetupBaseOSLayer.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(handle), uintptr(unsafe.Pointer(options))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsSetupBaseOSVolume(layerPath string, volumePath string, options string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(volumePath) + if hr != nil { + return + } + var _p2 *uint16 + _p2, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsSetupBaseOSVolume(_p0, _p1, _p2) +} + +func _hcsSetupBaseOSVolume(layerPath *uint16, volumePath *uint16, options *uint16) (hr error) { + hr = procHcsSetupBaseOSVolume.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsSetupBaseOSVolume.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(options))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/container.go b/vendor/github.com/Microsoft/hcsshim/container.go new file mode 100644 index 0000000000..c8f09f88b9 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/container.go @@ -0,0 +1,225 @@ +//go:build windows + +package hcsshim + +import ( + "context" + "fmt" + "os" + "sync" + "time" + + "github.com/Microsoft/hcsshim/internal/hcs" + "github.com/Microsoft/hcsshim/internal/hcs/schema1" + "github.com/Microsoft/hcsshim/internal/mergemaps" +) + +// ContainerProperties holds the properties for a container and the processes running in that container +type ContainerProperties = schema1.ContainerProperties + +// MemoryStats holds the memory statistics for a container +type MemoryStats = schema1.MemoryStats + +// 
ProcessorStats holds the processor statistics for a container +type ProcessorStats = schema1.ProcessorStats + +// StorageStats holds the storage statistics for a container +type StorageStats = schema1.StorageStats + +// NetworkStats holds the network statistics for a container +type NetworkStats = schema1.NetworkStats + +// Statistics is the structure returned by a statistics call on a container +type Statistics = schema1.Statistics + +// ProcessList is the structure of an item returned by a ProcessList call on a container +type ProcessListItem = schema1.ProcessListItem + +// MappedVirtualDiskController is the structure of an item returned by a MappedVirtualDiskList call on a container +type MappedVirtualDiskController = schema1.MappedVirtualDiskController + +// Type of Request Support in ModifySystem +type RequestType = schema1.RequestType + +// Type of Resource Support in ModifySystem +type ResourceType = schema1.ResourceType + +// RequestType const +const ( + Add = schema1.Add + Remove = schema1.Remove + Network = schema1.Network +) + +// ResourceModificationRequestResponse is the structure used to send request to the container to modify the system +// Supported resource types are Network and Request Types are Add/Remove +type ResourceModificationRequestResponse = schema1.ResourceModificationRequestResponse + +type container struct { + system *hcs.System + waitOnce sync.Once + waitErr error + waitCh chan struct{} +} + +// createContainerAdditionalJSON is read from the environment at initialization +// time. It allows an environment variable to define additional JSON which +// is merged in the CreateComputeSystem call to HCS. +var createContainerAdditionalJSON []byte + +func init() { + createContainerAdditionalJSON = ([]byte)(os.Getenv("HCSSHIM_CREATECONTAINER_ADDITIONALJSON")) +} + +// CreateContainer creates a new container with the given configuration but does not start it. +func CreateContainer(id string, c *ContainerConfig) (Container, error) { + fullConfig, err := mergemaps.MergeJSON(c, createContainerAdditionalJSON) + if err != nil { + return nil, fmt.Errorf("failed to merge additional JSON '%s': %s", createContainerAdditionalJSON, err) + } + + system, err := hcs.CreateComputeSystem(context.Background(), id, fullConfig) + if err != nil { + return nil, err + } + return &container{system: system}, err +} + +// OpenContainer opens an existing container by ID. +func OpenContainer(id string) (Container, error) { + system, err := hcs.OpenComputeSystem(context.Background(), id) + if err != nil { + return nil, err + } + return &container{system: system}, err +} + +// GetContainers gets a list of the containers on the system that match the query +func GetContainers(q ComputeSystemQuery) ([]ContainerProperties, error) { + return hcs.GetComputeSystems(context.Background(), q) +} + +// Start synchronously starts the container. +func (container *container) Start() error { + return convertSystemError(container.system.Start(context.Background()), container) +} + +// Shutdown requests a container shutdown, but it may not actually be shutdown until Wait() succeeds. +func (container *container) Shutdown() error { + err := container.system.Shutdown(context.Background()) + if err != nil { + return convertSystemError(err, container) + } + return &ContainerError{Container: container, Err: ErrVmcomputeOperationPending, Operation: "hcsshim::ComputeSystem::Shutdown"} +} + +// Terminate requests a container terminate, but it may not actually be terminated until Wait() succeeds. 
+func (container *container) Terminate() error {
+	err := container.system.Terminate(context.Background())
+	if err != nil {
+		return convertSystemError(err, container)
+	}
+	return &ContainerError{Container: container, Err: ErrVmcomputeOperationPending, Operation: "hcsshim::ComputeSystem::Terminate"}
+}
+
+// Wait synchronously waits for the container to shut down or terminate.
+func (container *container) Wait() error {
+	err := container.system.Wait()
+	if err == nil {
+		err = container.system.ExitError()
+	}
+	return convertSystemError(err, container)
+}
+
+// WaitTimeout synchronously waits for the container to terminate or the duration to elapse. It
+// returns a timeout error if the duration elapses first.
+func (container *container) WaitTimeout(timeout time.Duration) error {
+	container.waitOnce.Do(func() {
+		container.waitCh = make(chan struct{})
+		go func() {
+			container.waitErr = container.Wait()
+			close(container.waitCh)
+		}()
+	})
+	t := time.NewTimer(timeout)
+	defer t.Stop()
+	select {
+	case <-t.C:
+		return &ContainerError{Container: container, Err: ErrTimeout, Operation: "hcsshim::ComputeSystem::Wait"}
+	case <-container.waitCh:
+		return container.waitErr
+	}
+}
+
+// Pause pauses the execution of a container.
+func (container *container) Pause() error {
+	return convertSystemError(container.system.Pause(context.Background()), container)
+}
+
+// Resume resumes the execution of a container.
+func (container *container) Resume() error {
+	return convertSystemError(container.system.Resume(context.Background()), container)
+}
+
+// HasPendingUpdates returns true if the container has updates pending to install
+func (container *container) HasPendingUpdates() (bool, error) {
+	return false, nil
+}
+
+// Statistics returns statistics for the container. This is a legacy v1 call
+func (container *container) Statistics() (Statistics, error) {
+	properties, err := container.system.Properties(context.Background(), schema1.PropertyTypeStatistics)
+	if err != nil {
+		return Statistics{}, convertSystemError(err, container)
+	}
+
+	return properties.Statistics, nil
+}
+
+// ProcessList returns an array of ProcessListItems for the container. This is a legacy v1 call
+func (container *container) ProcessList() ([]ProcessListItem, error) {
+	properties, err := container.system.Properties(context.Background(), schema1.PropertyTypeProcessList)
+	if err != nil {
+		return nil, convertSystemError(err, container)
+	}
+
+	return properties.ProcessList, nil
+}
+
+// MappedVirtualDisks returns virtual disks mapped to a utility VM, indexed by controller. This is a legacy v1 call
+func (container *container) MappedVirtualDisks() (map[int]MappedVirtualDiskController, error) {
+	properties, err := container.system.Properties(context.Background(), schema1.PropertyTypeMappedVirtualDisk)
+	if err != nil {
+		return nil, convertSystemError(err, container)
+	}
+
+	return properties.MappedVirtualDiskControllers, nil
+}
+
+// CreateProcess launches a new process within the container.
+func (container *container) CreateProcess(c *ProcessConfig) (Process, error) {
+	p, err := container.system.CreateProcess(context.Background(), c)
+	if err != nil {
+		return nil, convertSystemError(err, container)
+	}
+	return &process{p: p.(*hcs.Process)}, nil
+}
+
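As a usage sketch of this legacy v1 wrapper (the OpenProcess method continues below): opening an existing container by a hypothetical ID, requesting shutdown, and waiting with a bound. Shutdown deliberately returns a pending error on success, which IsPending, defined in errors.go later in this patch, classifies.

```go
package main

import (
	"log"
	"time"

	"github.com/Microsoft/hcsshim"
)

func main() {
	// "my-container-id" is a placeholder for a real compute system ID.
	c, err := hcsshim.OpenContainer("my-container-id")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Shutdown reports a pending operation while the guest shuts down.
	if err := c.Shutdown(); err != nil && !hcsshim.IsPending(err) {
		log.Fatal(err)
	}

	// Bound the wait so a hung guest cannot block forever.
	if err := c.WaitTimeout(30 * time.Second); err != nil {
		log.Fatal(err)
	}
}
```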
+// OpenProcess gets an interface to an existing process within the container.
+func (container *container) OpenProcess(pid int) (Process, error) {
+	p, err := container.system.OpenProcess(context.Background(), pid)
+	if err != nil {
+		return nil, convertSystemError(err, container)
+	}
+	return &process{p: p}, nil
+}
+
+// Close cleans up any state associated with the container but does not terminate or wait for it.
+func (container *container) Close() error {
+	return convertSystemError(container.system.Close(), container)
+}
+
+// Modify the System
+func (container *container) Modify(config *ResourceModificationRequestResponse) error {
+	return convertSystemError(container.system.Modify(context.Background(), config), container)
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/errors.go b/vendor/github.com/Microsoft/hcsshim/errors.go
new file mode 100644
index 0000000000..594bbfb7a8
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/errors.go
@@ -0,0 +1,250 @@
+//go:build windows
+
+package hcsshim
+
+import (
+	"fmt"
+	"syscall"
+
+	"github.com/Microsoft/hcsshim/internal/hns"
+
+	"github.com/Microsoft/hcsshim/internal/hcs"
+	"github.com/Microsoft/hcsshim/internal/hcserror"
+)
+
+var (
+	// ErrComputeSystemDoesNotExist is an error encountered when the container being operated on no longer exists
+	ErrComputeSystemDoesNotExist = hcs.ErrComputeSystemDoesNotExist
+
+	// ErrElementNotFound is an error encountered when the object being referenced does not exist
+	ErrElementNotFound = hcs.ErrElementNotFound
+
+	// ErrNotSupported is an error encountered when the requested operation is not supported
+	ErrNotSupported = hcs.ErrNotSupported
+
+	// ErrInvalidData is an error encountered when the request being sent to hcs is invalid/unsupported
+	// decimal -2147024883 / hex 0x8007000d
+	ErrInvalidData = hcs.ErrInvalidData
+
+	// ErrHandleClose is an error encountered when the handle generating the notification being waited on has been closed
+	ErrHandleClose = hcs.ErrHandleClose
+
+	// ErrAlreadyClosed is an error encountered when using a handle that has been closed by the Close method
+	ErrAlreadyClosed = hcs.ErrAlreadyClosed
+
+	// ErrInvalidNotificationType is an error encountered when an invalid notification type is used
+	ErrInvalidNotificationType = hcs.ErrInvalidNotificationType
+
+	// ErrInvalidProcessState is an error encountered when the process is not in a valid state for the requested operation
+	ErrInvalidProcessState = hcs.ErrInvalidProcessState
+
+	// ErrTimeout is an error encountered when waiting on a notification times out
+	ErrTimeout = hcs.ErrTimeout
+
+	// ErrUnexpectedContainerExit is the error encountered when a container exits while waiting for
+	// a different expected notification
+	ErrUnexpectedContainerExit = hcs.ErrUnexpectedContainerExit
+
+	// ErrUnexpectedProcessAbort is the error encountered when communication with the compute service
+	// is lost while waiting for a notification
+	ErrUnexpectedProcessAbort = hcs.ErrUnexpectedProcessAbort
+
+	// ErrUnexpectedValue is an error encountered when hcs returns an invalid value
+	ErrUnexpectedValue = hcs.ErrUnexpectedValue
+
+	// ErrOperationDenied is an error when hcs attempts an operation that is explicitly denied
+	ErrOperationDenied = hcs.ErrOperationDenied
+
+	// ErrVmcomputeAlreadyStopped is an error encountered when a shutdown or terminate request is made on a stopped container
+	ErrVmcomputeAlreadyStopped = hcs.ErrVmcomputeAlreadyStopped
+
+	// ErrVmcomputeOperationPending is an error encountered when the operation is being completed asynchronously
+	ErrVmcomputeOperationPending = hcs.ErrVmcomputeOperationPending
+
+	// ErrVmcomputeOperationInvalidState is an error encountered when the compute system is not in a valid state for the requested operation
+	ErrVmcomputeOperationInvalidState = hcs.ErrVmcomputeOperationInvalidState
+
+	// ErrProcNotFound is an error encountered when a procedure look up fails.
+	ErrProcNotFound = hcs.ErrProcNotFound
+
+	// ErrVmcomputeOperationAccessIsDenied is an error which can be encountered when enumerating compute systems in RS1/RS2
+	// builds when the underlying silo might be in the process of terminating. HCS was fixed in RS3.
+	ErrVmcomputeOperationAccessIsDenied = hcs.ErrVmcomputeOperationAccessIsDenied
+
+	// ErrVmcomputeInvalidJSON is an error encountered when the compute system does not support/understand the messages sent by management
+	ErrVmcomputeInvalidJSON = hcs.ErrVmcomputeInvalidJSON
+
+	// ErrVmcomputeUnknownMessage is an error encountered when the guest compute system doesn't support the message
+	ErrVmcomputeUnknownMessage = hcs.ErrVmcomputeUnknownMessage
+
+	// ErrPlatformNotSupported is an error encountered when hcs doesn't support the request
+	ErrPlatformNotSupported = hcs.ErrPlatformNotSupported
+)
+
+type EndpointNotFoundError = hns.EndpointNotFoundError
+type NetworkNotFoundError = hns.NetworkNotFoundError
+
+// ProcessError is an error encountered in HCS during an operation on a Process object
+type ProcessError struct {
+	Process   *process
+	Operation string
+	Err       error
+	Events    []hcs.ErrorEvent
+}
+
+// ContainerError is an error encountered in HCS during an operation on a Container object
+type ContainerError struct {
+	Container *container
+	Operation string
+	Err       error
+	Events    []hcs.ErrorEvent
+}
+
+func (e *ContainerError) Error() string {
+	if e == nil {
+		return ""
+	}
+
+	if e.Container == nil {
+		return "unexpected nil container for error: " + e.Err.Error()
+	}
+
+	s := "container " + e.Container.system.ID()
+
+	if e.Operation != "" {
+		s += " encountered an error during " + e.Operation
+	}
+
+	switch e.Err.(type) {
+	case nil:
+		break
+	case syscall.Errno:
+		s += fmt.Sprintf(": failure in a Windows system call: %s (0x%x)", e.Err, hcserror.Win32FromError(e.Err))
+	default:
+		s += fmt.Sprintf(": %s", e.Err.Error())
+	}
+
+	for _, ev := range e.Events {
+		s += "\n" + ev.String()
+	}
+
+	return s
+}
+
+func (e *ProcessError) Error() string {
+	if e == nil {
+		return ""
+	}
+
+	if e.Process == nil {
+		return "unexpected nil process for error: " + e.Err.Error()
+	}
+
+	s := fmt.Sprintf("process %d in container %s", e.Process.p.Pid(), e.Process.p.SystemID())
+	if e.Operation != "" {
+		s += " encountered an error during " + e.Operation
+	}
+
+	switch e.Err.(type) {
+	case nil:
+		break
+	case syscall.Errno:
+		s += fmt.Sprintf(": failure in a Windows system call: %s (0x%x)", e.Err, hcserror.Win32FromError(e.Err))
+	default:
+		s += fmt.Sprintf(": %s", e.Err.Error())
+	}
+
+	for _, ev := range e.Events {
+		s += "\n" + ev.String()
+	}
+
+	return s
+}
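The classification helpers that follow let callers branch on HCS failure modes without unwrapping ContainerError or ProcessError by hand. A hedged sketch of a tolerant stop routine built on them; the container ID is hypothetical.

```go
package main

import (
	"log"

	"github.com/Microsoft/hcsshim"
)

// stopContainer terminates a container, treating "already gone" and
// "already stopped" as success, and waiting out pending operations.
func stopContainer(id string) error {
	c, err := hcsshim.OpenContainer(id)
	if err != nil {
		if hcsshim.IsNotExist(err) {
			return nil // nothing to stop
		}
		return err
	}
	defer c.Close()

	err = c.Terminate()
	switch {
	case err == nil, hcsshim.IsAlreadyStopped(err):
		return nil
	case hcsshim.IsPending(err):
		return c.Wait() // termination is in flight; wait for it to land
	default:
		return err
	}
}

func main() {
	if err := stopContainer("my-container-id"); err != nil {
		log.Fatal(err)
	}
}
```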
+// IsNotExist checks if an error is caused by the Container or Process not existing.
+// Note: Currently, ErrElementNotFound can mean that a Process has either
+// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
+// will currently return true when the error is ErrElementNotFound.
+func IsNotExist(err error) bool {
+	if _, ok := err.(EndpointNotFoundError); ok {
+		return true
+	}
+	if _, ok := err.(NetworkNotFoundError); ok {
+		return true
+	}
+	return hcs.IsNotExist(getInnerError(err))
+}
+
+// IsAlreadyClosed checks if an error is caused by the Container or Process having been
+// already closed by a call to the Close() method.
+func IsAlreadyClosed(err error) bool {
+	return hcs.IsAlreadyClosed(getInnerError(err))
+}
+
+// IsPending returns a boolean indicating whether the error is that
+// the requested operation is being completed in the background.
+func IsPending(err error) bool {
+	return hcs.IsPending(getInnerError(err))
+}
+
+// IsTimeout returns a boolean indicating whether the error is caused by
+// a timeout waiting for the operation to complete.
+func IsTimeout(err error) bool {
+	return hcs.IsTimeout(getInnerError(err))
+}
+
+// IsAlreadyStopped returns a boolean indicating whether the error is caused by
+// a Container or Process being already stopped.
+// Note: Currently, ErrElementNotFound can mean that a Process has either
+// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
+// will currently return true when the error is ErrElementNotFound.
+func IsAlreadyStopped(err error) bool {
+	return hcs.IsAlreadyStopped(getInnerError(err))
+}
+
+// IsNotSupported returns a boolean indicating whether the error is caused by
+// unsupported platform requests
+// Note: Currently unsupported platform requests can mean either
+// ErrVmcomputeInvalidJSON, ErrInvalidData, ErrNotSupported or ErrVmcomputeUnknownMessage
+// is thrown from the Platform
+func IsNotSupported(err error) bool {
+	return hcs.IsNotSupported(getInnerError(err))
+}
+
+// IsOperationInvalidState returns true when err is caused by
+// `ErrVmcomputeOperationInvalidState`.
+func IsOperationInvalidState(err error) bool {
+	return hcs.IsOperationInvalidState(getInnerError(err))
+}
+
+// IsAccessIsDenied returns true when err is caused by
+// `ErrVmcomputeOperationAccessIsDenied`.
+func IsAccessIsDenied(err error) bool {
+	return hcs.IsAccessIsDenied(getInnerError(err))
+}
+
+func getInnerError(err error) error {
+	switch pe := err.(type) {
+	case nil:
+		return nil
+	case *ContainerError:
+		err = pe.Err
+	case *ProcessError:
+		err = pe.Err
+	}
+	return err
+}
+
+func convertSystemError(err error, c *container) error {
+	if serr, ok := err.(*hcs.SystemError); ok {
+		return &ContainerError{Container: c, Operation: serr.Op, Err: serr.Err, Events: serr.Events}
+	}
+	return err
+}
+
+func convertProcessError(err error, p *process) error {
+	if perr, ok := err.(*hcs.ProcessError); ok {
+		return &ProcessError{Process: p, Operation: perr.Op, Err: perr.Err, Events: perr.Events}
+	}
+	return err
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/hcsshim.go b/vendor/github.com/Microsoft/hcsshim/hcsshim.go
new file mode 100644
index 0000000000..13f80e4a81
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/hcsshim.go
@@ -0,0 +1,30 @@
+//go:build windows
+
+// Shim for the Host Compute Service (HCS) to manage Windows Server
+// containers and Hyper-V containers.
+
+package hcsshim
+
+import (
+	"golang.org/x/sys/windows"
+
+	"github.com/Microsoft/hcsshim/internal/hcserror"
+)
+
+//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go hcsshim.go
+
+//sys SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) = iphlpapi.SetCurrentThreadCompartmentId
+
+const (
+	// Specific user-visible exit codes
+	WaitErrExecFailed = 32767
+
+	ERROR_GEN_FAILURE          = windows.ERROR_GEN_FAILURE
+	ERROR_SHUTDOWN_IN_PROGRESS = windows.ERROR_SHUTDOWN_IN_PROGRESS
+	WSAEINVAL                  = windows.WSAEINVAL
+
+	// Timeout on wait calls
+	TimeoutInfinite = 0xFFFFFFFF
+)
+
+type HcsError = hcserror.HcsError
diff --git a/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go b/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go
new file mode 100644
index 0000000000..d8a73de98d
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go
@@ -0,0 +1,120 @@
+//go:build windows
+
+package hcsshim
+
+import (
+	"github.com/Microsoft/hcsshim/internal/hns"
+)
+
+// HNSEndpoint represents a network endpoint in HNS
+type HNSEndpoint = hns.HNSEndpoint
+
+// HNSEndpointStats represents the stats for a network endpoint in HNS
+type HNSEndpointStats = hns.EndpointStats
+
+// Namespace represents a Compartment.
+type Namespace = hns.Namespace
+
+// SystemType represents the type of the system on which actions are done
+type SystemType string
+
+// SystemType const
+const (
+	ContainerType      SystemType = "Container"
+	VirtualMachineType SystemType = "VirtualMachine"
+	HostType           SystemType = "Host"
+)
+
+// EndpointAttachDetachRequest is the structure used to send request to the container to modify the system
+// Supported resource types are Network and Request Types are Add/Remove
+type EndpointAttachDetachRequest = hns.EndpointAttachDetachRequest
+
+// EndpointResquestResponse is the object used to get the endpoint request response
+type EndpointResquestResponse = hns.EndpointResquestResponse
+
+// HNSEndpointRequest makes a HNS call to modify/query a network endpoint
+func HNSEndpointRequest(method, path, request string) (*HNSEndpoint, error) {
+	return hns.HNSEndpointRequest(method, path, request)
+}
+
+// HNSListEndpointRequest makes a HNS call to query the list of available endpoints
+func HNSListEndpointRequest() ([]HNSEndpoint, error) {
+	return hns.HNSListEndpointRequest()
+}
+
+// HotAttachEndpoint makes a HCS Call to attach the endpoint to the container
+func HotAttachEndpoint(containerID string, endpointID string) error {
+	endpoint, err := GetHNSEndpointByID(endpointID)
+	if err != nil {
+		return err
+	}
+	isAttached, err := endpoint.IsAttached(containerID)
+	if isAttached {
+		return err
+	}
+	return modifyNetworkEndpoint(containerID, endpointID, Add)
+}
+
+// HotDetachEndpoint makes a HCS Call to detach the endpoint from the container
+func HotDetachEndpoint(containerID string, endpointID string) error {
+	endpoint, err := GetHNSEndpointByID(endpointID)
+	if err != nil {
+		return err
+	}
+	isAttached, err := endpoint.IsAttached(containerID)
+	if !isAttached {
+		return err
+	}
+	return modifyNetworkEndpoint(containerID, endpointID, Remove)
+}
+
+// modifyContainer modifies the container corresponding to the container id by sending a request
+func modifyContainer(id string, request *ResourceModificationRequestResponse) error {
+	container, err := OpenContainer(id)
+	if err != nil {
+		if IsNotExist(err) {
+			return ErrComputeSystemDoesNotExist
+		}
+		return getInnerError(err)
+	}
+	defer container.Close()
+	err = container.Modify(request)
+	if err != nil {
+		if IsNotSupported(err) {
+			return ErrPlatformNotSupported
+		}
+		return getInnerError(err)
+	}
+
+	return nil
+}
+
+func modifyNetworkEndpoint(containerID string, endpointID string, request RequestType) error {
+	requestMessage := &ResourceModificationRequestResponse{
+		Resource: Network,
+		Request:  request,
+		Data:     endpointID,
+	}
+	err := modifyContainer(containerID, requestMessage)
+
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// GetHNSEndpointByID get the Endpoint by ID
+func GetHNSEndpointByID(endpointID string) (*HNSEndpoint, error) {
+	return hns.GetHNSEndpointByID(endpointID)
+}
+
+// GetHNSEndpointByName gets the endpoint filtered by Name
+func GetHNSEndpointByName(endpointName string) (*HNSEndpoint, error) {
+	return hns.GetHNSEndpointByName(endpointName)
+}
+
+// GetHNSEndpointStats gets the endpoint stats by ID
+func GetHNSEndpointStats(endpointName string) (*HNSEndpointStats, error) {
+	return hns.GetHNSEndpointStats(endpointName)
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/hnsglobals.go b/vendor/github.com/Microsoft/hcsshim/hnsglobals.go
new file mode 100644
index 0000000000..c564bf4a35
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/hnsglobals.go
@@ -0,0 +1,18 @@
+//go:build windows
+
+package hcsshim
+
+import (
+	"github.com/Microsoft/hcsshim/internal/hns"
+)
+
+type HNSGlobals = hns.HNSGlobals
+type HNSVersion = hns.HNSVersion
+
+var (
+	HNSVersion1803 = hns.HNSVersion1803
+)
+
+func GetHNSGlobals() (*HNSGlobals, error) {
+	return hns.GetHNSGlobals()
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/hnsnetwork.go b/vendor/github.com/Microsoft/hcsshim/hnsnetwork.go
new file mode 100644
index 0000000000..925c212495
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/hnsnetwork.go
@@ -0,0 +1,38 @@
+//go:build windows
+
+package hcsshim
+
+import (
+	"github.com/Microsoft/hcsshim/internal/hns"
+)
+
+// Subnet is associated with a network and represents a list
+// of subnets available to the network
+type Subnet = hns.Subnet
+
+// MacPool is associated with a network and represents a list
+// of MAC addresses available to the network
+type MacPool = hns.MacPool
+
+// HNSNetwork represents a network in HNS
+type HNSNetwork = hns.HNSNetwork
+
+// HNSNetworkRequest makes a call into HNS to update/query a single network
+func HNSNetworkRequest(method, path, request string) (*HNSNetwork, error) {
+	return hns.HNSNetworkRequest(method, path, request)
+}
+
+// HNSListNetworkRequest makes a HNS call to query the list of available networks
+func HNSListNetworkRequest(method, path, request string) ([]HNSNetwork, error) {
+	return hns.HNSListNetworkRequest(method, path, request)
+}
+
+// GetHNSNetworkByID gets the network by ID
+func GetHNSNetworkByID(networkID string) (*HNSNetwork, error) {
+	return hns.GetHNSNetworkByID(networkID)
+}
+
+// GetHNSNetworkByName gets the network filtered by Name
+func GetHNSNetworkByName(networkName string) (*HNSNetwork, error) {
+	return hns.GetHNSNetworkByName(networkName)
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/hnspolicy.go b/vendor/github.com/Microsoft/hcsshim/hnspolicy.go
new file mode 100644
index 0000000000..00ab263644
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/hnspolicy.go
@@ -0,0 +1,60 @@
+package hcsshim
+
+import (
+	"github.com/Microsoft/hcsshim/internal/hns"
+)
+
+// PolicyType is the type of policy used in HNS network policies
+type PolicyType = hns.PolicyType
+
+// PolicyType const
+const (
+	Nat = hns.Nat
+ ACL = hns.ACL + PA = hns.PA + VLAN = hns.VLAN + VSID = hns.VSID + VNet = hns.VNet + L2Driver = hns.L2Driver + Isolation = hns.Isolation + QOS = hns.QOS + OutboundNat = hns.OutboundNat + ExternalLoadBalancer = hns.ExternalLoadBalancer + Route = hns.Route + Proxy = hns.Proxy +) + +type ProxyPolicy = hns.ProxyPolicy + +type NatPolicy = hns.NatPolicy + +type QosPolicy = hns.QosPolicy + +type IsolationPolicy = hns.IsolationPolicy + +type VlanPolicy = hns.VlanPolicy + +type VsidPolicy = hns.VsidPolicy + +type PaPolicy = hns.PaPolicy + +type OutboundNatPolicy = hns.OutboundNatPolicy + +type ActionType = hns.ActionType +type DirectionType = hns.DirectionType +type RuleType = hns.RuleType + +const ( + Allow = hns.Allow + Block = hns.Block + + In = hns.In + Out = hns.Out + + Host = hns.Host + Switch = hns.Switch +) + +type ACLPolicy = hns.ACLPolicy + +type Policy = hns.Policy diff --git a/vendor/github.com/Microsoft/hcsshim/hnspolicylist.go b/vendor/github.com/Microsoft/hcsshim/hnspolicylist.go new file mode 100644 index 0000000000..9bfe61ee83 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hnspolicylist.go @@ -0,0 +1,49 @@ +//go:build windows + +package hcsshim + +import ( + "github.com/Microsoft/hcsshim/internal/hns" +) + +// RoutePolicy is a structure defining schema for Route based Policy +type RoutePolicy = hns.RoutePolicy + +// ELBPolicy is a structure defining schema for ELB LoadBalancing based Policy +type ELBPolicy = hns.ELBPolicy + +// LBPolicy is a structure defining schema for LoadBalancing based Policy +type LBPolicy = hns.LBPolicy + +// PolicyList is a structure defining schema for Policy list request +type PolicyList = hns.PolicyList + +// HNSPolicyListRequest makes a call into HNS to update/query a single network +func HNSPolicyListRequest(method, path, request string) (*PolicyList, error) { + return hns.HNSPolicyListRequest(method, path, request) +} + +// HNSListPolicyListRequest gets all the policy list +func HNSListPolicyListRequest() ([]PolicyList, error) { + return hns.HNSListPolicyListRequest() +} + +// PolicyListRequest makes a HNS call to modify/query a network policy list +func PolicyListRequest(method, path, request string) (*PolicyList, error) { + return hns.PolicyListRequest(method, path, request) +} + +// GetPolicyListByID get the policy list by ID +func GetPolicyListByID(policyListID string) (*PolicyList, error) { + return hns.GetPolicyListByID(policyListID) +} + +// AddLoadBalancer policy list for the specified endpoints +func AddLoadBalancer(endpoints []HNSEndpoint, isILB bool, sourceVIP, vip string, protocol uint16, internalPort uint16, externalPort uint16) (*PolicyList, error) { + return hns.AddLoadBalancer(endpoints, isILB, sourceVIP, vip, protocol, internalPort, externalPort) +} + +// AddRoute adds route policy list for the specified endpoints +func AddRoute(endpoints []HNSEndpoint, destinationPrefix string, nextHop string, encapEnabled bool) (*PolicyList, error) { + return hns.AddRoute(endpoints, destinationPrefix, nextHop, encapEnabled) +} diff --git a/vendor/github.com/Microsoft/hcsshim/hnssupport.go b/vendor/github.com/Microsoft/hcsshim/hnssupport.go new file mode 100644 index 0000000000..d97681e0ca --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hnssupport.go @@ -0,0 +1,15 @@ +//go:build windows + +package hcsshim + +import ( + "github.com/Microsoft/hcsshim/internal/hns" +) + +type HNSSupportedFeatures = hns.HNSSupportedFeatures + +type HNSAclFeatures = hns.HNSAclFeatures + 
+func GetHNSSupportedFeatures() HNSSupportedFeatures {
+	return hns.GetHNSSupportedFeatures()
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/interface.go b/vendor/github.com/Microsoft/hcsshim/interface.go
new file mode 100644
index 0000000000..81a2819516
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/interface.go
@@ -0,0 +1,116 @@
+//go:build windows
+
+package hcsshim
+
+import (
+	"io"
+	"time"
+
+	"github.com/Microsoft/hcsshim/internal/hcs/schema1"
+)
+
+// ProcessConfig is used as both the input of Container.CreateProcess
+// and to convert the parameters to JSON for passing onto the HCS
+type ProcessConfig = schema1.ProcessConfig
+
+type Layer = schema1.Layer
+type MappedDir = schema1.MappedDir
+type MappedPipe = schema1.MappedPipe
+type HvRuntime = schema1.HvRuntime
+type MappedVirtualDisk = schema1.MappedVirtualDisk
+
+// AssignedDevice represents a device that has been directly assigned to a container
+//
+// NOTE: Support added in RS5
+type AssignedDevice = schema1.AssignedDevice
+
+// ContainerConfig is used as both the input of CreateContainer
+// and to convert the parameters to JSON for passing onto the HCS
+type ContainerConfig = schema1.ContainerConfig
+
+type ComputeSystemQuery = schema1.ComputeSystemQuery
+
+// Container represents a created (but not necessarily running) container.
+type Container interface {
+	// Start synchronously starts the container.
+	Start() error
+
+	// Shutdown requests a container shutdown, but it may not actually be shut down until Wait() succeeds.
+	Shutdown() error
+
+	// Terminate requests a container terminate, but it may not actually be terminated until Wait() succeeds.
+	Terminate() error
+
+	// Wait synchronously waits for the container to shut down or terminate.
+	Wait() error
+
+	// WaitTimeout synchronously waits for the container to terminate or the duration to elapse.
+	// It returns an error if the timeout occurs.
+	WaitTimeout(time.Duration) error
+
+	// Pause pauses the execution of a container.
+	Pause() error
+
+	// Resume resumes the execution of a container.
+	Resume() error
+
+	// HasPendingUpdates returns true if the container has updates pending to install.
+	HasPendingUpdates() (bool, error)
+
+	// Statistics returns statistics for a container.
+	Statistics() (Statistics, error)
+
+	// ProcessList returns details for the processes in a container.
+	ProcessList() ([]ProcessListItem, error)
+
+	// MappedVirtualDisks returns virtual disks mapped to a utility VM, indexed by controller
+	MappedVirtualDisks() (map[int]MappedVirtualDiskController, error)
+
+	// CreateProcess launches a new process within the container.
+	CreateProcess(c *ProcessConfig) (Process, error)
+
+	// OpenProcess gets an interface to an existing process within the container.
+	OpenProcess(pid int) (Process, error)
+
+	// Close cleans up any state associated with the container but does not terminate or wait for it.
+	Close() error
+
+	// Modify sends a resource modification request to the container.
+	Modify(config *ResourceModificationRequestResponse) error
+}
+
+// Process represents a running or exited process.
+type Process interface {
+	// Pid returns the process ID of the process within the container.
+	Pid() int
+
+	// Kill signals the process to terminate but does not wait for it to finish terminating.
+	Kill() error
+
+	// Wait waits for the process to exit.
+	Wait() error
+
+	// WaitTimeout waits for the process to exit or the duration to elapse.
+	// It returns an error if the timeout occurs.
+	WaitTimeout(time.Duration) error
+
+	// ExitCode returns the exit code of the process. The process must have
+	// already terminated.
+	ExitCode() (int, error)
+
+	// ResizeConsole resizes the console of the process.
+	ResizeConsole(width, height uint16) error
+
+	// Stdio returns the stdin, stdout, and stderr pipes, respectively. Closing
+	// these pipes does not close the underlying pipes; it should be possible to
+	// call this multiple times to get multiple interfaces.
+	Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, error)
+
+	// CloseStdin closes the write side of the stdin pipe so that the process is
+	// notified on the read side that there is no more data in stdin.
+	CloseStdin() error
+
+	// Close cleans up any state associated with the process but does not kill
+	// or wait on it.
+	Close() error
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go b/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go
new file mode 100644
index 0000000000..b60cd383b6
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go
@@ -0,0 +1,99 @@
+//go:build windows
+
+package cow
+
+import (
+	"context"
+	"io"
+
+	"github.com/Microsoft/hcsshim/internal/hcs/schema1"
+	hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
+)
+
+// Process is the interface for an OS process running in a container or utility VM.
+type Process interface {
+	// Close releases resources associated with the process and closes the
+	// writer and readers returned by Stdio. Depending on the implementation,
+	// this may also terminate the process.
+	Close() error
+	// CloseStdin causes the process's stdin handle to receive EOF/EPIPE/whatever
+	// is appropriate to indicate that no more data is available.
+	CloseStdin(ctx context.Context) error
+	// CloseStdout closes the stdout connection to the process. It is used to indicate
+	// that we are done receiving output on the shim side.
+	CloseStdout(ctx context.Context) error
+	// CloseStderr closes the stderr connection to the process. It is used to indicate
+	// that we are done receiving output on the shim side.
+	CloseStderr(ctx context.Context) error
+	// Pid returns the process ID.
+	Pid() int
+	// Stdio returns the stdio streams for a process. These may be nil if a stream
+	// was not requested during CreateProcess.
+	Stdio() (_ io.Writer, _ io.Reader, _ io.Reader)
+	// ResizeConsole resizes the virtual terminal associated with the process.
+	ResizeConsole(ctx context.Context, width, height uint16) error
+	// Kill sends a SIGKILL or equivalent signal to the process and returns whether
+	// the signal was delivered. It does not wait for the process to terminate.
+	Kill(ctx context.Context) (bool, error)
+	// Signal sends a signal to the process and returns whether the signal was
+	// delivered. The input is OS specific (either
+	// guestrequest.SignalProcessOptionsWCOW or
+	// guestrequest.SignalProcessOptionsLCOW). It does not wait for the process
+	// to terminate.
+	Signal(ctx context.Context, options interface{}) (bool, error)
+	// Wait waits for the process to complete, or for a connection to the process to be
+	// terminated by some error condition (including calling Close).
+	Wait() error
+	// ExitCode returns the exit code of the process. Returns an error if the process
+	// has not yet exited.
+	ExitCode() (int, error)
+}
+
+// ProcessHost is the interface for creating processes.
+type ProcessHost interface {
+	// CreateProcess creates a process. The configuration is host specific
+	// (either hcsschema.ProcessParameters or lcow.ProcessParameters).
+	CreateProcess(ctx context.Context, config interface{}) (Process, error)
+	// OS returns the host's operating system, "linux" or "windows".
+	OS() string
+	// IsOCI specifies whether this is an OCI-compliant process host. If true,
+	// then the configuration passed to CreateProcess should have an OCI process
+	// spec (or nil if this is the initial process in an OCI container).
+	// Otherwise, it should have the HCS-specific process parameters.
+	IsOCI() bool
+}
+
+// Container is the interface for container objects, either running on the host or
+// in a utility VM.
+type Container interface {
+	ProcessHost
+	// Close releases the resources associated with the container. Depending on
+	// the implementation, this may also terminate the container.
+	Close() error
+	// ID returns the container ID.
+	ID() string
+	// Properties returns the requested container properties targeting a V1 schema container.
+	Properties(ctx context.Context, types ...schema1.PropertyType) (*schema1.ContainerProperties, error)
+	// PropertiesV2 returns the requested container properties targeting a V2 schema container.
+	PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (*hcsschema.Properties, error)
+	// Start starts a container.
+	Start(ctx context.Context) error
+	// Shutdown sends a shutdown request to the container (but does not wait for
+	// the shutdown to complete).
+	Shutdown(ctx context.Context) error
+	// Terminate sends a terminate request to the container (but does not wait
+	// for the terminate to complete).
+	Terminate(ctx context.Context) error
+	// Wait waits for the container to terminate, or for the connection to the
+	// container to be terminated by some error condition (including calling
+	// Close).
+	Wait() error
+	// WaitChannel returns the wait channel of the container
+	WaitChannel() <-chan struct{}
+	// WaitError returns the container termination error.
+	// This function should only be called after the channel in WaitChannel()
+	// is closed. Otherwise it is not thread safe.
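+	// (For example, a caller can select on WaitChannel() together with a
+	// context's Done channel and read WaitError() once the wait channel is
+	// closed, giving a context-aware wait.)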
+	WaitError() error
+	// Modify sends a request to modify container resources
+	Modify(ctx context.Context, config interface{}) error
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go
new file mode 100644
index 0000000000..7b27173c3a
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go
@@ -0,0 +1,163 @@
+//go:build windows
+
+package hcs
+
+import (
+	"fmt"
+	"sync"
+	"syscall"
+
+	"github.com/Microsoft/hcsshim/internal/interop"
+	"github.com/Microsoft/hcsshim/internal/logfields"
+	"github.com/Microsoft/hcsshim/internal/vmcompute"
+	"github.com/sirupsen/logrus"
+)
+
+var (
+	nextCallback    uintptr
+	callbackMap     = map[uintptr]*notificationWatcherContext{}
+	callbackMapLock = sync.RWMutex{}
+
+	notificationWatcherCallback = syscall.NewCallback(notificationWatcher)
+
+	// Notifications for HCS_SYSTEM handles
+	hcsNotificationSystemExited                      hcsNotification = 0x00000001
+	hcsNotificationSystemCreateCompleted             hcsNotification = 0x00000002
+	hcsNotificationSystemStartCompleted              hcsNotification = 0x00000003
+	hcsNotificationSystemPauseCompleted              hcsNotification = 0x00000004
+	hcsNotificationSystemResumeCompleted             hcsNotification = 0x00000005
+	hcsNotificationSystemCrashReport                 hcsNotification = 0x00000006
+	hcsNotificationSystemSiloJobCreated              hcsNotification = 0x00000007
+	hcsNotificationSystemSaveCompleted               hcsNotification = 0x00000008
+	hcsNotificationSystemRdpEnhancedModeStateChanged hcsNotification = 0x00000009
+	hcsNotificationSystemShutdownFailed              hcsNotification = 0x0000000A
+	hcsNotificationSystemGetPropertiesCompleted      hcsNotification = 0x0000000B
+	hcsNotificationSystemModifyCompleted             hcsNotification = 0x0000000C
+	hcsNotificationSystemCrashInitiated              hcsNotification = 0x0000000D
+	hcsNotificationSystemGuestConnectionClosed       hcsNotification = 0x0000000E
+
+	// Notifications for HCS_PROCESS handles
+	hcsNotificationProcessExited hcsNotification = 0x00010000
+
+	// Common notifications
+	hcsNotificationInvalid           hcsNotification = 0x00000000
+	hcsNotificationServiceDisconnect hcsNotification = 0x01000000
+)
+
+type hcsNotification uint32
+
+func (hn hcsNotification) String() string {
+	switch hn {
+	case hcsNotificationSystemExited:
+		return "SystemExited"
+	case hcsNotificationSystemCreateCompleted:
+		return "SystemCreateCompleted"
+	case hcsNotificationSystemStartCompleted:
+		return "SystemStartCompleted"
+	case hcsNotificationSystemPauseCompleted:
+		return "SystemPauseCompleted"
+	case hcsNotificationSystemResumeCompleted:
+		return "SystemResumeCompleted"
+	case hcsNotificationSystemCrashReport:
+		return "SystemCrashReport"
+	case hcsNotificationSystemSiloJobCreated:
+		return "SystemSiloJobCreated"
+	case hcsNotificationSystemSaveCompleted:
+		return "SystemSaveCompleted"
+	case hcsNotificationSystemRdpEnhancedModeStateChanged:
+		return "SystemRdpEnhancedModeStateChanged"
+	case hcsNotificationSystemShutdownFailed:
+		return "SystemShutdownFailed"
+	case hcsNotificationSystemGetPropertiesCompleted:
+		return "SystemGetPropertiesCompleted"
+	case hcsNotificationSystemModifyCompleted:
+		return "SystemModifyCompleted"
+	case hcsNotificationSystemCrashInitiated:
+		return "SystemCrashInitiated"
+	case hcsNotificationSystemGuestConnectionClosed:
+		return "SystemGuestConnectionClosed"
+	case hcsNotificationProcessExited:
+		return "ProcessExited"
+	case hcsNotificationInvalid:
+		return "Invalid"
+	case hcsNotificationServiceDisconnect:
return "ServiceDisconnect" + default: + return fmt.Sprintf("Unknown: %d", hn) + } +} + +type notificationChannel chan error + +type notificationWatcherContext struct { + channels notificationChannels + handle vmcompute.HcsCallback + + systemID string + processID int +} + +type notificationChannels map[hcsNotification]notificationChannel + +func newSystemChannels() notificationChannels { + channels := make(notificationChannels) + for _, notif := range []hcsNotification{ + hcsNotificationServiceDisconnect, + hcsNotificationSystemExited, + hcsNotificationSystemCreateCompleted, + hcsNotificationSystemStartCompleted, + hcsNotificationSystemPauseCompleted, + hcsNotificationSystemResumeCompleted, + hcsNotificationSystemSaveCompleted, + } { + channels[notif] = make(notificationChannel, 1) + } + return channels +} + +func newProcessChannels() notificationChannels { + channels := make(notificationChannels) + for _, notif := range []hcsNotification{ + hcsNotificationServiceDisconnect, + hcsNotificationProcessExited, + } { + channels[notif] = make(notificationChannel, 1) + } + return channels +} + +func closeChannels(channels notificationChannels) { + for _, c := range channels { + close(c) + } +} + +func notificationWatcher(notificationType hcsNotification, callbackNumber uintptr, notificationStatus uintptr, notificationData *uint16) uintptr { + var result error + if int32(notificationStatus) < 0 { + result = interop.Win32FromHresult(notificationStatus) + } + + callbackMapLock.RLock() + context := callbackMap[callbackNumber] + callbackMapLock.RUnlock() + + if context == nil { + return 0 + } + + log := logrus.WithFields(logrus.Fields{ + "notification-type": notificationType.String(), + "system-id": context.systemID, + }) + if context.processID != 0 { + log.Data[logfields.ProcessID] = context.processID + } + log.Debug("HCS notification") + + if channel, ok := context.channels[notificationType]; ok { + channel <- result + } + + return 0 +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/doc.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/doc.go new file mode 100644 index 0000000000..d792dda986 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/doc.go @@ -0,0 +1 @@ +package hcs diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go new file mode 100644 index 0000000000..3e10f5c7e0 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go @@ -0,0 +1,336 @@ +//go:build windows + +package hcs + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net" + "syscall" + + "github.com/Microsoft/hcsshim/internal/log" +) + +var ( + // ErrComputeSystemDoesNotExist is an error encountered when the container being operated on no longer exists + ErrComputeSystemDoesNotExist = syscall.Errno(0xc037010e) + + // ErrElementNotFound is an error encountered when the object being referenced does not exist + ErrElementNotFound = syscall.Errno(0x490) + + // ErrElementNotFound is an error encountered when the object being referenced does not exist + ErrNotSupported = syscall.Errno(0x32) + + // ErrInvalidData is an error encountered when the request being sent to hcs is invalid/unsupported + // decimal -2147024883 / hex 0x8007000d + ErrInvalidData = syscall.Errno(0xd) + + // ErrHandleClose is an error encountered when the handle generating the notification being waited on has been closed + ErrHandleClose = errors.New("hcsshim: the 
+	ErrHandleClose = errors.New("hcsshim: the handle generating this notification has been closed")
+
+	// ErrAlreadyClosed is an error encountered when using a handle that has been closed by the Close method
+	ErrAlreadyClosed = errors.New("hcsshim: the handle has already been closed")
+
+	// ErrInvalidNotificationType is an error encountered when an invalid notification type is used
+	ErrInvalidNotificationType = errors.New("hcsshim: invalid notification type")
+
+	// ErrInvalidProcessState is an error encountered when the process is not in a valid state for the requested operation
+	ErrInvalidProcessState = errors.New("the process is in an invalid state for the attempted operation")
+
+	// ErrTimeout is an error encountered when waiting on a notification times out
+	ErrTimeout = errors.New("hcsshim: timeout waiting for notification")
+
+	// ErrUnexpectedContainerExit is the error encountered when a container exits while waiting for
+	// a different expected notification
+	ErrUnexpectedContainerExit = errors.New("unexpected container exit")
+
+	// ErrUnexpectedProcessAbort is the error encountered when communication with the compute service
+	// is lost while waiting for a notification
+	ErrUnexpectedProcessAbort = errors.New("lost communication with compute service")
+
+	// ErrUnexpectedValue is an error encountered when hcs returns an invalid value
+	ErrUnexpectedValue = errors.New("unexpected value returned from hcs")
+
+	// ErrOperationDenied is an error when hcs attempts an operation that is explicitly denied
+	ErrOperationDenied = errors.New("operation denied")
+
+	// ErrVmcomputeAlreadyStopped is an error encountered when a shutdown or terminate request is made on a stopped container
+	ErrVmcomputeAlreadyStopped = syscall.Errno(0xc0370110)
+
+	// ErrVmcomputeOperationPending is an error encountered when the operation is being completed asynchronously
+	ErrVmcomputeOperationPending = syscall.Errno(0xC0370103)
+
+	// ErrVmcomputeOperationInvalidState is an error encountered when the compute system is not in a valid state for the requested operation
+	ErrVmcomputeOperationInvalidState = syscall.Errno(0xc0370105)
+
+	// ErrProcNotFound is an error encountered when a procedure look up fails.
+	ErrProcNotFound = syscall.Errno(0x7f)
+
+	// ErrVmcomputeOperationAccessIsDenied is an error which can be encountered when enumerating compute systems in RS1/RS2
+	// builds when the underlying silo might be in the process of terminating. HCS was fixed in RS3.
+	ErrVmcomputeOperationAccessIsDenied = syscall.Errno(0x5)
+
+	// ErrVmcomputeInvalidJSON is an error encountered when the compute system does not support/understand the messages sent by management
+	ErrVmcomputeInvalidJSON = syscall.Errno(0xc037010d)
+
+	// ErrVmcomputeUnknownMessage is an error encountered when the guest compute system doesn't support the message
+	ErrVmcomputeUnknownMessage = syscall.Errno(0xc037010b)
+
+	// ErrVmcomputeUnexpectedExit is an error encountered when the compute system terminates unexpectedly
+	ErrVmcomputeUnexpectedExit = syscall.Errno(0xC0370106)
+
+	// ErrPlatformNotSupported is an error encountered when hcs doesn't support the request
+	ErrPlatformNotSupported = errors.New("unsupported platform request")
+
+	// ErrProcessAlreadyStopped is returned by hcs if the process we're trying to kill has already been stopped.
+	ErrProcessAlreadyStopped = syscall.Errno(0x8037011f)
+
+	// ErrInvalidHandle is an error that can be encountered when querying the properties of a compute system when the handle to that
+	// compute system has already been closed.
+	ErrInvalidHandle = syscall.Errno(0x6)
+)
+
+type ErrorEvent struct {
+	Message    string `json:"Message,omitempty"`    // Fully formatted error message
+	StackTrace string `json:"StackTrace,omitempty"` // Stack trace in string form
+	Provider   string `json:"Provider,omitempty"`
+	EventID    uint16 `json:"EventId,omitempty"`
+	Flags      uint32 `json:"Flags,omitempty"`
+	Source     string `json:"Source,omitempty"`
+	//Data []EventData `json:"Data,omitempty"` // Omit this as HCS doesn't encode this well. It's more confusing to include. It is however logged in debug mode (see processHcsResult function)
+}
+
+type hcsResult struct {
+	Error        int32
+	ErrorMessage string
+	ErrorEvents  []ErrorEvent `json:"ErrorEvents,omitempty"`
+}
+
+func (ev *ErrorEvent) String() string {
+	evs := "[Event Detail: " + ev.Message
+	if ev.StackTrace != "" {
+		evs += " Stack Trace: " + ev.StackTrace
+	}
+	if ev.Provider != "" {
+		evs += " Provider: " + ev.Provider
+	}
+	if ev.EventID != 0 {
+		evs = fmt.Sprintf("%s EventID: %d", evs, ev.EventID)
+	}
+	if ev.Flags != 0 {
+		evs = fmt.Sprintf("%s flags: %d", evs, ev.Flags)
+	}
+	if ev.Source != "" {
+		evs += " Source: " + ev.Source
+	}
+	evs += "]"
+	return evs
+}
+
+func processHcsResult(ctx context.Context, resultJSON string) []ErrorEvent {
+	if resultJSON != "" {
+		result := &hcsResult{}
+		if err := json.Unmarshal([]byte(resultJSON), result); err != nil {
+			log.G(ctx).WithError(err).Warning("Could not unmarshal HCS result")
+			return nil
+		}
+		return result.ErrorEvents
+	}
+	return nil
+}
+
+type HcsError struct {
+	Op     string
+	Err    error
+	Events []ErrorEvent
+}
+
+var _ net.Error = &HcsError{}
+
+func (e *HcsError) Error() string {
+	s := e.Op + ": " + e.Err.Error()
+	for _, ev := range e.Events {
+		s += "\n" + ev.String()
+	}
+	return s
+}
+
+func (e *HcsError) Is(target error) bool {
+	return errors.Is(e.Err, target)
+}
+
+// Unwrap isn't strictly needed, but is a helpful convenience function.
+func (e *HcsError) Unwrap() error {
+	return e.Err
+}
+
+// Deprecated: net.Error.Temporary is deprecated.
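+// Temporary reports whether the wrapped error is a temporary network error,
+// delegating to the underlying net.Error when one is present in the chain.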
+func (e *HcsError) Temporary() bool {
+	err := e.netError()
+	return (err != nil) && err.Temporary()
+}
+
+func (e *HcsError) Timeout() bool {
+	err := e.netError()
+	return (err != nil) && err.Timeout()
+}
+
+func (e *HcsError) netError() (err net.Error) {
+	if errors.As(e.Unwrap(), &err) {
+		return err
+	}
+	return nil
+}
+
+// SystemError is an error encountered in HCS during an operation on a Container object
+type SystemError struct {
+	HcsError
+	ID string
+}
+
+var _ net.Error = &SystemError{}
+
+func (e *SystemError) Error() string {
+	s := e.Op + " " + e.ID + ": " + e.Err.Error()
+	for _, ev := range e.Events {
+		s += "\n" + ev.String()
+	}
+	return s
+}
+
+func makeSystemError(system *System, op string, err error, events []ErrorEvent) error {
+	// Don't double wrap errors
+	var e *SystemError
+	if errors.As(err, &e) {
+		return err
+	}
+
+	return &SystemError{
+		ID: system.ID(),
+		HcsError: HcsError{
+			Op:     op,
+			Err:    err,
+			Events: events,
+		},
+	}
+}
+
+// ProcessError is an error encountered in HCS during an operation on a Process object
+type ProcessError struct {
+	HcsError
+	SystemID string
+	Pid      int
+}
+
+var _ net.Error = &ProcessError{}
+
+func (e *ProcessError) Error() string {
+	s := fmt.Sprintf("%s %s:%d: %s", e.Op, e.SystemID, e.Pid, e.Err.Error())
+	for _, ev := range e.Events {
+		s += "\n" + ev.String()
+	}
+	return s
+}
+
+func makeProcessError(process *Process, op string, err error, events []ErrorEvent) error {
+	// Don't double wrap errors
+	var e *ProcessError
+	if errors.As(err, &e) {
+		return err
+	}
+	return &ProcessError{
+		Pid:      process.Pid(),
+		SystemID: process.SystemID(),
+		HcsError: HcsError{
+			Op:     op,
+			Err:    err,
+			Events: events,
+		},
+	}
+}
+
+// IsNotExist checks if an error is caused by the Container or Process not existing.
+// Note: Currently, ErrElementNotFound can mean that a Process has either
+// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
+// will currently return true when the error is ErrElementNotFound.
+func IsNotExist(err error) bool {
+	return IsAny(err, ErrComputeSystemDoesNotExist, ErrElementNotFound)
+}
+
+// IsErrorInvalidHandle checks whether the error is the result of an operation carried
+// out on a handle that is invalid/closed. This error popped up while trying to query
+// stats on a container in the process of being stopped.
+func IsErrorInvalidHandle(err error) bool {
+	return errors.Is(err, ErrInvalidHandle)
+}
+
+// IsAlreadyClosed checks if an error is caused by the Container or Process having been
+// already closed by a call to the Close() method.
+func IsAlreadyClosed(err error) bool {
+	return errors.Is(err, ErrAlreadyClosed)
+}
+
+// IsPending returns a boolean indicating whether the error is that
+// the requested operation is being completed in the background.
+func IsPending(err error) bool {
+	return errors.Is(err, ErrVmcomputeOperationPending)
+}
+
+// IsTimeout returns a boolean indicating whether the error is caused by
+// a timeout waiting for the operation to complete.
+func IsTimeout(err error) bool {
+	// HcsError and co. implement Timeout regardless of whether the errors they wrap do,
+	// so `errors.As(err, net.Error)` will always be true.
+	// Using `errors.As(err.Unwrap(), net.Error)` won't work for general errors.
+	// So first check if there is an `ErrTimeout` in the chain, then convert to a net error.
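+	// For example, &HcsError{Op: "hcs::System::Wait", Err: ErrTimeout} matches
+	// the errors.Is branch below, while a genuinely wrapped net.Error timeout
+	// is caught by the errors.As fallback.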
+	if errors.Is(err, ErrTimeout) {
+		return true
+	}
+
+	var nerr net.Error
+	return errors.As(err, &nerr) && nerr.Timeout()
+}
+
+// IsAlreadyStopped returns a boolean indicating whether the error is caused by
+// a Container or Process being already stopped.
+// Note: Currently, ErrElementNotFound can mean that a Process has either
+// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
+// will currently return true when the error is ErrElementNotFound.
+func IsAlreadyStopped(err error) bool {
+	return IsAny(err, ErrVmcomputeAlreadyStopped, ErrProcessAlreadyStopped, ErrElementNotFound)
+}
+
+// IsNotSupported returns a boolean indicating whether the error is caused by
+// unsupported platform requests.
+// Note: Currently an unsupported platform request can mean that any of
+// ErrVmcomputeInvalidJSON, ErrInvalidData, ErrNotSupported or ErrVmcomputeUnknownMessage
+// is returned from the platform.
+func IsNotSupported(err error) bool {
+	// If the platform doesn't recognize or support the request sent, the errors below are seen
+	return IsAny(err, ErrVmcomputeInvalidJSON, ErrInvalidData, ErrNotSupported, ErrVmcomputeUnknownMessage)
+}
+
+// IsOperationInvalidState returns true when err is caused by
+// `ErrVmcomputeOperationInvalidState`.
+func IsOperationInvalidState(err error) bool {
+	return errors.Is(err, ErrVmcomputeOperationInvalidState)
+}
+
+// IsAccessIsDenied returns true when err is caused by
+// `ErrVmcomputeOperationAccessIsDenied`.
+func IsAccessIsDenied(err error) bool {
+	return errors.Is(err, ErrVmcomputeOperationAccessIsDenied)
+}
+
+// IsAny is a vectorized version of [errors.Is]; it returns true if err is one of targets.
+func IsAny(err error, targets ...error) bool {
+	for _, e := range targets {
+		if errors.Is(err, e) {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go
new file mode 100644
index 0000000000..37afbf6917
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go
@@ -0,0 +1,583 @@
+//go:build windows
+
+package hcs
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"io"
+	"os"
+	"sync"
+	"syscall"
+	"time"
+
+	"go.opencensus.io/trace"
+
+	"github.com/Microsoft/hcsshim/internal/cow"
+	hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
+	"github.com/Microsoft/hcsshim/internal/log"
+	"github.com/Microsoft/hcsshim/internal/oc"
+	"github.com/Microsoft/hcsshim/internal/protocol/guestrequest"
+	"github.com/Microsoft/hcsshim/internal/vmcompute"
+)
+
+type Process struct {
+	handleLock          sync.RWMutex
+	handle              vmcompute.HcsProcess
+	processID           int
+	system              *System
+	hasCachedStdio      bool
+	stdioLock           sync.Mutex
+	stdin               io.WriteCloser
+	stdout              io.ReadCloser
+	stderr              io.ReadCloser
+	callbackNumber      uintptr
+	killSignalDelivered bool
+
+	closedWaitOnce sync.Once
+	waitBlock      chan struct{}
+	exitCode       int
+	waitError      error
+}
+
+var _ cow.Process = &Process{}
+
+func newProcess(process vmcompute.HcsProcess, processID int, computeSystem *System) *Process {
+	return &Process{
+		handle:    process,
+		processID: processID,
+		system:    computeSystem,
+		waitBlock: make(chan struct{}),
+	}
+}
+
+// Pid returns the process ID of the process within the container.
+func (process *Process) Pid() int {
+	return process.processID
+}
+
+// SystemID returns the ID of the process's compute system.
+func (process *Process) SystemID() string {
+	return process.system.ID()
+}
+
+func (process *Process) processSignalResult(ctx context.Context, err error) (bool, error) {
+	switch err {
+	case nil:
+		return true, nil
+	case ErrVmcomputeOperationInvalidState, ErrComputeSystemDoesNotExist, ErrElementNotFound:
+		if !process.stopped() {
+			// The process should be gone, but we have not received the notification.
+			// After a second, force unblock the process wait to work around a possible
+			// deadlock in the HCS.
+			go func() {
+				time.Sleep(time.Second)
+				process.closedWaitOnce.Do(func() {
+					log.G(ctx).WithError(err).Warn("force unblocking process waits")
+					process.exitCode = -1
+					process.waitError = err
+					close(process.waitBlock)
+				})
+			}()
+		}
+		return false, nil
+	default:
+		return false, err
+	}
+}
+
+// Signal signals the process with `options`.
+//
+// For LCOW `guestresource.SignalProcessOptionsLCOW`.
+//
+// For WCOW `guestresource.SignalProcessOptionsWCOW`.
+func (process *Process) Signal(ctx context.Context, options interface{}) (bool, error) {
+	process.handleLock.RLock()
+	defer process.handleLock.RUnlock()
+
+	operation := "hcs::Process::Signal"
+
+	if process.handle == 0 {
+		return false, makeProcessError(process, operation, ErrAlreadyClosed, nil)
+	}
+
+	optionsb, err := json.Marshal(options)
+	if err != nil {
+		return false, err
+	}
+
+	resultJSON, err := vmcompute.HcsSignalProcess(ctx, process.handle, string(optionsb))
+	events := processHcsResult(ctx, resultJSON)
+	delivered, err := process.processSignalResult(ctx, err)
+	if err != nil {
+		err = makeProcessError(process, operation, err, events)
+	}
+	return delivered, err
+}
+
+// Kill signals the process to terminate but does not wait for it to finish terminating.
+func (process *Process) Kill(ctx context.Context) (bool, error) {
+	process.handleLock.RLock()
+	defer process.handleLock.RUnlock()
+
+	operation := "hcs::Process::Kill"
+
+	if process.handle == 0 {
+		return false, makeProcessError(process, operation, ErrAlreadyClosed, nil)
+	}
+
+	if process.stopped() {
+		return false, makeProcessError(process, operation, ErrProcessAlreadyStopped, nil)
+	}
+
+	if process.killSignalDelivered {
+		// A kill signal has already been sent to this process. Sending a second
+		// one offers no real benefit, as processes cannot stop themselves from
+		// being terminated, once a TerminateProcess has been issued. Sending a
+		// second kill may result in a number of errors (two of which are detailed below)
+		// and which we can avoid handling.
+		return true, nil
+	}
+
+	// HCS serializes the signals sent to a target pid per compute system handle.
+	// To avoid SIGKILL being serialized behind other signals, we open a new compute
+	// system handle to deliver the kill signal.
+	// If the call to open a new compute system handle fails, we forcefully
+	// terminate the container itself so that no container is left behind.
+	hcsSystem, err := OpenComputeSystem(ctx, process.system.id)
+	if err != nil {
+		// log error and force termination of container
+		log.G(ctx).WithField("err", err).Error("OpenComputeSystem() call failed")
+		err = process.system.Terminate(ctx)
+		// if the Terminate() call itself ever failed, log and return error
+		if err != nil {
+			log.G(ctx).WithField("err", err).Error("Terminate() call failed")
+			return false, err
+		}
+		process.system.Close()
+		return true, nil
+	}
+	defer hcsSystem.Close()
+
+	newProcessHandle, err := hcsSystem.OpenProcess(ctx, process.Pid())
+	if err != nil {
+		// Return true only if the target process has either already
+		// exited, or does not exist.
+		if IsAlreadyStopped(err) {
+			return true, nil
+		} else {
+			return false, err
+		}
+	}
+	defer newProcessHandle.Close()
+
+	resultJSON, err := vmcompute.HcsTerminateProcess(ctx, newProcessHandle.handle)
+	if err != nil {
+		// We still need to check these two cases, as processes may still be killed by an
+		// external actor (human operator, OOM, random script etc).
+		if errors.Is(err, os.ErrPermission) || IsAlreadyStopped(err) {
+			// There are two cases where it should be safe to ignore an error returned
+			// by HcsTerminateProcess. The first one is caused by the fact that
+			// HcsTerminateProcess ends up calling TerminateProcess in the context
+			// of a container. According to the TerminateProcess documentation:
+			// https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-terminateprocess#remarks
+			// After a process has terminated, a call to TerminateProcess with open
+			// handles to the process fails with the ERROR_ACCESS_DENIED (5) error code.
+			// It's safe to ignore this error here. HCS should always have permissions
+			// to kill processes inside any container. So an ERROR_ACCESS_DENIED
+			// is unlikely to be anything else than what the ending remarks in the
+			// documentation state.
+			//
+			// The second case is generated by hcs itself, if for any reason HcsTerminateProcess
+			// is called twice in a very short amount of time. In such cases, hcs may return
+			// HCS_E_PROCESS_ALREADY_STOPPED.
+			return true, nil
+		}
+	}
+	events := processHcsResult(ctx, resultJSON)
+	delivered, err := newProcessHandle.processSignalResult(ctx, err)
+	if err != nil {
+		err = makeProcessError(newProcessHandle, operation, err, events)
+	}
+
+	process.killSignalDelivered = delivered
+	return delivered, err
+}
+
+// waitBackground waits for the process exit notification. Once received sets
+// `process.waitError` (if any) and unblocks all `Wait` calls.
+//
+// This MUST be called exactly once per `process.handle` but `Wait` is safe to
+// call multiple times.
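+// Completion is broadcast by closing `process.waitBlock` inside
+// `process.closedWaitOnce`, which is what makes repeated Wait calls safe.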
+func (process *Process) waitBackground() {
+	operation := "hcs::Process::waitBackground"
+	ctx, span := oc.StartSpan(context.Background(), operation)
+	defer span.End()
+	span.AddAttributes(
+		trace.StringAttribute("cid", process.SystemID()),
+		trace.Int64Attribute("pid", int64(process.processID)))
+
+	var (
+		err            error
+		exitCode       = -1
+		propertiesJSON string
+		resultJSON     string
+	)
+
+	err = waitForNotification(ctx, process.callbackNumber, hcsNotificationProcessExited, nil)
+	if err != nil {
+		err = makeProcessError(process, operation, err, nil)
+		log.G(ctx).WithError(err).Error("failed wait")
+	} else {
+		process.handleLock.RLock()
+		defer process.handleLock.RUnlock()
+
+		// Make sure we didn't race with Close() here
+		if process.handle != 0 {
+			propertiesJSON, resultJSON, err = vmcompute.HcsGetProcessProperties(ctx, process.handle)
+			events := processHcsResult(ctx, resultJSON)
+			if err != nil {
+				err = makeProcessError(process, operation, err, events)
+			} else {
+				properties := &hcsschema.ProcessStatus{}
+				err = json.Unmarshal([]byte(propertiesJSON), properties)
+				if err != nil {
+					err = makeProcessError(process, operation, err, nil)
+				} else {
+					if properties.LastWaitResult != 0 {
+						log.G(ctx).WithField("wait-result", properties.LastWaitResult).Warning("non-zero last wait result")
+					} else {
+						exitCode = int(properties.ExitCode)
+					}
+				}
+			}
+		}
+	}
+	log.G(ctx).WithField("exitCode", exitCode).Debug("process exited")
+
+	process.closedWaitOnce.Do(func() {
+		process.exitCode = exitCode
+		process.waitError = err
+		close(process.waitBlock)
+	})
+	oc.SetSpanStatus(span, err)
+}
+
+// Wait waits for the process to exit. If the process has already exited returns
+// the previous error (if any).
+func (process *Process) Wait() error {
+	<-process.waitBlock
+	return process.waitError
+}
+
+// stopped returns true if the process has exited.
+func (process *Process) stopped() bool {
+	select {
+	case <-process.waitBlock:
+		return true
+	default:
+		return false
+	}
+}
+
+// ResizeConsole resizes the console of the process.
+func (process *Process) ResizeConsole(ctx context.Context, width, height uint16) error {
+	process.handleLock.RLock()
+	defer process.handleLock.RUnlock()
+
+	operation := "hcs::Process::ResizeConsole"
+
+	if process.handle == 0 {
+		return makeProcessError(process, operation, ErrAlreadyClosed, nil)
+	}
+	modifyRequest := hcsschema.ProcessModifyRequest{
+		Operation: guestrequest.ModifyProcessConsoleSize,
+		ConsoleSize: &hcsschema.ConsoleSize{
+			Height: height,
+			Width:  width,
+		},
+	}
+
+	modifyRequestb, err := json.Marshal(modifyRequest)
+	if err != nil {
+		return err
+	}
+
+	resultJSON, err := vmcompute.HcsModifyProcess(ctx, process.handle, string(modifyRequestb))
+	events := processHcsResult(ctx, resultJSON)
+	if err != nil {
+		return makeProcessError(process, operation, err, events)
+	}
+
+	return nil
+}
+
+// ExitCode returns the exit code of the process. The process must have
+// already terminated.
+func (process *Process) ExitCode() (int, error) {
+	if !process.stopped() {
+		return -1, makeProcessError(process, "hcs::Process::ExitCode", ErrInvalidProcessState, nil)
+	}
+	if process.waitError != nil {
+		return -1, process.waitError
+	}
+	return process.exitCode, nil
+}
+
+// StdioLegacy returns the stdin, stdout, and stderr pipes, respectively. Closing
+// these pipes does not close the underlying pipes. Once returned, these pipes
+// are the responsibility of the caller to close.
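+// If the stdio pipes were cached at process creation, the cached handles are
+// handed out exactly once and the cache is cleared; otherwise the handles are
+// re-queried from the HCS via HcsGetProcessInfo.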
+func (process *Process) StdioLegacy() (_ io.WriteCloser, _ io.ReadCloser, _ io.ReadCloser, err error) {
+	operation := "hcs::Process::StdioLegacy"
+	ctx, span := oc.StartSpan(context.Background(), operation)
+	defer span.End()
+	defer func() { oc.SetSpanStatus(span, err) }()
+	span.AddAttributes(
+		trace.StringAttribute("cid", process.SystemID()),
+		trace.Int64Attribute("pid", int64(process.processID)))
+
+	process.handleLock.RLock()
+	defer process.handleLock.RUnlock()
+
+	if process.handle == 0 {
+		return nil, nil, nil, makeProcessError(process, operation, ErrAlreadyClosed, nil)
+	}
+
+	process.stdioLock.Lock()
+	defer process.stdioLock.Unlock()
+	if process.hasCachedStdio {
+		stdin, stdout, stderr := process.stdin, process.stdout, process.stderr
+		process.stdin, process.stdout, process.stderr = nil, nil, nil
+		process.hasCachedStdio = false
+		return stdin, stdout, stderr, nil
+	}
+
+	processInfo, resultJSON, err := vmcompute.HcsGetProcessInfo(ctx, process.handle)
+	events := processHcsResult(ctx, resultJSON)
+	if err != nil {
+		return nil, nil, nil, makeProcessError(process, operation, err, events)
+	}
+
+	pipes, err := makeOpenFiles([]syscall.Handle{processInfo.StdInput, processInfo.StdOutput, processInfo.StdError})
+	if err != nil {
+		return nil, nil, nil, makeProcessError(process, operation, err, nil)
+	}
+
+	return pipes[0], pipes[1], pipes[2], nil
+}
+
+// Stdio returns the stdin, stdout, and stderr pipes, respectively.
+// To close them, close the process handle, or use the `CloseStd*` functions.
+func (process *Process) Stdio() (stdin io.Writer, stdout, stderr io.Reader) {
+	process.stdioLock.Lock()
+	defer process.stdioLock.Unlock()
+	return process.stdin, process.stdout, process.stderr
+}
+
+// CloseStdin closes the write side of the stdin pipe so that the process is
+// notified on the read side that there is no more data in stdin.
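+// If the process has already exited, the HCS modify request is skipped (it
+// would fail); the locally cached stdin pipe is still closed either way.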
+func (process *Process) CloseStdin(ctx context.Context) (err error) {
+	operation := "hcs::Process::CloseStdin"
+	ctx, span := trace.StartSpan(ctx, operation)
+	defer span.End()
+	defer func() { oc.SetSpanStatus(span, err) }()
+	span.AddAttributes(
+		trace.StringAttribute("cid", process.SystemID()),
+		trace.Int64Attribute("pid", int64(process.processID)))
+
+	process.handleLock.RLock()
+	defer process.handleLock.RUnlock()
+
+	if process.handle == 0 {
+		return makeProcessError(process, operation, ErrAlreadyClosed, nil)
+	}
+
+	// An HcsModifyProcess request to close stdin will fail if the process has already exited
+	if !process.stopped() {
+		modifyRequest := hcsschema.ProcessModifyRequest{
+			Operation: guestrequest.CloseProcessHandle,
+			CloseHandle: &hcsschema.CloseHandle{
+				Handle: guestrequest.STDInHandle,
+			},
+		}
+
+		modifyRequestb, err := json.Marshal(modifyRequest)
+		if err != nil {
+			return err
+		}
+
+		resultJSON, err := vmcompute.HcsModifyProcess(ctx, process.handle, string(modifyRequestb))
+		events := processHcsResult(ctx, resultJSON)
+		if err != nil {
+			return makeProcessError(process, operation, err, events)
+		}
+	}
+
+	process.stdioLock.Lock()
+	defer process.stdioLock.Unlock()
+	if process.stdin != nil {
+		process.stdin.Close()
+		process.stdin = nil
+	}
+
+	return nil
+}
+
+func (process *Process) CloseStdout(ctx context.Context) (err error) {
+	ctx, span := oc.StartSpan(ctx, "hcs::Process::CloseStdout") //nolint:ineffassign,staticcheck
+	defer span.End()
+	defer func() { oc.SetSpanStatus(span, err) }()
+	span.AddAttributes(
+		trace.StringAttribute("cid", process.SystemID()),
+		trace.Int64Attribute("pid", int64(process.processID)))
+
+	process.handleLock.Lock()
+	defer process.handleLock.Unlock()
+
+	if process.handle == 0 {
+		return nil
+	}
+
+	process.stdioLock.Lock()
+	defer process.stdioLock.Unlock()
+	if process.stdout != nil {
+		process.stdout.Close()
+		process.stdout = nil
+	}
+	return nil
+}
+
+func (process *Process) CloseStderr(ctx context.Context) (err error) {
+	ctx, span := oc.StartSpan(ctx, "hcs::Process::CloseStderr") //nolint:ineffassign,staticcheck
+	defer span.End()
+	defer func() { oc.SetSpanStatus(span, err) }()
+	span.AddAttributes(
+		trace.StringAttribute("cid", process.SystemID()),
+		trace.Int64Attribute("pid", int64(process.processID)))
+
+	process.handleLock.Lock()
+	defer process.handleLock.Unlock()
+
+	if process.handle == 0 {
+		return nil
+	}
+
+	process.stdioLock.Lock()
+	defer process.stdioLock.Unlock()
+	if process.stderr != nil {
+		process.stderr.Close()
+		process.stderr = nil
+	}
+	return nil
+}
+
+// Close cleans up any state associated with the process but does not kill
+// or wait on it.
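+// Close is safe to call more than once: subsequent calls observe a zero
+// handle and return nil without unregistering callbacks again.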
+func (process *Process) Close() (err error) {
+	operation := "hcs::Process::Close"
+	ctx, span := oc.StartSpan(context.Background(), operation)
+	defer span.End()
+	defer func() { oc.SetSpanStatus(span, err) }()
+	span.AddAttributes(
+		trace.StringAttribute("cid", process.SystemID()),
+		trace.Int64Attribute("pid", int64(process.processID)))
+
+	process.handleLock.Lock()
+	defer process.handleLock.Unlock()
+
+	// Don't double free this
+	if process.handle == 0 {
+		return nil
+	}
+
+	process.stdioLock.Lock()
+	if process.stdin != nil {
+		process.stdin.Close()
+		process.stdin = nil
+	}
+	if process.stdout != nil {
+		process.stdout.Close()
+		process.stdout = nil
+	}
+	if process.stderr != nil {
+		process.stderr.Close()
+		process.stderr = nil
+	}
+	process.stdioLock.Unlock()
+
+	if err = process.unregisterCallback(ctx); err != nil {
+		return makeProcessError(process, operation, err, nil)
+	}
+
+	if err = vmcompute.HcsCloseProcess(ctx, process.handle); err != nil {
+		return makeProcessError(process, operation, err, nil)
+	}
+
+	process.handle = 0
+	process.closedWaitOnce.Do(func() {
+		process.exitCode = -1
+		process.waitError = ErrAlreadyClosed
+		close(process.waitBlock)
+	})
+
+	return nil
+}
+
+func (process *Process) registerCallback(ctx context.Context) error {
+	callbackContext := &notificationWatcherContext{
+		channels:  newProcessChannels(),
+		systemID:  process.SystemID(),
+		processID: process.processID,
+	}
+
+	callbackMapLock.Lock()
+	callbackNumber := nextCallback
+	nextCallback++
+	callbackMap[callbackNumber] = callbackContext
+	callbackMapLock.Unlock()
+
+	callbackHandle, err := vmcompute.HcsRegisterProcessCallback(ctx, process.handle, notificationWatcherCallback, callbackNumber)
+	if err != nil {
+		return err
+	}
+	callbackContext.handle = callbackHandle
+	process.callbackNumber = callbackNumber
+
+	return nil
+}
+
+func (process *Process) unregisterCallback(ctx context.Context) error {
+	callbackNumber := process.callbackNumber
+
+	callbackMapLock.RLock()
+	callbackContext := callbackMap[callbackNumber]
+	callbackMapLock.RUnlock()
+
+	if callbackContext == nil {
+		return nil
+	}
+
+	handle := callbackContext.handle
+
+	if handle == 0 {
+		return nil
+	}
+
+	// vmcompute.HcsUnregisterProcessCallback has its own synchronization to
+	// wait for all callbacks to complete. We must NOT hold the callbackMapLock.
+	err := vmcompute.HcsUnregisterProcessCallback(ctx, handle)
+	if err != nil {
+		return err
+	}
+
+	closeChannels(callbackContext.channels)
+
+	callbackMapLock.Lock()
+	delete(callbackMap, callbackNumber)
+	callbackMapLock.Unlock()
+
+	handle = 0 //nolint:ineffassign
+
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go
new file mode 100644
index 0000000000..d1f219cfad
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go
@@ -0,0 +1,252 @@
+//go:build windows
+
+package schema1
+
+import (
+	"encoding/json"
+	"time"
+
+	"github.com/Microsoft/go-winio/pkg/guid"
+	hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
+)
+
+// ProcessConfig is used as both the input of Container.CreateProcess
+// and to convert the parameters to JSON for passing onto the HCS
+type ProcessConfig struct {
+	ApplicationName   string            `json:",omitempty"`
+	CommandLine       string            `json:",omitempty"`
+	CommandArgs       []string          `json:",omitempty"` // Used by Linux Containers on Windows
+	User              string            `json:",omitempty"`
+	WorkingDirectory  string            `json:",omitempty"`
+	Environment       map[string]string `json:",omitempty"`
+	EmulateConsole    bool              `json:",omitempty"`
+	CreateStdInPipe   bool              `json:",omitempty"`
+	CreateStdOutPipe  bool              `json:",omitempty"`
+	CreateStdErrPipe  bool              `json:",omitempty"`
+	ConsoleSize       [2]uint           `json:",omitempty"`
+	CreateInUtilityVm bool              `json:",omitempty"` // Used by Linux Containers on Windows
+	OCISpecification  *json.RawMessage  `json:",omitempty"` // Used by Linux Containers on Windows
+}
+
+type Layer struct {
+	ID   string
+	Path string
+}
+
+type MappedDir struct {
+	HostPath          string
+	ContainerPath     string
+	ReadOnly          bool
+	BandwidthMaximum  uint64
+	IOPSMaximum       uint64
+	CreateInUtilityVM bool
+	// LinuxMetadata - Support added in 1803/RS4+.
+	LinuxMetadata bool `json:",omitempty"`
+}
+
+type MappedPipe struct {
+	HostPath          string
+	ContainerPipeName string
+}
+
+type HvRuntime struct {
+	ImagePath           string `json:",omitempty"`
+	SkipTemplate        bool   `json:",omitempty"`
+	LinuxInitrdFile     string `json:",omitempty"` // File under ImagePath on host containing an initrd image for starting a Linux utility VM
+	LinuxKernelFile     string `json:",omitempty"` // File under ImagePath on host containing a kernel for starting a Linux utility VM
+	LinuxBootParameters string `json:",omitempty"` // Additional boot parameters for starting a Linux Utility VM in initrd mode
+	BootSource          string `json:",omitempty"` // "Vhd" for Linux Utility VM booting from VHD
+	WritableBootSource  bool   `json:",omitempty"` // Linux Utility VM booting from VHD
+}
+
+type MappedVirtualDisk struct {
+	HostPath          string `json:",omitempty"` // Path to VHD on the host
+	ContainerPath     string // Platform-specific mount point path in the container
+	CreateInUtilityVM bool   `json:",omitempty"`
+	ReadOnly          bool   `json:",omitempty"`
+	Cache             string `json:",omitempty"` // "" (Unspecified); "Disabled"; "Enabled"; "Private"; "PrivateAllowSharing"
+	AttachOnly        bool   `json:",omitempty"`
+}
+
+// AssignedDevice represents a device that has been directly assigned to a container
+//
+// NOTE: Support added in RS5
+type AssignedDevice struct {
+	// InterfaceClassGUID of the device to assign to container.
+	InterfaceClassGUID string `json:"InterfaceClassGuid,omitempty"`
+}
+
+// ContainerConfig is used as both the input of CreateContainer
+// and to convert the parameters to JSON for passing onto the HCS
+type ContainerConfig struct {
+	SystemType                  string              // HCS requires this to be hard-coded to "Container"
+	Name                        string              // Name of the container. We use the docker ID.
+	Owner                       string              `json:",omitempty"` // The management platform that created this container
+	VolumePath                  string              `json:",omitempty"` // Windows volume path for scratch space. Used by Windows Server Containers only. Format \\?\\Volume{GUID}
+	IgnoreFlushesDuringBoot     bool                `json:",omitempty"` // Optimization hint for container startup in Windows
+	LayerFolderPath             string              `json:",omitempty"` // Where the layer folders are located. Used by Windows Server Containers only. Format %root%\windowsfilter\containerID
+	Layers                      []Layer             // List of storage layers. Required for Windows Server and Hyper-V Containers. Format ID=GUID;Path=%root%\windowsfilter\layerID
+	Credentials                 string              `json:",omitempty"` // Credentials information
+	ProcessorCount              uint32              `json:",omitempty"` // Number of processors to assign to the container.
+	ProcessorWeight             uint64              `json:",omitempty"` // CPU shares (relative weight to other containers with cpu shares). Range is from 1 to 10000. A value of 0 results in default shares.
+	ProcessorMaximum            int64               `json:",omitempty"` // Specifies the portion of processor cycles that this container can use as a percentage times 100. Range is from 1 to 10000. A value of 0 results in no limit.
+	StorageIOPSMaximum          uint64              `json:",omitempty"` // Maximum Storage IOPS
+	StorageBandwidthMaximum     uint64              `json:",omitempty"` // Maximum Storage Bandwidth in bytes per second
+	StorageSandboxSize          uint64              `json:",omitempty"` // Size in bytes that the container system drive should be expanded to if smaller
+	MemoryMaximumInMB           int64               `json:",omitempty"` // Maximum memory available to the container in Megabytes
+	HostName                    string              `json:",omitempty"` // Hostname
+	MappedDirectories           []MappedDir         `json:",omitempty"` // List of mapped directories (volumes/mounts)
+	MappedPipes                 []MappedPipe        `json:",omitempty"` // List of mapped Windows named pipes
+	HvPartition                 bool                // True if it is a Hyper-V Container
+	NetworkSharedContainerName  string              `json:",omitempty"` // Name (ID) of the container that we will share the network stack with.
+	EndpointList                []string            `json:",omitempty"` // List of networking endpoints to be attached to container
+	HvRuntime                   *HvRuntime          `json:",omitempty"` // Hyper-V container settings. Used by Hyper-V containers only. Format ImagePath=%root%\BaseLayerID\UtilityVM
+	Servicing                   bool                `json:",omitempty"` // True if this container is for servicing
+	AllowUnqualifiedDNSQuery    bool                `json:",omitempty"` // True to allow unqualified DNS name resolution
+	DNSSearchList               string              `json:",omitempty"` // Comma separated list of DNS suffixes to use for name resolution
+	ContainerType               string              `json:",omitempty"` // "Linux" for Linux containers on Windows. Omitted otherwise.
+	TerminateOnLastHandleClosed bool                `json:",omitempty"` // Should HCS terminate the container once all handles have been closed
+	MappedVirtualDisks          []MappedVirtualDisk `json:",omitempty"` // Array of virtual disks to mount at start
+	AssignedDevices             []AssignedDevice    `json:",omitempty"` // Array of devices to assign. NOTE: Support added in RS5
+}
+
+type ComputeSystemQuery struct {
+	IDs    []string `json:"Ids,omitempty"`
+	Types  []string `json:",omitempty"`
+	Names  []string `json:",omitempty"`
+	Owners []string `json:",omitempty"`
+}
+
+type PropertyType string
+
+const (
+	PropertyTypeStatistics        PropertyType = "Statistics"        // V1 and V2
+	PropertyTypeProcessList       PropertyType = "ProcessList"       // V1 and V2
+	PropertyTypeMappedVirtualDisk PropertyType = "MappedVirtualDisk" // Not supported in V2 schema call
+	PropertyTypeGuestConnection   PropertyType = "GuestConnection"   // V1 and V2. Nil return from HCS before RS5
+)
+
+type PropertyQuery struct {
+	PropertyTypes []PropertyType `json:",omitempty"`
+}
+
+// ContainerProperties holds the properties for a container and the processes running in that container
+type ContainerProperties struct {
+	ID                           string `json:"Id"`
+	State                        string
+	Name                         string
+	SystemType                   string
+	RuntimeOSType                string `json:"RuntimeOsType,omitempty"`
+	Owner                        string
+	SiloGUID                     string                              `json:"SiloGuid,omitempty"`
+	RuntimeID                    guid.GUID                           `json:"RuntimeId,omitempty"`
+	IsRuntimeTemplate            bool                                `json:",omitempty"`
+	RuntimeImagePath             string                              `json:",omitempty"`
+	Stopped                      bool                                `json:",omitempty"`
+	ExitType                     string                              `json:",omitempty"`
+	AreUpdatesPending            bool                                `json:",omitempty"`
+	ObRoot                       string                              `json:",omitempty"`
+	Statistics                   Statistics                          `json:",omitempty"`
+	ProcessList                  []ProcessListItem                   `json:",omitempty"`
+	MappedVirtualDiskControllers map[int]MappedVirtualDiskController `json:",omitempty"`
+	GuestConnectionInfo          GuestConnectionInfo                 `json:",omitempty"`
+}
+
+// MemoryStats holds the memory statistics for a container
+type MemoryStats struct {
+	UsageCommitBytes            uint64 `json:"MemoryUsageCommitBytes,omitempty"`
+	UsageCommitPeakBytes        uint64 `json:"MemoryUsageCommitPeakBytes,omitempty"`
+	UsagePrivateWorkingSetBytes uint64 `json:"MemoryUsagePrivateWorkingSetBytes,omitempty"`
+}
+
+// ProcessorStats holds the processor statistics for a container
+type ProcessorStats struct {
+	TotalRuntime100ns  uint64 `json:",omitempty"`
+	RuntimeUser100ns   uint64 `json:",omitempty"`
+	RuntimeKernel100ns uint64 `json:",omitempty"`
+}
+
+// StorageStats holds the storage statistics for a container
+type StorageStats struct {
+	ReadCountNormalized  uint64 `json:",omitempty"`
+	ReadSizeBytes        uint64 `json:",omitempty"`
+	WriteCountNormalized uint64 `json:",omitempty"`
+	WriteSizeBytes       uint64 `json:",omitempty"`
+}
+
+// NetworkStats holds the network statistics for a container
+type NetworkStats struct {
+	BytesReceived          uint64 `json:",omitempty"`
+	BytesSent              uint64 `json:",omitempty"`
+	PacketsReceived        uint64 `json:",omitempty"`
+	PacketsSent            uint64 `json:",omitempty"`
+	DroppedPacketsIncoming uint64 `json:",omitempty"`
+	DroppedPacketsOutgoing uint64 `json:",omitempty"`
+	EndpointId             string `json:",omitempty"`
+	InstanceId             string `json:",omitempty"`
+}
+
+// Statistics is the structure returned by a statistics call on a container
+type Statistics struct {
+	Timestamp          time.Time      `json:",omitempty"`
+	ContainerStartTime time.Time      `json:",omitempty"`
+	Uptime100ns        uint64         `json:",omitempty"`
+	Memory             MemoryStats    `json:",omitempty"`
+	Processor          ProcessorStats `json:",omitempty"`
+	Storage            StorageStats   `json:",omitempty"`
+	Network            []NetworkStats `json:",omitempty"`
+}
+
+// ProcessListItem is the structure of an item returned by a ProcessList call on a container
+type ProcessListItem struct {
+	CreateTimestamp time.Time `json:",omitempty"`
+	ImageName       string    `json:",omitempty"`
+	KernelTime100ns uint64    `json:",omitempty"`
+	MemoryCommitBytes            uint64 `json:",omitempty"`
+	MemoryWorkingSetPrivateBytes uint64 `json:",omitempty"`
+	MemoryWorkingSetSharedBytes  uint64 `json:",omitempty"`
+	ProcessId                    uint32 `json:",omitempty"`
+	UserTime100ns                uint64 `json:",omitempty"`
+}
+
+// MappedVirtualDiskController is the structure of an item returned by a MappedVirtualDiskList call on a container
+type MappedVirtualDiskController struct {
+	MappedVirtualDisks map[int]MappedVirtualDisk `json:",omitempty"`
+}
+
+// GuestDefinedCapabilities is part of the GuestConnectionInfo returned by a GuestConnection call on a utility VM
+type GuestDefinedCapabilities struct {
+	NamespaceAddRequestSupported  bool `json:",omitempty"`
+	SignalProcessSupported        bool `json:",omitempty"`
+	DumpStacksSupported           bool `json:",omitempty"`
+	DeleteContainerStateSupported bool `json:",omitempty"`
+	UpdateContainerSupported      bool `json:",omitempty"`
+}
+
+// GuestConnectionInfo is the structure of an item returned by a GuestConnection call on a utility VM
+type GuestConnectionInfo struct {
+	SupportedSchemaVersions  []hcsschema.Version      `json:",omitempty"`
+	ProtocolVersion          uint32                   `json:",omitempty"`
+	GuestDefinedCapabilities GuestDefinedCapabilities `json:",omitempty"`
+}
+
+// RequestType is the type of request supported in ModifySystem
+type RequestType string
+
+// ResourceType is the type of resource supported in ModifySystem
+type ResourceType string
+
+// RequestType const
+const (
+	Add     RequestType  = "Add"
+	Remove  RequestType  = "Remove"
+	Network ResourceType = "Network"
+)
+
+// ResourceModificationRequestResponse is the structure used to send a request to the container to modify the system
+// Supported resource types are Network and Request Types are Add/Remove
+type ResourceModificationRequestResponse struct {
+	Resource ResourceType `json:"ResourceType"`
+	Data     interface{}  `json:"Settings"`
+	Request  RequestType  `json:"RequestType,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go
new file mode 100644
index 0000000000..70884aad75
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go
@@ -0,0 +1,36 @@
+/*
+ * HCS API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.1
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type Attachment struct {
+	Type_ string `json:"Type,omitempty"`
+
+	Path string `json:"Path,omitempty"`
+
+	IgnoreFlushes bool `json:"IgnoreFlushes,omitempty"`
+
+	CachingMode string `json:"CachingMode,omitempty"`
+
+	NoWriteHardening bool `json:"NoWriteHardening,omitempty"`
+
+	DisableExpansionOptimization bool `json:"DisableExpansionOptimization,omitempty"`
+
+	IgnoreRelativeLocator bool `json:"IgnoreRelativeLocator,omitempty"`
+
+	CaptureIoAttributionContext bool `json:"CaptureIoAttributionContext,omitempty"`
+
+	ReadOnly bool `json:"ReadOnly,omitempty"`
+
+	SupportCompressedVolumes bool `json:"SupportCompressedVolumes,omitempty"`
+
+	AlwaysAllowSparseFiles bool `json:"AlwaysAllowSparseFiles,omitempty"`
+
+	ExtensibleVirtualDiskType string `json:"ExtensibleVirtualDiskType,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/battery.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/battery.go
new file mode 100644
index 0000000000..ecbbed4c23
--- /dev/null
b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/battery.go @@ -0,0 +1,13 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Battery struct { +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cache_query_stats_response.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cache_query_stats_response.go new file mode 100644 index 0000000000..c1ea3953b5 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cache_query_stats_response.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type CacheQueryStatsResponse struct { + L3OccupancyBytes int32 `json:"L3OccupancyBytes,omitempty"` + + L3TotalBwBytes int32 `json:"L3TotalBwBytes,omitempty"` + + L3LocalBwBytes int32 `json:"L3LocalBwBytes,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/chipset.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/chipset.go new file mode 100644 index 0000000000..ca75277a3f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/chipset.go @@ -0,0 +1,27 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Chipset struct { + Uefi *Uefi `json:"Uefi,omitempty"` + + IsNumLockDisabled bool `json:"IsNumLockDisabled,omitempty"` + + BaseBoardSerialNumber string `json:"BaseBoardSerialNumber,omitempty"` + + ChassisSerialNumber string `json:"ChassisSerialNumber,omitempty"` + + ChassisAssetTag string `json:"ChassisAssetTag,omitempty"` + + UseUtc bool `json:"UseUtc,omitempty"` + + // LinuxKernelDirect - Added in v2.2 Builds >=181117 + LinuxKernelDirect *LinuxKernelDirect `json:"LinuxKernelDirect,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cim_mount.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cim_mount.go new file mode 100644 index 0000000000..81865e7ea4 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cim_mount.go @@ -0,0 +1,25 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.5 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +const ( + CimMountFlagNone uint32 = 0x0 + CimMountFlagChildOnly uint32 = 0x1 + CimMountFlagEnableDax uint32 = 0x2 + CimMountFlagCacheFiles uint32 = 0x4 + CimMountFlagCacheRegions uint32 = 0x8 +) + +type CimMount struct { + ImagePath string `json:"ImagePath,omitempty"` + FileSystemName string `json:"FileSystemName,omitempty"` + VolumeGuid string `json:"VolumeGuid,omitempty"` + MountFlags uint32 `json:"MountFlags,omitempty"` +} diff --git 
a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/close_handle.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/close_handle.go new file mode 100644 index 0000000000..bb36777b82 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/close_handle.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +import "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" + +type CloseHandle struct { + Handle guestrequest.STDIOHandle `json:"Handle,omitempty"` // NOTE: Swagger generated as string. Locally updated. +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/com_port.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/com_port.go new file mode 100644 index 0000000000..8bf8cab60e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/com_port.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// ComPort specifies the named pipe that will be used for the port, with empty string indicating a disconnected port. +type ComPort struct { + NamedPipe string `json:"NamedPipe,omitempty"` + + OptimizeForDebugger bool `json:"OptimizeForDebugger,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/compute_system.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/compute_system.go new file mode 100644 index 0000000000..10cea67e04 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/compute_system.go @@ -0,0 +1,26 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ComputeSystem struct { + Owner string `json:"Owner,omitempty"` + + SchemaVersion *Version `json:"SchemaVersion,omitempty"` + + HostingSystemId string `json:"HostingSystemId,omitempty"` + + HostedSystem interface{} `json:"HostedSystem,omitempty"` + + Container *Container `json:"Container,omitempty"` + + VirtualMachine *VirtualMachine `json:"VirtualMachine,omitempty"` + + ShouldTerminateOnLastHandleClosed bool `json:"ShouldTerminateOnLastHandleClosed,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/configuration.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/configuration.go new file mode 100644 index 0000000000..1d5dfe68ad --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/configuration.go @@ -0,0 +1,72 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +import ( + "net/http" +) + +// contextKeys are used to identify the type of value in the context. 
+// Since these are strings, it is possible to get a short description of the +// context key for logging and debugging using key.String(). + +type contextKey string + +func (c contextKey) String() string { + return "auth " + string(c) +} + +var ( + // ContextOAuth2 takes an oauth2.TokenSource as authentication for the request. + ContextOAuth2 = contextKey("token") + + // ContextBasicAuth takes BasicAuth as authentication for the request. + ContextBasicAuth = contextKey("basic") + + // ContextAccessToken takes a string oauth2 access token as authentication for the request. + ContextAccessToken = contextKey("accesstoken") + + // ContextAPIKey takes an APIKey as authentication for the request. + ContextAPIKey = contextKey("apikey") +) + +// BasicAuth provides basic HTTP authentication to a request passed via context using ContextBasicAuth +type BasicAuth struct { + UserName string `json:"userName,omitempty"` + Password string `json:"password,omitempty"` +} + +// APIKey provides API key based authentication to a request passed via context using ContextAPIKey +type APIKey struct { + Key string + Prefix string +} + +type Configuration struct { + BasePath string `json:"basePath,omitempty"` + Host string `json:"host,omitempty"` + Scheme string `json:"scheme,omitempty"` + DefaultHeader map[string]string `json:"defaultHeader,omitempty"` + UserAgent string `json:"userAgent,omitempty"` + HTTPClient *http.Client +} + +func NewConfiguration() *Configuration { + cfg := &Configuration{ + BasePath: "https://localhost", + DefaultHeader: make(map[string]string), + UserAgent: "Swagger-Codegen/2.1.0/go", + } + return cfg +} + +func (c *Configuration) AddDefaultHeader(key string, value string) { + c.DefaultHeader[key] = value +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/console_size.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/console_size.go new file mode 100644 index 0000000000..347da50e86 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/console_size.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// NOTE: Swagger generated fields as int32. Locally updated to uint16 to match documentation.
+// https://learn.microsoft.com/en-us/virtualization/api/hcs/schemareference#ConsoleSize + +type ConsoleSize struct { + Height uint16 `json:"Height,omitempty"` + + Width uint16 `json:"Width,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go new file mode 100644 index 0000000000..39a54432c0 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go @@ -0,0 +1,36 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Container struct { + GuestOs *GuestOs `json:"GuestOs,omitempty"` + + Storage *Storage `json:"Storage,omitempty"` + + MappedDirectories []MappedDirectory `json:"MappedDirectories,omitempty"` + + MappedPipes []MappedPipe `json:"MappedPipes,omitempty"` + + Memory *Memory `json:"Memory,omitempty"` + + Processor *Processor `json:"Processor,omitempty"` + + Networking *Networking `json:"Networking,omitempty"` + + HvSocket *HvSocket `json:"HvSocket,omitempty"` + + ContainerCredentialGuard *ContainerCredentialGuardState `json:"ContainerCredentialGuard,omitempty"` + + RegistryChanges *RegistryChanges `json:"RegistryChanges,omitempty"` + + AssignedDevices []Device `json:"AssignedDevices,omitempty"` + + AdditionalDeviceNamespace *ContainerDefinitionDevice `json:"AdditionalDeviceNamespace,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_add_instance_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_add_instance_request.go new file mode 100644 index 0000000000..495c6ebc8f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_add_instance_request.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardAddInstanceRequest struct { + Id string `json:"Id,omitempty"` + CredentialSpec string `json:"CredentialSpec,omitempty"` + Transport string `json:"Transport,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_hv_socket_service_config.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_hv_socket_service_config.go new file mode 100644 index 0000000000..1ed4c008f2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_hv_socket_service_config.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardHvSocketServiceConfig struct { + ServiceId string `json:"ServiceId,omitempty"` + ServiceConfig *HvSocketServiceConfig `json:"ServiceConfig,omitempty"` +} diff --git 
a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_instance.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_instance.go new file mode 100644 index 0000000000..d7ebd0fcca --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_instance.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardInstance struct { + Id string `json:"Id,omitempty"` + CredentialGuard *ContainerCredentialGuardState `json:"CredentialGuard,omitempty"` + HvSocketConfig *ContainerCredentialGuardHvSocketServiceConfig `json:"HvSocketConfig,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_modify_operation.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_modify_operation.go new file mode 100644 index 0000000000..71005b090b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_modify_operation.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardModifyOperation string + +const ( + AddInstance ContainerCredentialGuardModifyOperation = "AddInstance" + RemoveInstance ContainerCredentialGuardModifyOperation = "RemoveInstance" +) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_operation_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_operation_request.go new file mode 100644 index 0000000000..952cda4965 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_operation_request.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardOperationRequest struct { + Operation ContainerCredentialGuardModifyOperation `json:"Operation,omitempty"` + OperationDetails interface{} `json:"OperationDetails,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_remove_instance_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_remove_instance_request.go new file mode 100644 index 0000000000..32e5a3beed --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_remove_instance_request.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema 
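+
+// Example (editor's illustrative sketch, not part of the vendored upstream
+// source): a remove-instance call is typically expressed by wrapping this
+// request in the ContainerCredentialGuardOperationRequest defined above,
+// with the RemoveInstance operation; the Id value here is a hypothetical
+// placeholder:
+//
+//	req := ContainerCredentialGuardOperationRequest{
+//		Operation:        RemoveInstance,
+//		OperationDetails: ContainerCredentialGuardRemoveInstanceRequest{Id: "example-ccg-instance"},
+//	}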
+ +type ContainerCredentialGuardRemoveInstanceRequest struct { + Id string `json:"Id,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_state.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_state.go new file mode 100644 index 0000000000..0f8f644379 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_state.go @@ -0,0 +1,25 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardState struct { + + // Authentication cookie for calls to a Container Credential Guard instance. + Cookie string `json:"Cookie,omitempty"` + + // Name of the RPC endpoint of the Container Credential Guard instance. + RpcEndpoint string `json:"RpcEndpoint,omitempty"` + + // Transport used for the configured Container Credential Guard instance. + Transport string `json:"Transport,omitempty"` + + // Credential spec used for the configured Container Credential Guard instance. + CredentialSpec string `json:"CredentialSpec,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_system_info.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_system_info.go new file mode 100644 index 0000000000..ea306fa21a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_system_info.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardSystemInfo struct { + Instances []ContainerCredentialGuardInstance `json:"Instances,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_memory_information.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_memory_information.go new file mode 100644 index 0000000000..1fd7ca5d56 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_memory_information.go @@ -0,0 +1,25 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// memory usage as viewed from within the container +type ContainerMemoryInformation struct { + TotalPhysicalBytes int32 `json:"TotalPhysicalBytes,omitempty"` + + TotalUsage int32 `json:"TotalUsage,omitempty"` + + CommittedBytes int32 `json:"CommittedBytes,omitempty"` + + SharedCommittedBytes int32 `json:"SharedCommittedBytes,omitempty"` + + CommitLimitBytes int32 `json:"CommitLimitBytes,omitempty"` + + PeakCommitmentBytes int32 `json:"PeakCommitmentBytes,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group.go new file mode 100644 index 
0000000000..90332a5190 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// CPU groups allow Hyper-V administrators to better manage and allocate the host's CPU resources across guest virtual machines +type CpuGroup struct { + Id string `json:"Id,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_affinity.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_affinity.go new file mode 100644 index 0000000000..8794961bf5 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_affinity.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type CpuGroupAffinity struct { + LogicalProcessorCount int32 `json:"LogicalProcessorCount,omitempty"` + LogicalProcessors []int32 `json:"LogicalProcessors,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go new file mode 100644 index 0000000000..0be0475d41 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type CpuGroupConfig struct { + GroupId string `json:"GroupId,omitempty"` + Affinity *CpuGroupAffinity `json:"Affinity,omitempty"` + GroupProperties []CpuGroupProperty `json:"GroupProperties,omitempty"` + // Hypervisor CPU group IDs exposed to clients + HypervisorGroupId uint64 `json:"HypervisorGroupId,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_configurations.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_configurations.go new file mode 100644 index 0000000000..3ace0ccc3b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_configurations.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Structure used to return cpu groups for a Service property query +type CpuGroupConfigurations struct { + CpuGroups []CpuGroupConfig `json:"CpuGroups,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_operations.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_operations.go new file mode 100644 index 0000000000..7d89780701 --- /dev/null +++ 
b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_operations.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type CPUGroupOperation string + +const ( + CreateGroup CPUGroupOperation = "CreateGroup" + DeleteGroup CPUGroupOperation = "DeleteGroup" + SetProperty CPUGroupOperation = "SetProperty" +) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_property.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_property.go new file mode 100644 index 0000000000..31fe07c3aa --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_property.go @@ -0,0 +1,23 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type CPUGroupPropertyCode uint32 + +const ( + CPUCapacityProperty = 0x00010000 + CPUSchedulingPriorityProperty = 0x00020000 + IdleLPReserveProperty = 0x00030000 +) + +type CpuGroupProperty struct { + PropertyCode uint32 `json:"PropertyCode,omitempty"` + PropertyValue uint32 `json:"PropertyValue,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/create_group_operation.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/create_group_operation.go new file mode 100644 index 0000000000..91a8278fe3 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/create_group_operation.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Create group operation settings +type CreateGroupOperation struct { + GroupId string `json:"GroupId,omitempty"` + LogicalProcessorCount uint32 `json:"LogicalProcessorCount,omitempty"` + LogicalProcessors []uint32 `json:"LogicalProcessors,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/debug_options.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/debug_options.go new file mode 100644 index 0000000000..5385850fe4 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/debug_options.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DebugOptions struct { + // BugcheckSavedStateFileName is the path for the file in which the guest VM state will be saved when + // the guest crashes. + BugcheckSavedStateFileName string `json:"BugcheckSavedStateFileName,omitempty"` + // BugcheckNoCrashdumpSavedStateFileName is the path of the file in which the guest VM state will be + // saved when the guest crashes but the guest isn't able to generate the crash dump. 
This usually + // happens in early boot failures. + BugcheckNoCrashdumpSavedStateFileName string `json:"BugcheckNoCrashdumpSavedStateFileName,omitempty"` + TripleFaultSavedStateFileName string `json:"TripleFaultSavedStateFileName,omitempty"` + FirmwareDumpFileName string `json:"FirmwareDumpFileName,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/delete_group_operation.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/delete_group_operation.go new file mode 100644 index 0000000000..134bd98817 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/delete_group_operation.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Delete group operation settings +type DeleteGroupOperation struct { + GroupId string `json:"GroupId,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go new file mode 100644 index 0000000000..31c4538aff --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go @@ -0,0 +1,27 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceType string + +const ( + ClassGUID DeviceType = "ClassGuid" + DeviceInstanceID DeviceType = "DeviceInstance" + GPUMirror DeviceType = "GpuMirror" +) + +type Device struct { + // The type of device to assign to the container. + Type DeviceType `json:"Type,omitempty"` + // The interface class guid of the device interfaces to assign to the container. Only used when Type is ClassGuid. + InterfaceClassGuid string `json:"InterfaceClassGuid,omitempty"` + // The location path of the device to assign to the container. Only used when Type is DeviceInstanceID. 
+ LocationPath string `json:"LocationPath,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/devices.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/devices.go new file mode 100644 index 0000000000..e985d96d22 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/devices.go @@ -0,0 +1,46 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Devices struct { + ComPorts map[string]ComPort `json:"ComPorts,omitempty"` + + Scsi map[string]Scsi `json:"Scsi,omitempty"` + + VirtualPMem *VirtualPMemController `json:"VirtualPMem,omitempty"` + + NetworkAdapters map[string]NetworkAdapter `json:"NetworkAdapters,omitempty"` + + VideoMonitor *VideoMonitor `json:"VideoMonitor,omitempty"` + + Keyboard *Keyboard `json:"Keyboard,omitempty"` + + Mouse *Mouse `json:"Mouse,omitempty"` + + HvSocket *HvSocket2 `json:"HvSocket,omitempty"` + + EnhancedModeVideo *EnhancedModeVideo `json:"EnhancedModeVideo,omitempty"` + + GuestCrashReporting *GuestCrashReporting `json:"GuestCrashReporting,omitempty"` + + VirtualSmb *VirtualSmb `json:"VirtualSmb,omitempty"` + + Plan9 *Plan9 `json:"Plan9,omitempty"` + + Battery *Battery `json:"Battery,omitempty"` + + FlexibleIov map[string]FlexibleIoDevice `json:"FlexibleIov,omitempty"` + + SharedMemory *SharedMemoryConfiguration `json:"SharedMemory,omitempty"` + + // TODO: This is pre-release support in schema 2.3. Need to add build number + // docs when a public build with this is out. + VirtualPci map[string]VirtualPciDevice `json:",omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/enhanced_mode_video.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/enhanced_mode_video.go new file mode 100644 index 0000000000..85450c41e1 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/enhanced_mode_video.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type EnhancedModeVideo struct { + ConnectionOptions *RdpConnectionOptions `json:"ConnectionOptions,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/flexible_io_device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/flexible_io_device.go new file mode 100644 index 0000000000..fe86cab655 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/flexible_io_device.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type FlexibleIoDevice struct { + EmulatorId string `json:"EmulatorId,omitempty"` + + HostingModel string `json:"HostingModel,omitempty"` + + Configuration []string `json:"Configuration,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection.go 
b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection.go new file mode 100644 index 0000000000..7db29495b3 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type GuestConnection struct { + + // Use Vsock rather than Hyper-V sockets to communicate with the guest service. + UseVsock bool `json:"UseVsock,omitempty"` + + // Don't disconnect the guest connection when pausing the virtual machine. + UseConnectedSuspend bool `json:"UseConnectedSuspend,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection_info.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection_info.go new file mode 100644 index 0000000000..8a369bab71 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection_info.go @@ -0,0 +1,21 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Information about the guest. +type GuestConnectionInfo struct { + + // Each schema version x.y stands for the range of versions a.b where a==x and b<=y. This list comes from the SupportedSchemaVersions field in GcsCapabilities. + SupportedSchemaVersions []Version `json:"SupportedSchemaVersions,omitempty"` + + ProtocolVersion int32 `json:"ProtocolVersion,omitempty"` + + GuestDefinedCapabilities *interface{} `json:"GuestDefinedCapabilities,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_crash_reporting.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_crash_reporting.go new file mode 100644 index 0000000000..af82800483 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_crash_reporting.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type GuestCrashReporting struct { + WindowsCrashSettings *WindowsCrashReporting `json:"WindowsCrashSettings,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_os.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_os.go new file mode 100644 index 0000000000..8838519a39 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_os.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type GuestOs struct { + HostName string `json:"HostName,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_state.go 
b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_state.go new file mode 100644 index 0000000000..a48a653945 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_state.go @@ -0,0 +1,25 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type GuestState struct { + + // The path to an existing file used for persistent guest state storage. An empty string indicates the system should initialize new transient, in-memory guest state. + GuestStateFilePath string `json:"GuestStateFilePath,omitempty"` + + // The guest state file type affected by different guest isolation modes - whether a file or block storage. + GuestStateFileType string `json:"GuestStateFileType,omitempty"` + + // The path to an existing file for persistent runtime state storage. An empty string indicates the system should initialize new transient, in-memory runtime state. + RuntimeStateFilePath string `json:"RuntimeStateFilePath,omitempty"` + + // If true, the guest state and runtime state files will be used as templates to populate transient, in-memory state instead of using the files as persistent backing store. + ForceTransientState bool `json:"ForceTransientState,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/host_processor_modify_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/host_processor_modify_request.go new file mode 100644 index 0000000000..2238ce5306 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/host_processor_modify_request.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Structure used to request a service processor modification +type HostProcessorModificationRequest struct { + Operation CPUGroupOperation `json:"Operation,omitempty"` + OperationDetails interface{} `json:"OperationDetails,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hosted_system.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hosted_system.go new file mode 100644 index 0000000000..ea3084bca7 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hosted_system.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type HostedSystem struct { + SchemaVersion *Version `json:"SchemaVersion,omitempty"` + + Container *Container `json:"Container,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket.go new file mode 100644 index 0000000000..23b2ee9e7d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger
Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type HvSocket struct { + Config *HvSocketSystemConfig `json:"Config,omitempty"` + + EnablePowerShellDirect bool `json:"EnablePowerShellDirect,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_2.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_2.go new file mode 100644 index 0000000000..a017691f02 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_2.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// HvSocket configuration for a VM +type HvSocket2 struct { + HvSocketConfig *HvSocketSystemConfig `json:"HvSocketConfig,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_address.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_address.go new file mode 100644 index 0000000000..84c11b93ee --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_address.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// This class defines address settings applied to a VM +// by the GCS every time a VM starts or restores. +type HvSocketAddress struct { + LocalAddress string `json:"LocalAddress,omitempty"` + ParentAddress string `json:"ParentAddress,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_service_config.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_service_config.go new file mode 100644 index 0000000000..ecd9f7fbac --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_service_config.go @@ -0,0 +1,28 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type HvSocketServiceConfig struct { + + // SDDL string that HvSocket will check before allowing a host process to bind to this specific service. If not specified, defaults to the system DefaultBindSecurityDescriptor, defined in HvSocketSystemWpConfig in V1. + BindSecurityDescriptor string `json:"BindSecurityDescriptor,omitempty"` + + // SDDL string that HvSocket will check before allowing a host process to connect to this specific service. If not specified, defaults to the system DefaultConnectSecurityDescriptor, defined in HvSocketSystemWpConfig in V1. + ConnectSecurityDescriptor string `json:"ConnectSecurityDescriptor,omitempty"` + + // If true, HvSocket will process wildcard binds for this service/system combination. 
Wildcard binds are secured in the registry at SOFTWARE/Microsoft/Windows NT/CurrentVersion/Virtualization/HvSocket/WildcardDescriptors + AllowWildcardBinds bool `json:"AllowWildcardBinds,omitempty"` + + // Disabled controls whether the HvSocket service is accepting connection requests. + // Setting this to true will make the service refuse all incoming connections as well as cancel + // any connections already established. The service itself will still be active, however, + // and can be re-enabled at a future time. + Disabled bool `json:"Disabled,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_system_config.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_system_config.go new file mode 100644 index 0000000000..69f4f9d39b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_system_config.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// This is the HCS Schema version of the HvSocket configuration. The VMWP version is located in Config.Devices.IC in V1. +type HvSocketSystemConfig struct { + + // SDDL string that HvSocket will check before allowing a host process to bind to an unlisted service for this specific container/VM (not wildcard binds). + DefaultBindSecurityDescriptor string `json:"DefaultBindSecurityDescriptor,omitempty"` + + // SDDL string that HvSocket will check before allowing a host process to connect to an unlisted service in the VM/container. + DefaultConnectSecurityDescriptor string `json:"DefaultConnectSecurityDescriptor,omitempty"` + + ServiceTable map[string]HvSocketServiceConfig `json:"ServiceTable,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/interrupt_moderation_mode.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/interrupt_moderation_mode.go new file mode 100644 index 0000000000..a614d63bd7 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/interrupt_moderation_mode.go @@ -0,0 +1,42 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type InterruptModerationName string + +// The valid interrupt moderation modes for I/O virtualization (IOV) offloading.
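+// (Editor's illustrative note, not upstream documentation: a numeric
+// InterruptModerationValue maps back to its name through the
+// InterruptModerationValueToName table defined at the bottom of this file,
+// e.g. InterruptModerationValueToName[MediumValue] yields MediumName.)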
+const ( + DefaultName InterruptModerationName = "Default" + AdaptiveName InterruptModerationName = "Adaptive" + OffName InterruptModerationName = "Off" + LowName InterruptModerationName = "Low" + MediumName InterruptModerationName = "Medium" + HighName InterruptModerationName = "High" +) + +type InterruptModerationValue uint32 + +const ( + DefaultValue InterruptModerationValue = iota + AdaptiveValue + OffValue + LowValue InterruptModerationValue = 100 + MediumValue InterruptModerationValue = 200 + HighValue InterruptModerationValue = 300 +) + +var InterruptModerationValueToName = map[InterruptModerationValue]InterruptModerationName{ + DefaultValue: DefaultName, + AdaptiveValue: AdaptiveName, + OffValue: OffName, + LowValue: LowName, + MediumValue: MediumName, + HighValue: HighName, +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/iov_settings.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/iov_settings.go new file mode 100644 index 0000000000..2a55cc37cd --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/iov_settings.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type IovSettings struct { + // The weight assigned to this port for I/O virtualization (IOV) offloading. + // Setting this to 0 disables IOV offloading. + OffloadWeight *uint32 `json:"OffloadWeight,omitempty"` + + // The number of queue pairs requested for this port for I/O virtualization (IOV) offloading. + QueuePairsRequested *uint32 `json:"QueuePairsRequested,omitempty"` + + // The interrupt moderation mode for I/O virtualization (IOV) offloading. 
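+	// (Editor's illustrative sketch, not upstream documentation: because the
+	// fields of IovSettings are pointers, callers typically take the address
+	// of locals, e.g.
+	//
+	//	weight := uint32(100)
+	//	mode := AdaptiveName
+	//	iov := IovSettings{OffloadWeight: &weight, InterruptModeration: &mode}
+	//
+	// with the names as declared in this package.)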
InterruptModeration *InterruptModerationName `json:"InterruptModeration,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/isolation_settings.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/isolation_settings.go new file mode 100644 index 0000000000..a34c2f99ae --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/isolation_settings.go @@ -0,0 +1,21 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type IsolationSettings struct { + // Guest isolation type options that decide the virtual trust levels of the virtual machine + IsolationType string `json:"IsolationType,omitempty"` + // Configuration to debug the HCL layer for an HCS VM. TODO: Task 31102306: missing a way to prevent the exposure of private debug configuration in HCS TODO: Think about the secret configurations which are private in the VMMS VM (only editable by hvsedit) + DebugHost string `json:"DebugHost,omitempty"` + DebugPort int64 `json:"DebugPort,omitempty"` + // Optional data passed by the host on isolated virtual machine start + LaunchData string `json:"LaunchData,omitempty"` + HclEnabled *bool `json:"HclEnabled,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/keyboard.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/keyboard.go new file mode 100644 index 0000000000..3d3fa3b1c7 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/keyboard.go @@ -0,0 +1,13 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Keyboard struct { +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go new file mode 100644 index 0000000000..176c49d495 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go @@ -0,0 +1,21 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Layer struct { + Id string `json:"Id,omitempty"` + + Path string `json:"Path,omitempty"` + + PathType string `json:"PathType,omitempty"` + + // Unspecified defaults to Enabled + Cache string `json:"Cache,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/linux_kernel_direct.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/linux_kernel_direct.go new file mode 100644 index 0000000000..0ab6c280fc --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/linux_kernel_direct.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.2 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type
LinuxKernelDirect struct { + KernelFilePath string `json:"KernelFilePath,omitempty"` + + InitRdPath string `json:"InitRdPath,omitempty"` + + KernelCmdLine string `json:"KernelCmdLine,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/logical_processor.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/logical_processor.go new file mode 100644 index 0000000000..2e3aa5e175 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/logical_processor.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type LogicalProcessor struct { + LpIndex uint32 `json:"LpIndex,omitempty"` + NodeNumber uint8 `json:"NodeNumber,omitempty"` + PackageId uint32 `json:"PackageId,omitempty"` + CoreId uint32 `json:"CoreId,omitempty"` + RootVpIndex int32 `json:"RootVpIndex,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_directory.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_directory.go new file mode 100644 index 0000000000..9b86a40457 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_directory.go @@ -0,0 +1,20 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type MappedDirectory struct { + HostPath string `json:"HostPath,omitempty"` + + HostPathType string `json:"HostPathType,omitempty"` + + ContainerPath string `json:"ContainerPath,omitempty"` + + ReadOnly bool `json:"ReadOnly,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_pipe.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_pipe.go new file mode 100644 index 0000000000..208074e9a2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_pipe.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type MappedPipe struct { + ContainerPipeName string `json:"ContainerPipeName,omitempty"` + + HostPath string `json:"HostPath,omitempty"` + + HostPathType string `json:"HostPathType,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory.go new file mode 100644 index 0000000000..30749c6724 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Memory struct { + SizeInMB uint64 `json:"SizeInMB,omitempty"` +} diff --git 
a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go new file mode 100644 index 0000000000..71224c75b9 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go @@ -0,0 +1,49 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Memory2 struct { + SizeInMB uint64 `json:"SizeInMB,omitempty"` + + AllowOvercommit bool `json:"AllowOvercommit,omitempty"` + + EnableHotHint bool `json:"EnableHotHint,omitempty"` + + EnableColdHint bool `json:"EnableColdHint,omitempty"` + + EnableEpf bool `json:"EnableEpf,omitempty"` + + // EnableDeferredCommit is private in the schema. If regenerated need to add back. + EnableDeferredCommit bool `json:"EnableDeferredCommit,omitempty"` + + // EnableColdDiscardHint if enabled, then the memory cold discard hint feature is exposed + // to the VM, allowing it to trim non-zeroed pages from the working set (if supported by + // the guest operating system). + EnableColdDiscardHint bool `json:"EnableColdDiscardHint,omitempty"` + + // LowMmioGapInMB is the low MMIO region allocated below 4GB. + // + // TODO: This is pre-release support in schema 2.3. Need to add build number + // docs when a public build with this is out. + LowMMIOGapInMB uint64 `json:"LowMmioGapInMB,omitempty"` + + // HighMmioBaseInMB is the high MMIO region allocated above 4GB (base and + // size). + // + // TODO: This is pre-release support in schema 2.3. Need to add build number + // docs when a public build with this is out. + HighMMIOBaseInMB uint64 `json:"HighMmioBaseInMB,omitempty"` + + // HighMmioGapInMB is the high MMIO region. + // + // TODO: This is pre-release support in schema 2.3. Need to add build number + // docs when a public build with this is out. 
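+	// (Editor's illustrative sketch, not upstream documentation: a caller
+	// populating Memory2 might write, with made-up sizes,
+	//
+	//	mem := Memory2{SizeInMB: 2048, AllowOvercommit: true, HighMMIOGapInMB: 1024}
+	//
+	// using only the fields declared in this struct.)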
+ HighMMIOGapInMB uint64 `json:"HighMmioGapInMB,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_information_for_vm.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_information_for_vm.go new file mode 100644 index 0000000000..811779b04b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_information_for_vm.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type MemoryInformationForVm struct { + VirtualNodeCount uint32 `json:"VirtualNodeCount,omitempty"` + + VirtualMachineMemory *VmMemory `json:"VirtualMachineMemory,omitempty"` + + VirtualNodes []VirtualNodeInfo `json:"VirtualNodes,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_stats.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_stats.go new file mode 100644 index 0000000000..906ba597f9 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_stats.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Memory runtime statistics +type MemoryStats struct { + MemoryUsageCommitBytes uint64 `json:"MemoryUsageCommitBytes,omitempty"` + + MemoryUsageCommitPeakBytes uint64 `json:"MemoryUsageCommitPeakBytes,omitempty"` + + MemoryUsagePrivateWorkingSetBytes uint64 `json:"MemoryUsagePrivateWorkingSetBytes,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_container_definition_device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_container_definition_device.go new file mode 100644 index 0000000000..8dbe40b3be --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_container_definition_device.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerDefinitionDevice struct { + DeviceExtension []DeviceExtension `json:"device_extension,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_category.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_category.go new file mode 100644 index 0000000000..8fe89f9274 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_category.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceCategory struct { + Name string `json:"name,omitempty"` + InterfaceClass []InterfaceClass `json:"interface_class,omitempty"` +} diff --git 
a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_extension.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_extension.go new file mode 100644 index 0000000000..a62568d892 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_extension.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceExtension struct { + DeviceCategory *DeviceCategory `json:"device_category,omitempty"` + Namespace *DeviceExtensionNamespace `json:"namespace,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_instance.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_instance.go new file mode 100644 index 0000000000..a7410febd6 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_instance.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceInstance struct { + Id string `json:"id,omitempty"` + LocationPath string `json:"location_path,omitempty"` + PortName string `json:"port_name,omitempty"` + InterfaceClass []InterfaceClass `json:"interface_class,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_namespace.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_namespace.go new file mode 100644 index 0000000000..3553640647 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_namespace.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceNamespace struct { + RequiresDriverstore bool `json:"requires_driverstore,omitempty"` + DeviceCategory []DeviceCategory `json:"device_category,omitempty"` + DeviceInstance []DeviceInstance `json:"device_instance,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_interface_class.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_interface_class.go new file mode 100644 index 0000000000..7be98b5410 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_interface_class.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type InterfaceClass struct { + Type_ string `json:"type,omitempty"` + Identifier string `json:"identifier,omitempty"` + Recurse bool `json:"recurse,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_namespace.go 
b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_namespace.go new file mode 100644 index 0000000000..3ab9cf1ecf --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_namespace.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceExtensionNamespace struct { + Ob *ObjectNamespace `json:"ob,omitempty"` + Device *DeviceNamespace `json:"device,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_directory.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_directory.go new file mode 100644 index 0000000000..d2f51b3b53 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_directory.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ObjectDirectory struct { + Name string `json:"name,omitempty"` + Clonesd string `json:"clonesd,omitempty"` + Shadow string `json:"shadow,omitempty"` + Symlink []ObjectSymlink `json:"symlink,omitempty"` + Objdir []ObjectDirectory `json:"objdir,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_namespace.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_namespace.go new file mode 100644 index 0000000000..47dfb55bfa --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_namespace.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ObjectNamespace struct { + Shadow string `json:"shadow,omitempty"` + Symlink []ObjectSymlink `json:"symlink,omitempty"` + Objdir []ObjectDirectory `json:"objdir,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_symlink.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_symlink.go new file mode 100644 index 0000000000..8867ebe5f0 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_symlink.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ObjectSymlink struct { + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + Scope string `json:"scope,omitempty"` + Pathtoclone string `json:"pathtoclone,omitempty"` + AccessMask int32 `json:"access_mask,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modification_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modification_request.go new file mode 100644 
index 0000000000..1384ed8882 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modification_request.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ModificationRequest struct { + PropertyType PropertyType `json:"PropertyType,omitempty"` + Settings interface{} `json:"Settings,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modify_setting_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modify_setting_request.go new file mode 100644 index 0000000000..6364da8e23 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modify_setting_request.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +import "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" + +type ModifySettingRequest struct { + ResourcePath string `json:"ResourcePath,omitempty"` + + RequestType guestrequest.RequestType `json:"RequestType,omitempty"` // NOTE: Swagger generated as string. Locally updated. + + Settings interface{} `json:"Settings,omitempty"` // NOTE: Swagger generated as *interface{}. Locally updated + + GuestRequest interface{} `json:"GuestRequest,omitempty"` // NOTE: Swagger generated as *interface{}. Locally updated +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mouse.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mouse.go new file mode 100644 index 0000000000..ccf8b938f3 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mouse.go @@ -0,0 +1,13 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Mouse struct { +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/network_adapter.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/network_adapter.go new file mode 100644 index 0000000000..7408abd317 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/network_adapter.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type NetworkAdapter struct { + EndpointId string `json:"EndpointId,omitempty"` + MacAddress string `json:"MacAddress,omitempty"` + // The I/O virtualization (IOV) offloading configuration. 
+ IovSettings *IovSettings `json:"IovSettings,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/networking.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/networking.go new file mode 100644 index 0000000000..e5ea187a29 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/networking.go @@ -0,0 +1,23 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Networking struct { + AllowUnqualifiedDnsQuery bool `json:"AllowUnqualifiedDnsQuery,omitempty"` + + DnsSearchList string `json:"DnsSearchList,omitempty"` + + NetworkSharedContainerName string `json:"NetworkSharedContainerName,omitempty"` + + // Guid in Windows; string in Linux + Namespace string `json:"Namespace,omitempty"` + + NetworkAdapters []string `json:"NetworkAdapters,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_notification.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_notification.go new file mode 100644 index 0000000000..d96c9501f3 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_notification.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Notification data that is indicated to components running in the Virtual Machine. 
+type PauseNotification struct { + Reason string `json:"Reason,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_options.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_options.go new file mode 100644 index 0000000000..21707a88eb --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_options.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Options for HcsPauseComputeSystem +type PauseOptions struct { + SuspensionLevel string `json:"SuspensionLevel,omitempty"` + + HostedNotification *PauseNotification `json:"HostedNotification,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9.go new file mode 100644 index 0000000000..29d8c8012f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Plan9 struct { + Shares []Plan9Share `json:"Shares,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9_share.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9_share.go new file mode 100644 index 0000000000..41f8fdea02 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9_share.go @@ -0,0 +1,34 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Plan9Share struct { + Name string `json:"Name,omitempty"` + + // The name by which the guest operating system can access this share, via the aname parameter in the Plan9 protocol. + AccessName string `json:"AccessName,omitempty"` + + Path string `json:"Path,omitempty"` + + Port int32 `json:"Port,omitempty"` + + // Flags are marked private. 
Until they are exported correctly + // + // ReadOnly 0x00000001 + // LinuxMetadata 0x00000004 + // CaseSensitive 0x00000008 + Flags int32 `json:"Flags,omitempty"` + + ReadOnly bool `json:"ReadOnly,omitempty"` + + UseShareRootIdentity bool `json:"UseShareRootIdentity,omitempty"` + + AllowedFiles []string `json:"AllowedFiles,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_details.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_details.go new file mode 100644 index 0000000000..e9a662dd59 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_details.go @@ -0,0 +1,33 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +import ( + "time" +) + +// Information about a process running in a container +type ProcessDetails struct { + ProcessId int32 `json:"ProcessId,omitempty"` + + ImageName string `json:"ImageName,omitempty"` + + CreateTimestamp time.Time `json:"CreateTimestamp,omitempty"` + + UserTime100ns int32 `json:"UserTime100ns,omitempty"` + + KernelTime100ns int32 `json:"KernelTime100ns,omitempty"` + + MemoryCommitBytes int32 `json:"MemoryCommitBytes,omitempty"` + + MemoryWorkingSetPrivateBytes int32 `json:"MemoryWorkingSetPrivateBytes,omitempty"` + + MemoryWorkingSetSharedBytes int32 `json:"MemoryWorkingSetSharedBytes,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_modify_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_modify_request.go new file mode 100644 index 0000000000..862b7911e2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_modify_request.go @@ -0,0 +1,21 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +import "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" + +// Passed to HcsRpc_ModifyProcess +type ProcessModifyRequest struct { + Operation guestrequest.ProcessModifyOperation `json:"Operation,omitempty"` // NOTE: Swagger generated as string. Locally updated. 
+ + ConsoleSize *ConsoleSize `json:"ConsoleSize,omitempty"` + + CloseHandle *CloseHandle `json:"CloseHandle,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_parameters.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_parameters.go new file mode 100644 index 0000000000..82b0d0532b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_parameters.go @@ -0,0 +1,46 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ProcessParameters struct { + ApplicationName string `json:"ApplicationName,omitempty"` + + CommandLine string `json:"CommandLine,omitempty"` + + // optional alternative to CommandLine, currently only supported by Linux GCS + CommandArgs []string `json:"CommandArgs,omitempty"` + + User string `json:"User,omitempty"` + + WorkingDirectory string `json:"WorkingDirectory,omitempty"` + + Environment map[string]string `json:"Environment,omitempty"` + + // if set, will run as low-privilege process + RestrictedToken bool `json:"RestrictedToken,omitempty"` + + // if set, ignore StdErrPipe + EmulateConsole bool `json:"EmulateConsole,omitempty"` + + CreateStdInPipe bool `json:"CreateStdInPipe,omitempty"` + + CreateStdOutPipe bool `json:"CreateStdOutPipe,omitempty"` + + CreateStdErrPipe bool `json:"CreateStdErrPipe,omitempty"` + + // height then width + ConsoleSize []int32 `json:"ConsoleSize,omitempty"` + + // if set, find an existing session for the user and create the process in it + UseExistingLogin bool `json:"UseExistingLogin,omitempty"` + + // if set, use the legacy console instead of conhost + UseLegacyConsole bool `json:"UseLegacyConsole,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_status.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_status.go new file mode 100644 index 0000000000..3c371d4650 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_status.go @@ -0,0 +1,24 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// NOTE: Swagger generated fields as int32. Locally updated to uint16 to match documentation. +// https://learn.microsoft.com/en-us/virtualization/api/hcs/schemareference#ConsoleSize + +// Status of a process running in a container +type ProcessStatus struct { + ProcessId uint32 `json:"ProcessId,omitempty"` // NOTE: Swagger generated as int32. Locally updated to match documentation. + + Exited bool `json:"Exited,omitempty"` + + ExitCode uint32 `json:"ExitCode,omitempty"` // NOTE: Swagger generated as int32. Locally updated to match documentation. 
+ + LastWaitResult int32 `json:"LastWaitResult,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor.go new file mode 100644 index 0000000000..bb24e88da1 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Processor struct { + Count int32 `json:"Count,omitempty"` + + Maximum int32 `json:"Maximum,omitempty"` + + Weight int32 `json:"Weight,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_2.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_2.go new file mode 100644 index 0000000000..c64f335ec7 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_2.go @@ -0,0 +1,23 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.5 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Processor2 struct { + Count int32 `json:"Count,omitempty"` + + Limit int32 `json:"Limit,omitempty"` + + Weight int32 `json:"Weight,omitempty"` + + ExposeVirtualizationExtensions bool `json:"ExposeVirtualizationExtensions,omitempty"` + + // An optional object that configures the CPU Group to which a Virtual Machine is going to bind. 
+ CpuGroup *CpuGroup `json:"CpuGroup,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_stats.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_stats.go new file mode 100644 index 0000000000..6157e25225 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_stats.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// CPU runtime statistics +type ProcessorStats struct { + TotalRuntime100ns uint64 `json:"TotalRuntime100ns,omitempty"` + + RuntimeUser100ns uint64 `json:"RuntimeUser100ns,omitempty"` + + RuntimeKernel100ns uint64 `json:"RuntimeKernel100ns,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_topology.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_topology.go new file mode 100644 index 0000000000..885156e77f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_topology.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ProcessorTopology struct { + LogicalProcessorCount uint32 `json:"LogicalProcessorCount,omitempty"` + LogicalProcessors []LogicalProcessor `json:"LogicalProcessors,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go new file mode 100644 index 0000000000..0c7efe8d40 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go @@ -0,0 +1,54 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +import ( + v1 "github.com/containerd/cgroups/v3/cgroup1/stats" +) + +type Properties struct { + Id string `json:"Id,omitempty"` + + SystemType string `json:"SystemType,omitempty"` + + RuntimeOsType string `json:"RuntimeOsType,omitempty"` + + Name string `json:"Name,omitempty"` + + Owner string `json:"Owner,omitempty"` + + RuntimeId string `json:"RuntimeId,omitempty"` + + RuntimeTemplateId string `json:"RuntimeTemplateId,omitempty"` + + State string `json:"State,omitempty"` + + Stopped bool `json:"Stopped,omitempty"` + + ExitType string `json:"ExitType,omitempty"` + + Memory *MemoryInformationForVm `json:"Memory,omitempty"` + + Statistics *Statistics `json:"Statistics,omitempty"` + + ProcessList []ProcessDetails `json:"ProcessList,omitempty"` + + TerminateOnLastHandleClosed bool `json:"TerminateOnLastHandleClosed,omitempty"` + + HostingSystemId string `json:"HostingSystemId,omitempty"` + + SharedMemoryRegionInfo []SharedMemoryRegionInfo `json:"SharedMemoryRegionInfo,omitempty"` + + GuestConnectionInfo *GuestConnectionInfo `json:"GuestConnectionInfo,omitempty"` + + // Metrics is not part of the API for HCS but this is used for LCOW v2 
to + // return the full cgroup metrics from the guest. + Metrics *v1.Metrics `json:"LCOWMetrics,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_query.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_query.go new file mode 100644 index 0000000000..d6d80df131 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_query.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// By default the basic properties will be returned. This query provides a way to request specific properties. +type PropertyQuery struct { + PropertyTypes []PropertyType `json:"PropertyTypes,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go new file mode 100644 index 0000000000..98f2c96edb --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go @@ -0,0 +1,26 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type PropertyType string + +const ( + PTMemory PropertyType = "Memory" + PTGuestMemory PropertyType = "GuestMemory" + PTStatistics PropertyType = "Statistics" + PTProcessList PropertyType = "ProcessList" + PTTerminateOnLastHandleClosed PropertyType = "TerminateOnLastHandleClosed" + PTSharedMemoryRegion PropertyType = "SharedMemoryRegion" + PTContainerCredentialGuard PropertyType = "ContainerCredentialGuard" // This field is not generated by swagger. This was added manually. 
+ PTGuestConnection PropertyType = "GuestConnection" + PTICHeartbeatStatus PropertyType = "ICHeartbeatStatus" + PTProcessorTopology PropertyType = "ProcessorTopology" + PTCPUGroup PropertyType = "CpuGroup" +) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/rdp_connection_options.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/rdp_connection_options.go new file mode 100644 index 0000000000..8d5f5c1719 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/rdp_connection_options.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type RdpConnectionOptions struct { + AccessSids []string `json:"AccessSids,omitempty"` + + NamedPipe string `json:"NamedPipe,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_changes.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_changes.go new file mode 100644 index 0000000000..006906f6e2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_changes.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type RegistryChanges struct { + AddValues []RegistryValue `json:"AddValues,omitempty"` + + DeleteKeys []RegistryKey `json:"DeleteKeys,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go new file mode 100644 index 0000000000..26fde99c74 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type RegistryKey struct { + Hive string `json:"Hive,omitempty"` + + Name string `json:"Name,omitempty"` + + Volatile bool `json:"Volatile,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go new file mode 100644 index 0000000000..3f203176c3 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go @@ -0,0 +1,30 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type RegistryValue struct { + Key *RegistryKey `json:"Key,omitempty"` + + Name string `json:"Name,omitempty"` + + Type_ string `json:"Type,omitempty"` + + // One and only one value type must be set. 
+ StringValue string `json:"StringValue,omitempty"` + + BinaryValue string `json:"BinaryValue,omitempty"` + + DWordValue int32 `json:"DWordValue,omitempty"` + + QWordValue int32 `json:"QWordValue,omitempty"` + + // Only used if RegistryValueType is CustomType. The data is in BinaryValue + CustomType int32 `json:"CustomType,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/restore_state.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/restore_state.go new file mode 100644 index 0000000000..778ff58735 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/restore_state.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type RestoreState struct { + + // The path to the save state file to restore the system from. + SaveStateFilePath string `json:"SaveStateFilePath,omitempty"` + + // The ID of the template system to clone this new system off of. An empty string indicates the system should not be cloned from a template. + TemplateSystemId string `json:"TemplateSystemId,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/save_options.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/save_options.go new file mode 100644 index 0000000000..e55fa1d98a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/save_options.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type SaveOptions struct { + + // The type of save operation to be performed. + SaveType string `json:"SaveType,omitempty"` + + // The path to the file that will contain the saved state. + SaveStateFilePath string `json:"SaveStateFilePath,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/scsi.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/scsi.go new file mode 100644 index 0000000000..bf253a470b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/scsi.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Scsi struct { + + // Map of attachments, where the key is the integer LUN number on the controller. 
+ Attachments map[string]Attachment `json:"Attachments,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/security_settings.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/security_settings.go new file mode 100644 index 0000000000..14f0299e32 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/security_settings.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type SecuritySettings struct { + // Enablement of Trusted Platform Module on the computer system + EnableTpm bool `json:"EnableTpm,omitempty"` + Isolation *IsolationSettings `json:"Isolation,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/service_properties.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/service_properties.go new file mode 100644 index 0000000000..b8142ca6a6 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/service_properties.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +import "encoding/json" + +type ServiceProperties struct { + // Changed Properties field to []json.RawMessage from []interface{} to avoid having to + // remarshal sp.Properties[n] and unmarshal into the type(s) we want. 
+ Properties []json.RawMessage `json:"Properties,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_configuration.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_configuration.go new file mode 100644 index 0000000000..df9baa9219 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_configuration.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type SharedMemoryConfiguration struct { + Regions []SharedMemoryRegion `json:"Regions,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region.go new file mode 100644 index 0000000000..825b71865d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type SharedMemoryRegion struct { + SectionName string `json:"SectionName,omitempty"` + + StartOffset int32 `json:"StartOffset,omitempty"` + + Length int32 `json:"Length,omitempty"` + + AllowGuestWrite bool `json:"AllowGuestWrite,omitempty"` + + HiddenFromGuest bool `json:"HiddenFromGuest,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region_info.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region_info.go new file mode 100644 index 0000000000..f67b08eb57 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region_info.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type SharedMemoryRegionInfo struct { + SectionName string `json:"SectionName,omitempty"` + + GuestPhysicalAddress int32 `json:"GuestPhysicalAddress,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/silo_properties.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/silo_properties.go new file mode 100644 index 0000000000..5eaf6a7f4a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/silo_properties.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Silo job information +type SiloProperties struct { + Enabled bool `json:"Enabled,omitempty"` + + JobName string `json:"JobName,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/statistics.go 
b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/statistics.go new file mode 100644 index 0000000000..ba7a6b3963 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/statistics.go @@ -0,0 +1,29 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +import ( + "time" +) + +// Runtime statistics for a container +type Statistics struct { + Timestamp time.Time `json:"Timestamp,omitempty"` + + ContainerStartTime time.Time `json:"ContainerStartTime,omitempty"` + + Uptime100ns uint64 `json:"Uptime100ns,omitempty"` + + Processor *ProcessorStats `json:"Processor,omitempty"` + + Memory *MemoryStats `json:"Memory,omitempty"` + + Storage *StorageStats `json:"Storage,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage.go new file mode 100644 index 0000000000..2627af9132 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage.go @@ -0,0 +1,21 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Storage struct { + + // List of layers that describe the parent hierarchy for a container's storage. These layers combined together, presented as a disposable and/or committable working storage, are used by the container to record all changes done to the parent layers. + Layers []Layer `json:"Layers,omitempty"` + + // Path that points to the scratch space of a container, where parent layers are combined together to present a new disposable and/or committable layer with the changes done during its runtime. 
+ Path string `json:"Path,omitempty"` + + QoS *StorageQoS `json:"QoS,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_qo_s.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_qo_s.go new file mode 100644 index 0000000000..9c5e6eb532 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_qo_s.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type StorageQoS struct { + IopsMaximum int32 `json:"IopsMaximum,omitempty"` + + BandwidthMaximum int32 `json:"BandwidthMaximum,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_stats.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_stats.go new file mode 100644 index 0000000000..4f042ffd93 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_stats.go @@ -0,0 +1,21 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Storage runtime statistics +type StorageStats struct { + ReadCountNormalized uint64 `json:"ReadCountNormalized,omitempty"` + + ReadSizeBytes uint64 `json:"ReadSizeBytes,omitempty"` + + WriteCountNormalized uint64 `json:"WriteCountNormalized,omitempty"` + + WriteSizeBytes uint64 `json:"WriteSizeBytes,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/system_time.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/system_time.go new file mode 100644 index 0000000000..72de801493 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/system_time.go @@ -0,0 +1,28 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type SystemTime struct { + Year int32 `json:"Year,omitempty"` + + Month int32 `json:"Month,omitempty"` + + DayOfWeek int32 `json:"DayOfWeek,omitempty"` + + Day int32 `json:"Day,omitempty"` + + Hour int32 `json:"Hour,omitempty"` + + Minute int32 `json:"Minute,omitempty"` + + Second int32 `json:"Second,omitempty"` + + Milliseconds int32 `json:"Milliseconds,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/time_zone_information.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/time_zone_information.go new file mode 100644 index 0000000000..529743d753 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/time_zone_information.go @@ -0,0 +1,26 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type TimeZoneInformation struct { + Bias int32 `json:"Bias,omitempty"` + + StandardName string 
`json:"StandardName,omitempty"` + + StandardDate *SystemTime `json:"StandardDate,omitempty"` + + StandardBias int32 `json:"StandardBias,omitempty"` + + DaylightName string `json:"DaylightName,omitempty"` + + DaylightDate *SystemTime `json:"DaylightDate,omitempty"` + + DaylightBias int32 `json:"DaylightBias,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/topology.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/topology.go new file mode 100644 index 0000000000..8348699403 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/topology.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Topology struct { + Memory *Memory2 `json:"Memory,omitempty"` + + Processor *Processor2 `json:"Processor,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi.go new file mode 100644 index 0000000000..9228923fe4 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Uefi struct { + EnableDebugger bool `json:"EnableDebugger,omitempty"` + + ApplySecureBootTemplate string `json:"ApplySecureBootTemplate,omitempty"` + + SecureBootTemplateId string `json:"SecureBootTemplateId,omitempty"` + + BootThis *UefiBootEntry `json:"BootThis,omitempty"` + + Console string `json:"Console,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi_boot_entry.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi_boot_entry.go new file mode 100644 index 0000000000..3ab409d825 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi_boot_entry.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type UefiBootEntry struct { + DeviceType string `json:"DeviceType,omitempty"` + + DevicePath string `json:"DevicePath,omitempty"` + + DiskNumber int32 `json:"DiskNumber,omitempty"` + + OptionalData string `json:"OptionalData,omitempty"` + + VmbFsRootPath string `json:"VmbFsRootPath,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/version.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/version.go new file mode 100644 index 0000000000..2abfccca31 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/version.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type 
Version struct { + Major int32 `json:"Major,omitempty"` + + Minor int32 `json:"Minor,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/video_monitor.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/video_monitor.go new file mode 100644 index 0000000000..ec5d0fb936 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/video_monitor.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VideoMonitor struct { + HorizontalResolution int32 `json:"HorizontalResolution,omitempty"` + + VerticalResolution int32 `json:"VerticalResolution,omitempty"` + + ConnectionOptions *RdpConnectionOptions `json:"ConnectionOptions,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go new file mode 100644 index 0000000000..1e0fab2890 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go @@ -0,0 +1,36 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualMachine struct { + + // StopOnReset is private in the schema. If regenerated need to put back. + StopOnReset bool `json:"StopOnReset,omitempty"` + + Chipset *Chipset `json:"Chipset,omitempty"` + + ComputeTopology *Topology `json:"ComputeTopology,omitempty"` + + Devices *Devices `json:"Devices,omitempty"` + + GuestState *GuestState `json:"GuestState,omitempty"` + + RestoreState *RestoreState `json:"RestoreState,omitempty"` + + RegistryChanges *RegistryChanges `json:"RegistryChanges,omitempty"` + + StorageQoS *StorageQoS `json:"StorageQoS,omitempty"` + + GuestConnection *GuestConnection `json:"GuestConnection,omitempty"` + + SecuritySettings *SecuritySettings `json:"SecuritySettings,omitempty"` + + DebugOptions *DebugOptions `json:"DebugOptions,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_node_info.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_node_info.go new file mode 100644 index 0000000000..91a3c83d4f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_node_info.go @@ -0,0 +1,20 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualNodeInfo struct { + VirtualNodeIndex int32 `json:"VirtualNodeIndex,omitempty"` + + PhysicalNodeNumber int32 `json:"PhysicalNodeNumber,omitempty"` + + VirtualProcessorCount int32 `json:"VirtualProcessorCount,omitempty"` + + MemoryUsageInPages int32 `json:"MemoryUsageInPages,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_controller.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_controller.go new file mode 
100644 index 0000000000..f5b7f3e38c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_controller.go @@ -0,0 +1,20 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualPMemController struct { + Devices map[string]VirtualPMemDevice `json:"Devices,omitempty"` + + MaximumCount uint32 `json:"MaximumCount,omitempty"` + + MaximumSizeBytes uint64 `json:"MaximumSizeBytes,omitempty"` + + Backing string `json:"Backing,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_device.go new file mode 100644 index 0000000000..70cf2d90de --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_device.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualPMemDevice struct { + HostPath string `json:"HostPath,omitempty"` + + ReadOnly bool `json:"ReadOnly,omitempty"` + + ImageFormat string `json:"ImageFormat,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go new file mode 100644 index 0000000000..9ef322f615 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualPMemMapping struct { + HostPath string `json:"HostPath,omitempty"` + ImageFormat string `json:"ImageFormat,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go new file mode 100644 index 0000000000..f5e05903c5 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.3 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// TODO: This is pre-release support in schema 2.3. Need to add build number +// docs when a public build with this is out. 
+type VirtualPciDevice struct { + Functions []VirtualPciFunction `json:",omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_function.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_function.go new file mode 100644 index 0000000000..cedb7d18bc --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_function.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.3 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// TODO: This is pre-release support in schema 2.3. Need to add build number +// docs when a public build with this is out. +type VirtualPciFunction struct { + DeviceInstancePath string `json:",omitempty"` + + VirtualFunction uint16 `json:",omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb.go new file mode 100644 index 0000000000..362df363e1 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualSmb struct { + Shares []VirtualSmbShare `json:"Shares,omitempty"` + + DirectFileMappingInMB int64 `json:"DirectFileMappingInMB,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share.go new file mode 100644 index 0000000000..915e9b6386 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share.go @@ -0,0 +1,20 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualSmbShare struct { + Name string `json:"Name,omitempty"` + + Path string `json:"Path,omitempty"` + + AllowedFiles []string `json:"AllowedFiles,omitempty"` + + Options *VirtualSmbShareOptions `json:"Options,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share_options.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share_options.go new file mode 100644 index 0000000000..75196bd8c8 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share_options.go @@ -0,0 +1,62 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualSmbShareOptions struct { + ReadOnly bool `json:"ReadOnly,omitempty"` + + // convert exclusive access to shared read access + ShareRead bool `json:"ShareRead,omitempty"` + + // all opens will use cached I/O + CacheIo bool 
`json:"CacheIo,omitempty"` + + // disable oplock support + NoOplocks bool `json:"NoOplocks,omitempty"` + + // Acquire the backup privilege when attempting to open + TakeBackupPrivilege bool `json:"TakeBackupPrivilege,omitempty"` + + // Use the identity of the share root when opening + UseShareRootIdentity bool `json:"UseShareRootIdentity,omitempty"` + + // disable Direct Mapping + NoDirectmap bool `json:"NoDirectmap,omitempty"` + + // disable Byterange locks + NoLocks bool `json:"NoLocks,omitempty"` + + // disable Directory CHange Notifications + NoDirnotify bool `json:"NoDirnotify,omitempty"` + + // share is use for VM shared memory + VmSharedMemory bool `json:"VmSharedMemory,omitempty"` + + // allow access only to the files specified in AllowedFiles + RestrictFileAccess bool `json:"RestrictFileAccess,omitempty"` + + // disable all oplocks except Level II + ForceLevelIIOplocks bool `json:"ForceLevelIIOplocks,omitempty"` + + // Allow the host to reparse this base layer + ReparseBaseLayer bool `json:"ReparseBaseLayer,omitempty"` + + // Enable pseudo-oplocks + PseudoOplocks bool `json:"PseudoOplocks,omitempty"` + + // All opens will use non-cached IO + NonCacheIo bool `json:"NonCacheIo,omitempty"` + + // Enable pseudo directory change notifications + PseudoDirnotify bool `json:"PseudoDirnotify,omitempty"` + + // Block directory enumeration, renames, and deletes. + SingleFileMapping bool `json:"SingleFileMapping,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_memory.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_memory.go new file mode 100644 index 0000000000..8e1836dd6b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_memory.go @@ -0,0 +1,26 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VmMemory struct { + AvailableMemory int32 `json:"AvailableMemory,omitempty"` + + AvailableMemoryBuffer int32 `json:"AvailableMemoryBuffer,omitempty"` + + ReservedMemory uint64 `json:"ReservedMemory,omitempty"` + + AssignedMemory uint64 `json:"AssignedMemory,omitempty"` + + SlpActive bool `json:"SlpActive,omitempty"` + + BalancingEnabled bool `json:"BalancingEnabled,omitempty"` + + DmOperationInProgress bool `json:"DmOperationInProgress,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_processor_limits.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_processor_limits.go new file mode 100644 index 0000000000..de1b9cf1ae --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_processor_limits.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// ProcessorLimits is used when modifying processor scheduling limits of a virtual machine. +type ProcessorLimits struct { + // Maximum amount of host CPU resources that the virtual machine can use. + Limit uint64 `json:"Limit,omitempty"` + // Value describing the relative priority of this virtual machine compared to other virtual machines. 
+ Weight uint64 `json:"Weight,omitempty"` + // Minimum amount of host CPU resources that the virtual machine is guaranteed. + Reservation uint64 `json:"Reservation,omitempty"` + // Provides the target maximum CPU frequency, in MHz, for a virtual machine. + MaximumFrequencyMHz uint32 `json:"MaximumFrequencyMHz,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go new file mode 100644 index 0000000000..8ed7e566d6 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type WindowsCrashReporting struct { + DumpFileName string `json:"DumpFileName,omitempty"` + + MaxDumpSize int64 `json:"MaxDumpSize,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go new file mode 100644 index 0000000000..a46b0051df --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go @@ -0,0 +1,51 @@ +//go:build windows + +package hcs + +import ( + "context" + "encoding/json" + + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" + "github.com/Microsoft/hcsshim/internal/vmcompute" +) + +// GetServiceProperties returns properties of the host compute service. +func GetServiceProperties(ctx context.Context, q hcsschema.PropertyQuery) (*hcsschema.ServiceProperties, error) { + operation := "hcs::GetServiceProperties" + + queryb, err := json.Marshal(q) + if err != nil { + return nil, err + } + propertiesJSON, resultJSON, err := vmcompute.HcsGetServiceProperties(ctx, string(queryb)) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return nil, &HcsError{Op: operation, Err: err, Events: events} + } + + if propertiesJSON == "" { + return nil, ErrUnexpectedValue + } + properties := &hcsschema.ServiceProperties{} + if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil { + return nil, err + } + return properties, nil +} + +// ModifyServiceSettings modifies settings of the host compute service. 
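+//
+// A typical (illustrative) sequence pairs this with GetServiceProperties:
+// query the current service state, adjust the relevant setting, and post the
+// change back. The concrete query and modification payloads are defined by
+// the hcsschema package and vary across HCS versions:
+//
+//	props, err := hcs.GetServiceProperties(ctx, hcsschema.PropertyQuery{})
+//	if err == nil {
+//		_ = props // inspect, then build the matching ModificationRequest
+//	}
+//	err = hcs.ModifyServiceSettings(ctx, hcsschema.ModificationRequest{})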
+func ModifyServiceSettings(ctx context.Context, settings hcsschema.ModificationRequest) error { + operation := "hcs::ModifyServiceSettings" + + settingsJSON, err := json.Marshal(settings) + if err != nil { + return err + } + resultJSON, err := vmcompute.HcsModifyServiceSettings(ctx, string(settingsJSON)) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return &HcsError{Op: operation, Err: err, Events: events} + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go new file mode 100644 index 0000000000..cf1db7da9a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go @@ -0,0 +1,872 @@ +//go:build windows + +package hcs + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "strings" + "sync" + "syscall" + "time" + + "github.com/Microsoft/hcsshim/internal/cow" + "github.com/Microsoft/hcsshim/internal/hcs/schema1" + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" + "github.com/Microsoft/hcsshim/internal/jobobject" + "github.com/Microsoft/hcsshim/internal/log" + "github.com/Microsoft/hcsshim/internal/logfields" + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/Microsoft/hcsshim/internal/timeout" + "github.com/Microsoft/hcsshim/internal/vmcompute" + "github.com/sirupsen/logrus" + "go.opencensus.io/trace" +) + +type System struct { + handleLock sync.RWMutex + handle vmcompute.HcsSystem + id string + callbackNumber uintptr + + closedWaitOnce sync.Once + waitBlock chan struct{} + waitError error + exitError error + os, typ, owner string + startTime time.Time +} + +var _ cow.Container = &System{} +var _ cow.ProcessHost = &System{} + +func newSystem(id string) *System { + return &System{ + id: id, + waitBlock: make(chan struct{}), + } +} + +// Implementation detail for silo naming, this should NOT be relied upon very heavily. +func siloNameFmt(containerID string) string { + return fmt.Sprintf(`\Container_%s`, containerID) +} + +// CreateComputeSystem creates a new compute system with the given configuration but does not start it. +func CreateComputeSystem(ctx context.Context, id string, hcsDocumentInterface interface{}) (_ *System, err error) { + operation := "hcs::CreateComputeSystem" + + // hcsCreateComputeSystemContext is an async operation. Start the outer span + // here to measure the full create time. + ctx, span := oc.StartSpan(ctx, operation) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("cid", id)) + + computeSystem := newSystem(id) + + hcsDocumentB, err := json.Marshal(hcsDocumentInterface) + if err != nil { + return nil, err + } + + hcsDocument := string(hcsDocumentB) + + var ( + identity syscall.Handle + resultJSON string + createError error + ) + computeSystem.handle, resultJSON, createError = vmcompute.HcsCreateComputeSystem(ctx, id, hcsDocument, identity) + if createError == nil || IsPending(createError) { + defer func() { + if err != nil { + computeSystem.Close() + } + }() + if err = computeSystem.registerCallback(ctx); err != nil { + // Terminate the compute system if it still exists. We're okay to + // ignore a failure here. 
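+ // (Registration must happen this early because the callback registered
+ // above is what later delivers hcsNotificationSystemCreateCompleted;
+ // processAsyncHcsResult below blocks on that notification whenever
+ // HcsCreateComputeSystem returned a pending result.)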
+ _ = computeSystem.Terminate(ctx) + return nil, makeSystemError(computeSystem, operation, err, nil) + } + } + + events, err := processAsyncHcsResult(ctx, createError, resultJSON, computeSystem.callbackNumber, + hcsNotificationSystemCreateCompleted, &timeout.SystemCreate) + if err != nil { + if err == ErrTimeout { + // Terminate the compute system if it still exists. We're okay to + // ignore a failure here. + _ = computeSystem.Terminate(ctx) + } + return nil, makeSystemError(computeSystem, operation, err, events) + } + go computeSystem.waitBackground() + if err = computeSystem.getCachedProperties(ctx); err != nil { + return nil, err + } + return computeSystem, nil +} + +// OpenComputeSystem opens an existing compute system by ID. +func OpenComputeSystem(ctx context.Context, id string) (*System, error) { + operation := "hcs::OpenComputeSystem" + + computeSystem := newSystem(id) + handle, resultJSON, err := vmcompute.HcsOpenComputeSystem(ctx, id) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return nil, makeSystemError(computeSystem, operation, err, events) + } + computeSystem.handle = handle + defer func() { + if err != nil { + computeSystem.Close() + } + }() + if err = computeSystem.registerCallback(ctx); err != nil { + return nil, makeSystemError(computeSystem, operation, err, nil) + } + go computeSystem.waitBackground() + if err = computeSystem.getCachedProperties(ctx); err != nil { + return nil, err + } + return computeSystem, nil +} + +func (computeSystem *System) getCachedProperties(ctx context.Context) error { + props, err := computeSystem.Properties(ctx) + if err != nil { + return err + } + computeSystem.typ = strings.ToLower(props.SystemType) + computeSystem.os = strings.ToLower(props.RuntimeOSType) + computeSystem.owner = strings.ToLower(props.Owner) + if computeSystem.os == "" && computeSystem.typ == "container" { + // Pre-RS5 HCS did not return the OS, but it only supported containers + // that ran Windows. + computeSystem.os = "windows" + } + return nil +} + +// OS returns the operating system of the compute system, "linux" or "windows". +func (computeSystem *System) OS() string { + return computeSystem.os +} + +// IsOCI returns whether processes in the compute system should be created via +// OCI. +func (computeSystem *System) IsOCI() bool { + return computeSystem.os == "linux" && computeSystem.typ == "container" +} + +// GetComputeSystems gets a list of the compute systems on the system that match the query +func GetComputeSystems(ctx context.Context, q schema1.ComputeSystemQuery) ([]schema1.ContainerProperties, error) { + operation := "hcs::GetComputeSystems" + + queryb, err := json.Marshal(q) + if err != nil { + return nil, err + } + + computeSystemsJSON, resultJSON, err := vmcompute.HcsEnumerateComputeSystems(ctx, string(queryb)) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return nil, &HcsError{Op: operation, Err: err, Events: events} + } + + if computeSystemsJSON == "" { + return nil, ErrUnexpectedValue + } + computeSystems := []schema1.ContainerProperties{} + if err = json.Unmarshal([]byte(computeSystemsJSON), &computeSystems); err != nil { + return nil, err + } + + return computeSystems, nil +} + +// Start synchronously starts the computeSystem. +func (computeSystem *System) Start(ctx context.Context) (err error) { + operation := "hcs::System::Start" + + // hcsStartComputeSystemContext is an async operation. Start the outer span + // here to measure the full start time. 
+ ctx, span := oc.StartSpan(ctx, operation) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) + + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + // prevent starting an exited system: we do not recreate waitBlock + // or rerun waitBackground, so we have no way to be notified of it closing again + if computeSystem.handle == 0 { + return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) + } + + resultJSON, err := vmcompute.HcsStartComputeSystem(ctx, computeSystem.handle, "") + events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, + hcsNotificationSystemStartCompleted, &timeout.SystemStart) + if err != nil { + return makeSystemError(computeSystem, operation, err, events) + } + computeSystem.startTime = time.Now() + return nil +} + +// ID returns the compute system's identifier. +func (computeSystem *System) ID() string { + return computeSystem.id +} + +// Shutdown requests a compute system shutdown. +func (computeSystem *System) Shutdown(ctx context.Context) error { + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + operation := "hcs::System::Shutdown" + + if computeSystem.handle == 0 || computeSystem.stopped() { + return nil + } + + resultJSON, err := vmcompute.HcsShutdownComputeSystem(ctx, computeSystem.handle, "") + events := processHcsResult(ctx, resultJSON) + switch err { + case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending: + default: + return makeSystemError(computeSystem, operation, err, events) + } + return nil +} + +// Terminate requests a compute system terminate. +func (computeSystem *System) Terminate(ctx context.Context) error { + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + operation := "hcs::System::Terminate" + + if computeSystem.handle == 0 || computeSystem.stopped() { + return nil + } + + resultJSON, err := vmcompute.HcsTerminateComputeSystem(ctx, computeSystem.handle, "") + events := processHcsResult(ctx, resultJSON) + switch err { + case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending: + default: + return makeSystemError(computeSystem, operation, err, events) + } + return nil +} + +// waitBackground waits for the compute system exit notification. Once received, +// it sets `computeSystem.waitError` (if any) and unblocks all `Wait` calls. +// +// This MUST be called exactly once per `computeSystem.handle` but `Wait` is +// safe to call multiple times.
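+//
+// Callers normally consume the result through Wait/WaitCtx or by selecting on
+// WaitChannel, e.g. (illustrative):
+//
+//	select {
+//	case <-system.WaitChannel():
+//		err = system.WaitError()
+//	case <-ctx.Done():
+//		err = ctx.Err()
+//	}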
+func (computeSystem *System) waitBackground() { + operation := "hcs::System::waitBackground" + ctx, span := oc.StartSpan(context.Background(), operation) + defer span.End() + span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) + + err := waitForNotification(ctx, computeSystem.callbackNumber, hcsNotificationSystemExited, nil) + switch err { + case nil: + log.G(ctx).Debug("system exited") + case ErrVmcomputeUnexpectedExit: + log.G(ctx).Debug("unexpected system exit") + computeSystem.exitError = makeSystemError(computeSystem, operation, err, nil) + err = nil + default: + err = makeSystemError(computeSystem, operation, err, nil) + } + computeSystem.closedWaitOnce.Do(func() { + computeSystem.waitError = err + close(computeSystem.waitBlock) + }) + oc.SetSpanStatus(span, err) +} + +func (computeSystem *System) WaitChannel() <-chan struct{} { + return computeSystem.waitBlock +} + +func (computeSystem *System) WaitError() error { + return computeSystem.waitError +} + +// Wait synchronously waits for the compute system to shutdown or terminate. +// If the compute system has already exited returns the previous error (if any). +func (computeSystem *System) Wait() error { + return computeSystem.WaitCtx(context.Background()) +} + +// WaitCtx synchronously waits for the compute system to shutdown or terminate, or the context to be cancelled. +// +// See [System.Wait] for more information. +func (computeSystem *System) WaitCtx(ctx context.Context) error { + select { + case <-computeSystem.WaitChannel(): + return computeSystem.WaitError() + case <-ctx.Done(): + return ctx.Err() + } +} + +// stopped returns true if the compute system stopped. +func (computeSystem *System) stopped() bool { + select { + case <-computeSystem.waitBlock: + return true + default: + } + return false +} + +// ExitError returns an error describing the reason the compute system terminated. +func (computeSystem *System) ExitError() error { + if !computeSystem.stopped() { + return errors.New("container not exited") + } + if computeSystem.waitError != nil { + return computeSystem.waitError + } + return computeSystem.exitError +} + +// Properties returns the requested container properties targeting a V1 schema container. +func (computeSystem *System) Properties(ctx context.Context, types ...schema1.PropertyType) (*schema1.ContainerProperties, error) { + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + operation := "hcs::System::Properties" + + if computeSystem.handle == 0 { + return nil, makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) + } + + queryBytes, err := json.Marshal(schema1.PropertyQuery{PropertyTypes: types}) + if err != nil { + return nil, makeSystemError(computeSystem, operation, err, nil) + } + + propertiesJSON, resultJSON, err := vmcompute.HcsGetComputeSystemProperties(ctx, computeSystem.handle, string(queryBytes)) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return nil, makeSystemError(computeSystem, operation, err, events) + } + + if propertiesJSON == "" { + return nil, ErrUnexpectedValue + } + properties := &schema1.ContainerProperties{} + if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil { + return nil, makeSystemError(computeSystem, operation, err, nil) + } + + return properties, nil +} + +// queryInProc handles querying for container properties without reaching out to HCS. `props` +// will be updated to contain any data returned from the queries present in `types`. 
If any properties +// failed to be queried they will be tallied up and returned as the first return value. Failures on +// query are NOT considered errors; the only failure case for this method is if the container's job object +// cannot be opened. +func (computeSystem *System) queryInProc( + ctx context.Context, + props *hcsschema.Properties, + types []hcsschema.PropertyType, +) ([]hcsschema.PropertyType, error) { + // In the future we can make use of some new functionality in the HCS that allows you + // to pass a job object for HCS to use for the container. Currently, the only way we'll + // be able to open the job/silo is if we're running as SYSTEM. + jobOptions := &jobobject.Options{ + UseNTVariant: true, + Name: siloNameFmt(computeSystem.id), + } + job, err := jobobject.Open(ctx, jobOptions) + if err != nil { + return nil, err + } + defer job.Close() + + var fallbackQueryTypes []hcsschema.PropertyType + for _, propType := range types { + switch propType { + case hcsschema.PTStatistics: + // Handle a bad caller asking for the same type twice. No use in re-querying if this is + // filled in already. + if props.Statistics == nil { + props.Statistics, err = computeSystem.statisticsInProc(job) + if err != nil { + log.G(ctx).WithError(err).Warn("failed to get statistics in-proc") + + fallbackQueryTypes = append(fallbackQueryTypes, propType) + } + } + default: + fallbackQueryTypes = append(fallbackQueryTypes, propType) + } + } + + return fallbackQueryTypes, nil +} + +// statisticsInProc emulates what HCS does to grab statistics for a given container with a small +// change to make grabbing the private working set total much more efficient. +func (computeSystem *System) statisticsInProc(job *jobobject.JobObject) (*hcsschema.Statistics, error) { + // Start timestamp for these stats before we grab them to match HCS + timestamp := time.Now() + + memInfo, err := job.QueryMemoryStats() + if err != nil { + return nil, err + } + + processorInfo, err := job.QueryProcessorStats() + if err != nil { + return nil, err + } + + storageInfo, err := job.QueryStorageStats() + if err != nil { + return nil, err + } + + // This calculates the private working set more efficiently than HCS does. HCS calls NtQuerySystemInformation + // with the class SystemProcessInformation which returns an array containing system information for *every* + // process running on the machine. They then grab the pids that are running in the container and filter down + // the entries in the array to only what's running in that silo and start tallying up the total. This doesn't + // work well as performance should get worse if more processes are running on the machine in general and not + // just in the container. All of the additional information besides the WorkingSetPrivateSize field is ignored + // as well, which isn't great and is wasted work to fetch. + // + // HCS only lets you grab statistics in an all or nothing fashion, so we can't just grab the private + // working set ourselves and ask for everything else separately. The optimization we can make here is + // to open the silo ourselves and do the same queries for the rest of the info, as well as calculating + // the private working set in a more efficient manner by: + // + // 1. Find the pids running in the silo + // 2. Get a process handle for every process (only need PROCESS_QUERY_LIMITED_INFORMATION access) + // 3. Call NtQueryInformationProcess on each process with the class ProcessVmCounters + // 4.
Tally up the total using the field PrivateWorkingSetSize in VM_COUNTERS_EX2. + privateWorkingSet, err := job.QueryPrivateWorkingSet() + if err != nil { + return nil, err + } + + return &hcsschema.Statistics{ + Timestamp: timestamp, + ContainerStartTime: computeSystem.startTime, + Uptime100ns: uint64(time.Since(computeSystem.startTime).Nanoseconds()) / 100, + Memory: &hcsschema.MemoryStats{ + MemoryUsageCommitBytes: memInfo.JobMemory, + MemoryUsageCommitPeakBytes: memInfo.PeakJobMemoryUsed, + MemoryUsagePrivateWorkingSetBytes: privateWorkingSet, + }, + Processor: &hcsschema.ProcessorStats{ + RuntimeKernel100ns: uint64(processorInfo.TotalKernelTime), + RuntimeUser100ns: uint64(processorInfo.TotalUserTime), + TotalRuntime100ns: uint64(processorInfo.TotalKernelTime + processorInfo.TotalUserTime), + }, + Storage: &hcsschema.StorageStats{ + ReadCountNormalized: uint64(storageInfo.ReadStats.IoCount), + ReadSizeBytes: storageInfo.ReadStats.TotalSize, + WriteCountNormalized: uint64(storageInfo.WriteStats.IoCount), + WriteSizeBytes: storageInfo.WriteStats.TotalSize, + }, + }, nil +} + +// hcsPropertiesV2Query is a helper to make a HcsGetComputeSystemProperties call using the V2 schema property types. +func (computeSystem *System) hcsPropertiesV2Query(ctx context.Context, types []hcsschema.PropertyType) (*hcsschema.Properties, error) { + operation := "hcs::System::PropertiesV2" + + if computeSystem.handle == 0 { + return nil, makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) + } + + queryBytes, err := json.Marshal(hcsschema.PropertyQuery{PropertyTypes: types}) + if err != nil { + return nil, makeSystemError(computeSystem, operation, err, nil) + } + + propertiesJSON, resultJSON, err := vmcompute.HcsGetComputeSystemProperties(ctx, computeSystem.handle, string(queryBytes)) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return nil, makeSystemError(computeSystem, operation, err, events) + } + + if propertiesJSON == "" { + return nil, ErrUnexpectedValue + } + props := &hcsschema.Properties{} + if err := json.Unmarshal([]byte(propertiesJSON), props); err != nil { + return nil, makeSystemError(computeSystem, operation, err, nil) + } + + return props, nil +} + +// PropertiesV2 returns the requested compute system's properties targeting a V2 schema compute system. +func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (_ *hcsschema.Properties, err error) { + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + // Let HCS tally up the total for VM based queries instead of querying ourselves. + if computeSystem.typ != "container" { + return computeSystem.hcsPropertiesV2Query(ctx, types) + } + + // Define a starter Properties struct with the default fields returned from every + // query. Owner is only returned from Statistics but it's harmless to include. + properties := &hcsschema.Properties{ + Id: computeSystem.id, + SystemType: computeSystem.typ, + RuntimeOsType: computeSystem.os, + Owner: computeSystem.owner, + } + + logEntry := log.G(ctx) + // First let's try to query ourselves without reaching out to HCS. If any of the queries fail + // we'll take note and fall back to querying HCS for any of the failed types.
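+ // (queryInProc currently answers only the statistics property type, using
+ // the silo's job object; every other requested type comes back in
+ // fallbackTypes and is re-queried against HCS below.)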
+ fallbackTypes, err := computeSystem.queryInProc(ctx, properties, types) + if err == nil && len(fallbackTypes) == 0 { + return properties, nil + } else if err != nil { + logEntry = logEntry.WithError(fmt.Errorf("failed to query compute system properties in-proc: %w", err)) + fallbackTypes = types + } + + logEntry.WithFields(logrus.Fields{ + logfields.ContainerID: computeSystem.id, + "propertyTypes": fallbackTypes, + }).Info("falling back to HCS for property type queries") + + hcsProperties, err := computeSystem.hcsPropertiesV2Query(ctx, fallbackTypes) + if err != nil { + return nil, err + } + + // Now add in anything that we might have successfully queried in process. + if properties.Statistics != nil { + hcsProperties.Statistics = properties.Statistics + hcsProperties.Owner = properties.Owner + } + + // For future support for querying processlist in-proc as well. + if properties.ProcessList != nil { + hcsProperties.ProcessList = properties.ProcessList + } + + return hcsProperties, nil +} + +// Pause pauses the execution of the computeSystem. This feature is not enabled in TP5. +func (computeSystem *System) Pause(ctx context.Context) (err error) { + operation := "hcs::System::Pause" + + // hcsPauseComputeSystemContext is an async operation. Start the outer span + // here to measure the full pause time. + ctx, span := oc.StartSpan(ctx, operation) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) + + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + if computeSystem.handle == 0 { + return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) + } + + resultJSON, err := vmcompute.HcsPauseComputeSystem(ctx, computeSystem.handle, "") + events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, + hcsNotificationSystemPauseCompleted, &timeout.SystemPause) + if err != nil { + return makeSystemError(computeSystem, operation, err, events) + } + + return nil +} + +// Resume resumes the execution of the computeSystem. This feature is not enabled in TP5. +func (computeSystem *System) Resume(ctx context.Context) (err error) { + operation := "hcs::System::Resume" + + // hcsResumeComputeSystemContext is an async operation. Start the outer span + // here to measure the full restore time. + ctx, span := oc.StartSpan(ctx, operation) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) + + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + if computeSystem.handle == 0 { + return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) + } + + resultJSON, err := vmcompute.HcsResumeComputeSystem(ctx, computeSystem.handle, "") + events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, + hcsNotificationSystemResumeCompleted, &timeout.SystemResume) + if err != nil { + return makeSystemError(computeSystem, operation, err, events) + } + + return nil +} + +// Save the compute system +func (computeSystem *System) Save(ctx context.Context, options interface{}) (err error) { + operation := "hcs::System::Save" + + // hcsSaveComputeSystemContext is an async operation. Start the outer span + // here to measure the full save time. 
+ ctx, span := oc.StartSpan(ctx, operation) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) + + saveOptions, err := json.Marshal(options) + if err != nil { + return err + } + + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + if computeSystem.handle == 0 { + return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) + } + + result, err := vmcompute.HcsSaveComputeSystem(ctx, computeSystem.handle, string(saveOptions)) + events, err := processAsyncHcsResult(ctx, err, result, computeSystem.callbackNumber, + hcsNotificationSystemSaveCompleted, &timeout.SystemSave) + if err != nil { + return makeSystemError(computeSystem, operation, err, events) + } + + return nil +} + +func (computeSystem *System) createProcess(ctx context.Context, operation string, c interface{}) (*Process, *vmcompute.HcsProcessInformation, error) { + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + if computeSystem.handle == 0 { + return nil, nil, makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) + } + + configurationb, err := json.Marshal(c) + if err != nil { + return nil, nil, makeSystemError(computeSystem, operation, err, nil) + } + + configuration := string(configurationb) + processInfo, processHandle, resultJSON, err := vmcompute.HcsCreateProcess(ctx, computeSystem.handle, configuration) + events := processHcsResult(ctx, resultJSON) + if err != nil { + if v2, ok := c.(*hcsschema.ProcessParameters); ok { + operation += ": " + v2.CommandLine + } else if v1, ok := c.(*schema1.ProcessConfig); ok { + operation += ": " + v1.CommandLine + } + return nil, nil, makeSystemError(computeSystem, operation, err, events) + } + + log.G(ctx).WithField("pid", processInfo.ProcessId).Debug("created process pid") + return newProcess(processHandle, int(processInfo.ProcessId), computeSystem), &processInfo, nil +} + +// CreateProcess launches a new process within the computeSystem. +func (computeSystem *System) CreateProcess(ctx context.Context, c interface{}) (cow.Process, error) { + operation := "hcs::System::CreateProcess" + process, processInfo, err := computeSystem.createProcess(ctx, operation, c) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + process.Close() + } + }() + + pipes, err := makeOpenFiles([]syscall.Handle{processInfo.StdInput, processInfo.StdOutput, processInfo.StdError}) + if err != nil { + return nil, makeSystemError(computeSystem, operation, err, nil) + } + process.stdin = pipes[0] + process.stdout = pipes[1] + process.stderr = pipes[2] + process.hasCachedStdio = true + + if err = process.registerCallback(ctx); err != nil { + return nil, makeSystemError(computeSystem, operation, err, nil) + } + go process.waitBackground() + + return process, nil +} + +// OpenProcess gets an interface to an existing process within the computeSystem. 
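+//
+// The returned Process carries its own handle and callback registration, so
+// it is closed independently of the System that produced it.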
+func (computeSystem *System) OpenProcess(ctx context.Context, pid int) (*Process, error) { + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + operation := "hcs::System::OpenProcess" + + if computeSystem.handle == 0 { + return nil, makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) + } + + processHandle, resultJSON, err := vmcompute.HcsOpenProcess(ctx, computeSystem.handle, uint32(pid)) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return nil, makeSystemError(computeSystem, operation, err, events) + } + + process := newProcess(processHandle, pid, computeSystem) + if err = process.registerCallback(ctx); err != nil { + return nil, makeSystemError(computeSystem, operation, err, nil) + } + go process.waitBackground() + + return process, nil +} + +// Close cleans up any state associated with the compute system but does not terminate or wait for it. +func (computeSystem *System) Close() error { + return computeSystem.CloseCtx(context.Background()) +} + +// CloseCtx is similar to [System.Close], but accepts a context. +// +// The context is used for all operations, including waits, so timeouts/cancellations may prevent +// proper system cleanup. +func (computeSystem *System) CloseCtx(ctx context.Context) (err error) { + operation := "hcs::System::Close" + ctx, span := oc.StartSpan(ctx, operation) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) + + computeSystem.handleLock.Lock() + defer computeSystem.handleLock.Unlock() + + // Don't double free this + if computeSystem.handle == 0 { + return nil + } + + if err = computeSystem.unregisterCallback(ctx); err != nil { + return makeSystemError(computeSystem, operation, err, nil) + } + + err = vmcompute.HcsCloseComputeSystem(ctx, computeSystem.handle) + if err != nil { + return makeSystemError(computeSystem, operation, err, nil) + } + + computeSystem.handle = 0 + computeSystem.closedWaitOnce.Do(func() { + computeSystem.waitError = ErrAlreadyClosed + close(computeSystem.waitBlock) + }) + + return nil +} + +func (computeSystem *System) registerCallback(ctx context.Context) error { + callbackContext := ¬ificationWatcherContext{ + channels: newSystemChannels(), + systemID: computeSystem.id, + } + + callbackMapLock.Lock() + callbackNumber := nextCallback + nextCallback++ + callbackMap[callbackNumber] = callbackContext + callbackMapLock.Unlock() + + callbackHandle, err := vmcompute.HcsRegisterComputeSystemCallback(ctx, computeSystem.handle, + notificationWatcherCallback, callbackNumber) + if err != nil { + return err + } + callbackContext.handle = callbackHandle + computeSystem.callbackNumber = callbackNumber + + return nil +} + +func (computeSystem *System) unregisterCallback(ctx context.Context) error { + callbackNumber := computeSystem.callbackNumber + + callbackMapLock.RLock() + callbackContext := callbackMap[callbackNumber] + callbackMapLock.RUnlock() + + if callbackContext == nil { + return nil + } + + handle := callbackContext.handle + + if handle == 0 { + return nil + } + + // hcsUnregisterComputeSystemCallback has its own synchronization + // to wait for all callbacks to complete. We must NOT hold the callbackMapLock. 
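+ // (Holding the lock here could deadlock: an in-flight
+ // notificationWatcherCallback may be taking callbackMapLock to look up its
+ // context while HcsUnregisterComputeSystemCallback waits for exactly that
+ // callback to drain.)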
+ err := vmcompute.HcsUnregisterComputeSystemCallback(ctx, handle) + if err != nil { + return err + } + + closeChannels(callbackContext.channels) + + callbackMapLock.Lock() + delete(callbackMap, callbackNumber) + callbackMapLock.Unlock() + + handle = 0 //nolint:ineffassign + + return nil +} + +// Modify the System by sending a request to HCS +func (computeSystem *System) Modify(ctx context.Context, config interface{}) error { + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + operation := "hcs::System::Modify" + + if computeSystem.handle == 0 { + return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) + } + + requestBytes, err := json.Marshal(config) + if err != nil { + return err + } + + requestJSON := string(requestBytes) + resultJSON, err := vmcompute.HcsModifyComputeSystem(ctx, computeSystem.handle, requestJSON) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return makeSystemError(computeSystem, operation, err, events) + } + + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go new file mode 100644 index 0000000000..5dcb97eb39 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go @@ -0,0 +1,64 @@ +//go:build windows + +package hcs + +import ( + "context" + "io" + "syscall" + + "github.com/Microsoft/go-winio" + diskutil "github.com/Microsoft/go-winio/vhd" + "github.com/Microsoft/hcsshim/computestorage" + "github.com/pkg/errors" + "golang.org/x/sys/windows" +) + +// makeOpenFiles calls winio.MakeOpenFile for each handle in a slice but closes all the handles +// if there is an error. +func makeOpenFiles(hs []syscall.Handle) (_ []io.ReadWriteCloser, err error) { + fs := make([]io.ReadWriteCloser, len(hs)) + for i, h := range hs { + if h != syscall.Handle(0) { + if err == nil { + fs[i], err = winio.MakeOpenFile(h) + } + if err != nil { + syscall.Close(h) + } + } + } + if err != nil { + for _, f := range fs { + if f != nil { + f.Close() + } + } + return nil, err + } + return fs, nil +} + +// CreateNTFSVHD creates a VHD formatted with NTFS of size `sizeGB` at the given `vhdPath`. 
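+//
+// The sequence is: create the VHDX file, open it without requesting a
+// specific access level, format it as a writable layer via computestorage,
+// and close the handle on the way out. Illustrative call (path made up):
+//
+//	err := hcs.CreateNTFSVHD(ctx, `C:\layers\scratch.vhdx`, 20)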
+func CreateNTFSVHD(ctx context.Context, vhdPath string, sizeGB uint32) (err error) { + if err := diskutil.CreateVhdx(vhdPath, sizeGB, 1); err != nil { + return errors.Wrap(err, "failed to create VHD") + } + + vhd, err := diskutil.OpenVirtualDisk(vhdPath, diskutil.VirtualDiskAccessNone, diskutil.OpenVirtualDiskFlagNone) + if err != nil { + return errors.Wrap(err, "failed to open VHD") + } + defer func() { + err2 := windows.CloseHandle(windows.Handle(vhd)) + if err == nil { + err = errors.Wrap(err2, "failed to close VHD") + } + }() + + if err := computestorage.FormatWritableLayerVhd(ctx, windows.Handle(vhd)); err != nil { + return errors.Wrap(err, "failed to format VHD") + } + + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go new file mode 100644 index 0000000000..3a51ed1955 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go @@ -0,0 +1,82 @@ +//go:build windows + +package hcs + +import ( + "context" + "time" + + "github.com/Microsoft/hcsshim/internal/log" +) + +func processAsyncHcsResult( + ctx context.Context, + err error, + resultJSON string, + callbackNumber uintptr, + expectedNotification hcsNotification, + timeout *time.Duration, +) ([]ErrorEvent, error) { + events := processHcsResult(ctx, resultJSON) + if IsPending(err) { + return nil, waitForNotification(ctx, callbackNumber, expectedNotification, timeout) + } + + return events, err +} + +func waitForNotification( + ctx context.Context, + callbackNumber uintptr, + expectedNotification hcsNotification, + timeout *time.Duration, +) error { + callbackMapLock.RLock() + if _, ok := callbackMap[callbackNumber]; !ok { + callbackMapLock.RUnlock() + log.G(ctx).WithField("callbackNumber", callbackNumber).Error("failed to waitForNotification: callbackNumber does not exist in callbackMap") + return ErrHandleClose + } + channels := callbackMap[callbackNumber].channels + callbackMapLock.RUnlock() + + expectedChannel := channels[expectedNotification] + if expectedChannel == nil { + log.G(ctx).WithField("type", expectedNotification).Error("unknown notification type in waitForNotification") + return ErrInvalidNotificationType + } + + var c <-chan time.Time + if timeout != nil { + timer := time.NewTimer(*timeout) + c = timer.C + defer timer.Stop() + } + + select { + case err, ok := <-expectedChannel: + if !ok { + return ErrHandleClose + } + return err + case err, ok := <-channels[hcsNotificationSystemExited]: + if !ok { + return ErrHandleClose + } + + // If the expected notification is hcsNotificationSystemExited, which of the two select + // cases fires is random.
Return the raw error if hcsNotificationSystemExited is expected + if channels[hcsNotificationSystemExited] == expectedChannel { + return err + } + return ErrUnexpectedContainerExit + case _, ok := <-channels[hcsNotificationServiceDisconnect]: + if !ok { + return ErrHandleClose + } + // hcsNotificationServiceDisconnect should never be an expected notification + // it does not need the same handling as hcsNotificationSystemExited + return ErrUnexpectedProcessAbort + case <-c: + return ErrTimeout + } +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcserror/doc.go b/vendor/github.com/Microsoft/hcsshim/internal/hcserror/doc.go new file mode 100644 index 0000000000..ce70676789 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcserror/doc.go @@ -0,0 +1 @@ +package hcserror diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go b/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go new file mode 100644 index 0000000000..a70d80da07 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go @@ -0,0 +1,52 @@ +//go:build windows + +package hcserror + +import ( + "errors" + "fmt" + + "golang.org/x/sys/windows" +) + +type HcsError struct { + title string + rest string + Err error +} + +func (e *HcsError) Error() string { + s := e.title + if len(s) > 0 && s[len(s)-1] != ' ' { + s += " " + } + s += fmt.Sprintf("failed in Win32: %s (0x%x)", e.Err, Win32FromError(e.Err)) + if e.rest != "" { + if e.rest[0] != ' ' { + s += " " + } + s += e.rest + } + return s +} + +func New(err error, title, rest string) error { + // Pass through DLL errors directly since they do not originate from HCS. + var e *windows.DLLError + if errors.As(err, &e) { + return err + } + return &HcsError{title, rest, err} +} + +func Win32FromError(err error) uint32 { + var herr *HcsError + if errors.As(err, &herr) { + return Win32FromError(herr.Err) + } + var code windows.Errno + if errors.As(err, &code) { + return uint32(code) + } + return uint32(windows.ERROR_GEN_FAILURE) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/doc.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/doc.go new file mode 100644 index 0000000000..f6d35df0e5 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/doc.go @@ -0,0 +1 @@ +package hns diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hns.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hns.go new file mode 100644 index 0000000000..ec4c907d1f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hns.go @@ -0,0 +1,23 @@ +package hns + +import "fmt" + +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go hns.go + +//sys _hnsCall(method string, path string, object string, response **uint16) (hr error) = vmcompute.HNSCall? 
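+
+// The //sys declaration above is input for mkwinsyscall (run through the
+// go:generate directive): it produces a zsyscall_windows.go stub that
+// lazy-loads vmcompute.dll, resolves the HNSCall export, and converts the Go
+// string arguments to UTF-16 pointers. As best understood, the trailing '?'
+// asks the generated stub to verify that the export can be resolved and to
+// return an error, instead of panicking, when it is absent.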
+ +type EndpointNotFoundError struct { + EndpointName string +} + +func (e EndpointNotFoundError) Error() string { + return fmt.Sprintf("Endpoint %s not found", e.EndpointName) +} + +type NetworkNotFoundError struct { + NetworkName string +} + +func (e NetworkNotFoundError) Error() string { + return fmt.Sprintf("Network %s not found", e.NetworkName) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go new file mode 100644 index 0000000000..593664419d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go @@ -0,0 +1,338 @@ +//go:build windows + +package hns + +import ( + "encoding/json" + "net" + "strings" + + "github.com/sirupsen/logrus" +) + +// HNSEndpoint represents a network endpoint in HNS +type HNSEndpoint struct { + Id string `json:"ID,omitempty"` + Name string `json:",omitempty"` + VirtualNetwork string `json:",omitempty"` + VirtualNetworkName string `json:",omitempty"` + Policies []json.RawMessage `json:",omitempty"` + MacAddress string `json:",omitempty"` + IPAddress net.IP `json:",omitempty"` + IPv6Address net.IP `json:",omitempty"` + DNSSuffix string `json:",omitempty"` + DNSServerList string `json:",omitempty"` + DNSDomain string `json:",omitempty"` + GatewayAddress string `json:",omitempty"` + GatewayAddressV6 string `json:",omitempty"` + EnableInternalDNS bool `json:",omitempty"` + DisableICC bool `json:",omitempty"` + PrefixLength uint8 `json:",omitempty"` + IPv6PrefixLength uint8 `json:",omitempty"` + IsRemoteEndpoint bool `json:",omitempty"` + EnableLowMetric bool `json:",omitempty"` + Namespace *Namespace `json:",omitempty"` + EncapOverhead uint16 `json:",omitempty"` + SharedContainers []string `json:",omitempty"` +} + +// SystemType represents the type of the system on which actions are done +type SystemType string + +// SystemType const +const ( + ContainerType SystemType = "Container" + VirtualMachineType SystemType = "VirtualMachine" + HostType SystemType = "Host" +) + +// EndpointAttachDetachRequest is the structure used to send a request to the container to modify the system +// Supported resource types are Network and Request Types are Add/Remove +type EndpointAttachDetachRequest struct { + ContainerID string `json:"ContainerId,omitempty"` + SystemType SystemType `json:"SystemType"` + CompartmentID uint16 `json:"CompartmentId,omitempty"` + VirtualNICName string `json:"VirtualNicName,omitempty"` +} + +// EndpointResquestResponse is the object used to get the endpoint request response +type EndpointResquestResponse struct { + Success bool + Error string +} + +// EndpointStats is the object that has stats for a given endpoint +type EndpointStats struct { + BytesReceived uint64 `json:"BytesReceived"` + BytesSent uint64 `json:"BytesSent"` + DroppedPacketsIncoming uint64 `json:"DroppedPacketsIncoming"` + DroppedPacketsOutgoing uint64 `json:"DroppedPacketsOutgoing"` + EndpointID string `json:"EndpointId"` + InstanceID string `json:"InstanceId"` + PacketsReceived uint64 `json:"PacketsReceived"` + PacketsSent uint64 `json:"PacketsSent"` +} + +// HNSEndpointRequest makes a HNS call to modify/query a network endpoint +func HNSEndpointRequest(method, path, request string) (*HNSEndpoint, error) { + endpoint := &HNSEndpoint{} + err := hnsCall(method, "/endpoints/"+path, request, &endpoint) + if err != nil { + return nil, err + } + + return endpoint, nil +} + +// HNSListEndpointRequest makes a HNS call to query the list of available
endpoints +func HNSListEndpointRequest() ([]HNSEndpoint, error) { + var endpoint []HNSEndpoint + err := hnsCall("GET", "/endpoints/", "", &endpoint) + if err != nil { + return nil, err + } + + return endpoint, nil +} + +// hnsEndpointStatsRequest makes a HNS call to query the stats for a given endpoint ID +func hnsEndpointStatsRequest(id string) (*EndpointStats, error) { + var stats EndpointStats + err := hnsCall("GET", "/endpointstats/"+id, "", &stats) + if err != nil { + return nil, err + } + + return &stats, nil +} + +// GetHNSEndpointByID gets the Endpoint by ID +func GetHNSEndpointByID(endpointID string) (*HNSEndpoint, error) { + return HNSEndpointRequest("GET", endpointID, "") +} + +// GetHNSEndpointStats gets the stats for an Endpoint by ID +func GetHNSEndpointStats(endpointID string) (*EndpointStats, error) { + return hnsEndpointStatsRequest(endpointID) +} + +// GetHNSEndpointByName gets the endpoint filtered by Name +func GetHNSEndpointByName(endpointName string) (*HNSEndpoint, error) { + hnsResponse, err := HNSListEndpointRequest() + if err != nil { + return nil, err + } + for _, hnsEndpoint := range hnsResponse { + if hnsEndpoint.Name == endpointName { + return &hnsEndpoint, nil + } + } + return nil, EndpointNotFoundError{EndpointName: endpointName} +} + +type endpointAttachInfo struct { + SharedContainers json.RawMessage `json:",omitempty"` +} + +func (endpoint *HNSEndpoint) IsAttached(vID string) (bool, error) { + attachInfo := endpointAttachInfo{} + err := hnsCall("GET", "/endpoints/"+endpoint.Id, "", &attachInfo) + + // Returning false allows us to just return the err + if err != nil { + return false, err + } + + if strings.Contains(strings.ToLower(string(attachInfo.SharedContainers)), strings.ToLower(vID)) { + return true, nil + } + + return false, nil +} + +// Create Endpoint by sending EndpointRequest to HNS.
TODO: Create a separate HNS interface to place all these methods +func (endpoint *HNSEndpoint) Create() (*HNSEndpoint, error) { + operation := "Create" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + + jsonString, err := json.Marshal(endpoint) + if err != nil { + return nil, err + } + return HNSEndpointRequest("POST", "", string(jsonString)) +} + +// Delete Endpoint by sending EndpointRequest to HNS +func (endpoint *HNSEndpoint) Delete() (*HNSEndpoint, error) { + operation := "Delete" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + + return HNSEndpointRequest("DELETE", endpoint.Id, "") +} + +// Update Endpoint +func (endpoint *HNSEndpoint) Update() (*HNSEndpoint, error) { + operation := "Update" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + jsonString, err := json.Marshal(endpoint) + if err != nil { + return nil, err + } + err = hnsCall("POST", "/endpoints/"+endpoint.Id, string(jsonString), &endpoint) + + return endpoint, err +} + +// ApplyACLPolicy applies a set of ACL Policies on the Endpoint +func (endpoint *HNSEndpoint) ApplyACLPolicy(policies ...*ACLPolicy) error { + operation := "ApplyACLPolicy" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + + for _, policy := range policies { + if policy == nil { + continue + } + jsonString, err := json.Marshal(policy) + if err != nil { + return err + } + endpoint.Policies = append(endpoint.Policies, jsonString) + } + + _, err := endpoint.Update() + return err +} + +// ApplyProxyPolicy applies a set of Proxy Policies on the Endpoint +func (endpoint *HNSEndpoint) ApplyProxyPolicy(policies ...*ProxyPolicy) error { + operation := "ApplyProxyPolicy" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + + for _, policy := range policies { + if policy == nil { + continue + } + jsonString, err := json.Marshal(policy) + if err != nil { + return err + } + endpoint.Policies = append(endpoint.Policies, jsonString) + } + + _, err := endpoint.Update() + return err +} + +// ContainerAttach attaches an endpoint to container +func (endpoint *HNSEndpoint) ContainerAttach(containerID string, compartmentID uint16) error { + operation := "ContainerAttach" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + + requestMessage := &EndpointAttachDetachRequest{ + ContainerID: containerID, + CompartmentID: compartmentID, + SystemType: ContainerType, + } + response := &EndpointResquestResponse{} + jsonString, err := json.Marshal(requestMessage) + if err != nil { + return err + } + return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response) +} + +// ContainerDetach detaches an endpoint from container +func (endpoint *HNSEndpoint) ContainerDetach(containerID string) error { + operation := "ContainerDetach" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + + requestMessage := &EndpointAttachDetachRequest{ + ContainerID: containerID, + SystemType: ContainerType, + } + response := &EndpointResquestResponse{} + + jsonString, err := json.Marshal(requestMessage) + if err != nil { + return err + } + return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response) +} + +// HostAttach attaches a nic on the host +func (endpoint *HNSEndpoint) HostAttach(compartmentID uint16) error { + operation := "HostAttach" + title 
:= "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + requestMessage := &EndpointAttachDetachRequest{ + CompartmentID: compartmentID, + SystemType: HostType, + } + response := &EndpointResquestResponse{} + + jsonString, err := json.Marshal(requestMessage) + if err != nil { + return err + } + return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response) +} + +// HostDetach detaches a nic on the host +func (endpoint *HNSEndpoint) HostDetach() error { + operation := "HostDetach" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + requestMessage := &EndpointAttachDetachRequest{ + SystemType: HostType, + } + response := &EndpointResquestResponse{} + + jsonString, err := json.Marshal(requestMessage) + if err != nil { + return err + } + return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response) +} + +// VirtualMachineNICAttach attaches a endpoint to a virtual machine +func (endpoint *HNSEndpoint) VirtualMachineNICAttach(virtualMachineNICName string) error { + operation := "VirtualMachineNicAttach" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + requestMessage := &EndpointAttachDetachRequest{ + VirtualNICName: virtualMachineNICName, + SystemType: VirtualMachineType, + } + response := &EndpointResquestResponse{} + + jsonString, err := json.Marshal(requestMessage) + if err != nil { + return err + } + return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response) +} + +// VirtualMachineNICDetach detaches a endpoint from a virtual machine +func (endpoint *HNSEndpoint) VirtualMachineNICDetach() error { + operation := "VirtualMachineNicDetach" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + + requestMessage := &EndpointAttachDetachRequest{ + SystemType: VirtualMachineType, + } + response := &EndpointResquestResponse{} + + jsonString, err := json.Marshal(requestMessage) + if err != nil { + return err + } + return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go new file mode 100644 index 0000000000..0a8f36d832 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go @@ -0,0 +1,51 @@ +//go:build windows + +package hns + +import ( + "encoding/json" + "fmt" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/sirupsen/logrus" +) + +func hnsCallRawResponse(method, path, request string) (*hnsResponse, error) { + var responseBuffer *uint16 + logrus.Debugf("[%s]=>[%s] Request : %s", method, path, request) + + err := _hnsCall(method, path, request, &responseBuffer) + if err != nil { + return nil, hcserror.New(err, "hnsCall ", "") + } + response := interop.ConvertAndFreeCoTaskMemString(responseBuffer) + + hnsresponse := &hnsResponse{} + if err = json.Unmarshal([]byte(response), &hnsresponse); err != nil { + return nil, err + } + return hnsresponse, nil +} + +func hnsCall(method, path, request string, returnResponse interface{}) error { + hnsresponse, err := hnsCallRawResponse(method, path, request) + if err != nil { + return fmt.Errorf("failed during hnsCallRawResponse: %v", err) + } + if !hnsresponse.Success { + return fmt.Errorf("hns failed with error 
: %s", hnsresponse.Error) + } + + if len(hnsresponse.Output) == 0 { + return nil + } + + logrus.Debugf("Network Response : %s", hnsresponse.Output) + err = json.Unmarshal(hnsresponse.Output, returnResponse) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsglobals.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsglobals.go new file mode 100644 index 0000000000..464bb8954f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsglobals.go @@ -0,0 +1,30 @@ +//go:build windows + +package hns + +type HNSGlobals struct { + Version HNSVersion `json:"Version"` +} + +type HNSVersion struct { + Major int `json:"Major"` + Minor int `json:"Minor"` +} + +var ( + HNSVersion1803 = HNSVersion{Major: 7, Minor: 2} +) + +func GetHNSGlobals() (*HNSGlobals, error) { + var version HNSVersion + err := hnsCall("GET", "/globals/version", "", &version) + if err != nil { + return nil, err + } + + globals := &HNSGlobals{ + Version: version, + } + + return globals, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go new file mode 100644 index 0000000000..8861faee7a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go @@ -0,0 +1,144 @@ +//go:build windows + +package hns + +import ( + "encoding/json" + "errors" + "net" + + "github.com/sirupsen/logrus" +) + +// Subnet is associated with a network and represents a list +// of subnets available to the network +type Subnet struct { + AddressPrefix string `json:",omitempty"` + GatewayAddress string `json:",omitempty"` + Policies []json.RawMessage `json:",omitempty"` +} + +// MacPool is associated with a network and represents a list +// of macaddresses available to the network +type MacPool struct { + StartMacAddress string `json:",omitempty"` + EndMacAddress string `json:",omitempty"` +} + +// HNSNetwork represents a network in HNS +type HNSNetwork struct { + Id string `json:"ID,omitempty"` + Name string `json:",omitempty"` + Type string `json:",omitempty"` + NetworkAdapterName string `json:",omitempty"` + SourceMac string `json:",omitempty"` + Policies []json.RawMessage `json:",omitempty"` + MacPools []MacPool `json:",omitempty"` + Subnets []Subnet `json:",omitempty"` + DNSSuffix string `json:",omitempty"` + DNSServerList string `json:",omitempty"` + DNSServerCompartment uint32 `json:",omitempty"` + ManagementIP string `json:",omitempty"` + AutomaticDNS bool `json:",omitempty"` +} + +type hnsResponse struct { + Success bool + Error string + Output json.RawMessage +} + +// HNSNetworkRequest makes a call into HNS to update/query a single network +func HNSNetworkRequest(method, path, request string) (*HNSNetwork, error) { + var network HNSNetwork + err := hnsCall(method, "/networks/"+path, request, &network) + if err != nil { + return nil, err + } + + return &network, nil +} + +// HNSListNetworkRequest makes a HNS call to query the list of available networks +func HNSListNetworkRequest(method, path, request string) ([]HNSNetwork, error) { + var network []HNSNetwork + err := hnsCall(method, "/networks/"+path, request, &network) + if err != nil { + return nil, err + } + + return network, nil +} + +// GetHNSNetworkByID +func GetHNSNetworkByID(networkID string) (*HNSNetwork, error) { + return HNSNetworkRequest("GET", networkID, "") +} + +// GetHNSNetworkName filtered by Name +func 
GetHNSNetworkByName(networkName string) (*HNSNetwork, error) { + hsnnetworks, err := HNSListNetworkRequest("GET", "", "") + if err != nil { + return nil, err + } + for _, hnsnetwork := range hsnnetworks { + if hnsnetwork.Name == networkName { + return &hnsnetwork, nil + } + } + return nil, NetworkNotFoundError{NetworkName: networkName} +} + +// Create Network by sending NetworkRequest to HNS. +func (network *HNSNetwork) Create() (*HNSNetwork, error) { + operation := "Create" + title := "hcsshim::HNSNetwork::" + operation + logrus.Debugf(title+" id=%s", network.Id) + + for _, subnet := range network.Subnets { + if (subnet.AddressPrefix != "") && (subnet.GatewayAddress == "") { + return nil, errors.New("network create error, subnet has address prefix but no gateway specified") + } + } + + jsonString, err := json.Marshal(network) + if err != nil { + return nil, err + } + return HNSNetworkRequest("POST", "", string(jsonString)) +} + +// Delete Network by sending NetworkRequest to HNS +func (network *HNSNetwork) Delete() (*HNSNetwork, error) { + operation := "Delete" + title := "hcsshim::HNSNetwork::" + operation + logrus.Debugf(title+" id=%s", network.Id) + + return HNSNetworkRequest("DELETE", network.Id, "") +} + +// Creates an endpoint on the Network. +func (network *HNSNetwork) NewEndpoint(ipAddress net.IP, macAddress net.HardwareAddr) *HNSEndpoint { + return &HNSEndpoint{ + VirtualNetwork: network.Id, + IPAddress: ipAddress, + MacAddress: string(macAddress), + } +} + +func (network *HNSNetwork) CreateEndpoint(endpoint *HNSEndpoint) (*HNSEndpoint, error) { + operation := "CreateEndpoint" + title := "hcsshim::HNSNetwork::" + operation + logrus.Debugf(title+" id=%s, endpointId=%s", network.Id, endpoint.Id) + + endpoint.VirtualNetwork = network.Id + return endpoint.Create() +} + +func (network *HNSNetwork) CreateRemoteEndpoint(endpoint *HNSEndpoint) (*HNSEndpoint, error) { + operation := "CreateRemoteEndpoint" + title := "hcsshim::HNSNetwork::" + operation + logrus.Debugf(title+" id=%s", network.Id) + endpoint.IsRemoteEndpoint = true + return network.CreateEndpoint(endpoint) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go new file mode 100644 index 0000000000..082c018a4e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go @@ -0,0 +1,110 @@ +package hns + +// Type of Request Support in ModifySystem +type PolicyType string + +// RequestType const +const ( + Nat PolicyType = "NAT" + ACL PolicyType = "ACL" + PA PolicyType = "PA" + VLAN PolicyType = "VLAN" + VSID PolicyType = "VSID" + VNet PolicyType = "VNET" + L2Driver PolicyType = "L2Driver" + Isolation PolicyType = "Isolation" + QOS PolicyType = "QOS" + OutboundNat PolicyType = "OutBoundNAT" + ExternalLoadBalancer PolicyType = "ELB" + Route PolicyType = "ROUTE" + Proxy PolicyType = "PROXY" +) + +type NatPolicy struct { + Type PolicyType `json:"Type"` + Protocol string `json:",omitempty"` + InternalPort uint16 `json:",omitempty"` + ExternalPort uint16 `json:",omitempty"` + ExternalPortReserved bool `json:",omitempty"` +} + +type QosPolicy struct { + Type PolicyType `json:"Type"` + MaximumOutgoingBandwidthInBytes uint64 +} + +type IsolationPolicy struct { + Type PolicyType `json:"Type"` + VLAN uint + VSID uint + InDefaultIsolation bool +} + +type VlanPolicy struct { + Type PolicyType `json:"Type"` + VLAN uint +} + +type VsidPolicy struct { + Type PolicyType `json:"Type"` + VSID uint +} + +type PaPolicy 
struct { + Type PolicyType `json:"Type"` + PA string `json:"PA"` +} + +type OutboundNatPolicy struct { + Policy + VIP string `json:"VIP,omitempty"` + Exceptions []string `json:"ExceptionList,omitempty"` + Destinations []string `json:",omitempty"` +} + +type ProxyPolicy struct { + Type PolicyType `json:"Type"` + IP string `json:",omitempty"` + Port string `json:",omitempty"` + ExceptionList []string `json:",omitempty"` + Destination string `json:",omitempty"` + OutboundNat bool `json:",omitempty"` +} + +type ActionType string +type DirectionType string +type RuleType string + +const ( + Allow ActionType = "Allow" + Block ActionType = "Block" + + In DirectionType = "In" + Out DirectionType = "Out" + + Host RuleType = "Host" + Switch RuleType = "Switch" +) + +type ACLPolicy struct { + Type PolicyType `json:"Type"` + Id string `json:"Id,omitempty"` + Protocol uint16 `json:",omitempty"` + Protocols string `json:"Protocols,omitempty"` + InternalPort uint16 `json:",omitempty"` + Action ActionType + Direction DirectionType + LocalAddresses string `json:",omitempty"` + RemoteAddresses string `json:",omitempty"` + LocalPorts string `json:"LocalPorts,omitempty"` + LocalPort uint16 `json:",omitempty"` + RemotePorts string `json:"RemotePorts,omitempty"` + RemotePort uint16 `json:",omitempty"` + RuleType RuleType `json:"RuleType,omitempty"` + Priority uint16 `json:",omitempty"` + ServiceName string `json:",omitempty"` +} + +type Policy struct { + Type PolicyType `json:"Type"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go new file mode 100644 index 0000000000..b98db40e8d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go @@ -0,0 +1,203 @@ +//go:build windows + +package hns + +import ( + "encoding/json" + + "github.com/sirupsen/logrus" +) + +// RoutePolicy is a structure defining schema for Route based Policy +type RoutePolicy struct { + Policy + DestinationPrefix string `json:"DestinationPrefix,omitempty"` + NextHop string `json:"NextHop,omitempty"` + EncapEnabled bool `json:"NeedEncap,omitempty"` +} + +// ELBPolicy is a structure defining schema for ELB LoadBalancing based Policy +type ELBPolicy struct { + LBPolicy + SourceVIP string `json:"SourceVIP,omitempty"` + VIPs []string `json:"VIPs,omitempty"` + ILB bool `json:"ILB,omitempty"` + DSR bool `json:"IsDSR,omitempty"` +} + +// LBPolicy is a structure defining schema for LoadBalancing based Policy +type LBPolicy struct { + Policy + Protocol uint16 `json:"Protocol,omitempty"` + InternalPort uint16 + ExternalPort uint16 +} + +// PolicyList is a structure defining schema for Policy list request +type PolicyList struct { + ID string `json:"ID,omitempty"` + EndpointReferences []string `json:"References,omitempty"` + Policies []json.RawMessage `json:"Policies,omitempty"` +} + +// HNSPolicyListRequest makes a call into HNS to update/query a single network +func HNSPolicyListRequest(method, path, request string) (*PolicyList, error) { + var policy PolicyList + err := hnsCall(method, "/policylists/"+path, request, &policy) + if err != nil { + return nil, err + } + + return &policy, nil +} + +// HNSListPolicyListRequest gets all the policy list +func HNSListPolicyListRequest() ([]PolicyList, error) { + var plist []PolicyList + err := hnsCall("GET", "/policylists/", "", &plist) + if err != nil { + return nil, err + } + + return plist, nil +} + +// PolicyListRequest makes a HNS call to 
modify/query a network policy list +func PolicyListRequest(method, path, request string) (*PolicyList, error) { + policylist := &PolicyList{} + err := hnsCall(method, "/policylists/"+path, request, &policylist) + if err != nil { + return nil, err + } + + return policylist, nil +} + +// GetPolicyListByID get the policy list by ID +func GetPolicyListByID(policyListID string) (*PolicyList, error) { + return PolicyListRequest("GET", policyListID, "") +} + +// Create PolicyList by sending PolicyListRequest to HNS. +func (policylist *PolicyList) Create() (*PolicyList, error) { + operation := "Create" + title := "hcsshim::PolicyList::" + operation + logrus.Debugf(title+" id=%s", policylist.ID) + jsonString, err := json.Marshal(policylist) + if err != nil { + return nil, err + } + return PolicyListRequest("POST", "", string(jsonString)) +} + +// Delete deletes PolicyList +func (policylist *PolicyList) Delete() (*PolicyList, error) { + operation := "Delete" + title := "hcsshim::PolicyList::" + operation + logrus.Debugf(title+" id=%s", policylist.ID) + + return PolicyListRequest("DELETE", policylist.ID, "") +} + +// AddEndpoint add an endpoint to a Policy List +func (policylist *PolicyList) AddEndpoint(endpoint *HNSEndpoint) (*PolicyList, error) { + operation := "AddEndpoint" + title := "hcsshim::PolicyList::" + operation + logrus.Debugf(title+" id=%s, endpointId:%s", policylist.ID, endpoint.Id) + + _, err := policylist.Delete() + if err != nil { + return nil, err + } + + // Add Endpoint to the Existing List + policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id) + + return policylist.Create() +} + +// RemoveEndpoint removes an endpoint from the Policy List +func (policylist *PolicyList) RemoveEndpoint(endpoint *HNSEndpoint) (*PolicyList, error) { + operation := "RemoveEndpoint" + title := "hcsshim::PolicyList::" + operation + logrus.Debugf(title+" id=%s, endpointId:%s", policylist.ID, endpoint.Id) + + _, err := policylist.Delete() + if err != nil { + return nil, err + } + + elementToRemove := "/endpoints/" + endpoint.Id + + var references []string + + for _, endpointReference := range policylist.EndpointReferences { + if endpointReference == elementToRemove { + continue + } + references = append(references, endpointReference) + } + policylist.EndpointReferences = references + return policylist.Create() +} + +// AddLoadBalancer policy list for the specified endpoints +func AddLoadBalancer(endpoints []HNSEndpoint, isILB bool, sourceVIP, vip string, protocol uint16, internalPort uint16, externalPort uint16) (*PolicyList, error) { + operation := "AddLoadBalancer" + title := "hcsshim::PolicyList::" + operation + logrus.Debugf(title+" endpointId=%v, isILB=%v, sourceVIP=%s, vip=%s, protocol=%v, internalPort=%v, externalPort=%v", endpoints, isILB, sourceVIP, vip, protocol, internalPort, externalPort) + + policylist := &PolicyList{} + + elbPolicy := &ELBPolicy{ + SourceVIP: sourceVIP, + ILB: isILB, + } + + if len(vip) > 0 { + elbPolicy.VIPs = []string{vip} + } + elbPolicy.Type = ExternalLoadBalancer + elbPolicy.Protocol = protocol + elbPolicy.InternalPort = internalPort + elbPolicy.ExternalPort = externalPort + + for _, endpoint := range endpoints { + policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id) + } + + jsonString, err := json.Marshal(elbPolicy) + if err != nil { + return nil, err + } + policylist.Policies = append(policylist.Policies, jsonString) + return policylist.Create() +} + +// AddRoute adds route policy 
list for the specified endpoints +func AddRoute(endpoints []HNSEndpoint, destinationPrefix string, nextHop string, encapEnabled bool) (*PolicyList, error) { + operation := "AddRoute" + title := "hcsshim::PolicyList::" + operation + logrus.Debugf(title+" destinationPrefix:%s", destinationPrefix) + + policylist := &PolicyList{} + + rPolicy := &RoutePolicy{ + DestinationPrefix: destinationPrefix, + NextHop: nextHop, + EncapEnabled: encapEnabled, + } + rPolicy.Type = Route + + for _, endpoint := range endpoints { + policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id) + } + + jsonString, err := json.Marshal(rPolicy) + if err != nil { + return nil, err + } + + policylist.Policies = append(policylist.Policies, jsonString) + return policylist.Create() +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnssupport.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnssupport.go new file mode 100644 index 0000000000..b9c30b9019 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnssupport.go @@ -0,0 +1,51 @@ +//go:build windows + +package hns + +import ( + "github.com/sirupsen/logrus" +) + +type HNSSupportedFeatures struct { + Acl HNSAclFeatures `json:"ACL"` +} + +type HNSAclFeatures struct { + AclAddressLists bool `json:"AclAddressLists"` + AclNoHostRulePriority bool `json:"AclHostRulePriority"` + AclPortRanges bool `json:"AclPortRanges"` + AclRuleId bool `json:"AclRuleId"` +} + +func GetHNSSupportedFeatures() HNSSupportedFeatures { + var hnsFeatures HNSSupportedFeatures + + globals, err := GetHNSGlobals() + if err != nil { + // Expected on pre-1803 builds, all features will be false/unsupported + logrus.Debugf("Unable to obtain HNS globals: %s", err) + return hnsFeatures + } + + hnsFeatures.Acl = HNSAclFeatures{ + AclAddressLists: isHNSFeatureSupported(globals.Version, HNSVersion1803), + AclNoHostRulePriority: isHNSFeatureSupported(globals.Version, HNSVersion1803), + AclPortRanges: isHNSFeatureSupported(globals.Version, HNSVersion1803), + AclRuleId: isHNSFeatureSupported(globals.Version, HNSVersion1803), + } + + return hnsFeatures +} + +func isHNSFeatureSupported(currentVersion HNSVersion, minVersionSupported HNSVersion) bool { + if currentVersion.Major < minVersionSupported.Major { + return false + } + if currentVersion.Major > minVersionSupported.Major { + return true + } + if currentVersion.Minor < minVersionSupported.Minor { + return false + } + return true +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go new file mode 100644 index 0000000000..749588ad39 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go @@ -0,0 +1,113 @@ +//go:build windows + +package hns + +import ( + "encoding/json" + "fmt" + "os" + "path" + "strings" +) + +type namespaceRequest struct { + IsDefault bool `json:",omitempty"` +} + +type namespaceEndpointRequest struct { + ID string `json:"Id"` +} + +type NamespaceResource struct { + Type string + Data json.RawMessage +} + +type namespaceResourceRequest struct { + Type string + Data interface{} +} + +type Namespace struct { + ID string + IsDefault bool `json:",omitempty"` + ResourceList []NamespaceResource `json:",omitempty"` + CompartmentId uint32 `json:",omitempty"` +} + +func issueNamespaceRequest(id *string, method, subpath string, request interface{}) (*Namespace, error) { + var err error + hnspath := "/namespaces/" + 
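+	// issueNamespaceRequest underpins the exported helpers defined below; a
+	// typical flow is roughly (sketch only, IDs illustrative):
+	//
+	//	nsID, _ := CreateNamespace()
+	//	_ = AddNamespaceEndpoint(nsID, endpointID)
+	//	eps, _ := GetNamespaceEndpoints(nsID)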
if id != nil { + hnspath = path.Join(hnspath, *id) + } + if subpath != "" { + hnspath = path.Join(hnspath, subpath) + } + var reqJSON []byte + if request != nil { + if reqJSON, err = json.Marshal(request); err != nil { + return nil, err + } + } + var ns Namespace + err = hnsCall(method, hnspath, string(reqJSON), &ns) + if err != nil { + if strings.Contains(err.Error(), "Element not found.") { + return nil, os.ErrNotExist + } + return nil, fmt.Errorf("%s %s: %s", method, hnspath, err) + } + return &ns, err +} + +func CreateNamespace() (string, error) { + req := namespaceRequest{} + ns, err := issueNamespaceRequest(nil, "POST", "", &req) + if err != nil { + return "", err + } + return ns.ID, nil +} + +func RemoveNamespace(id string) error { + _, err := issueNamespaceRequest(&id, "DELETE", "", nil) + return err +} + +func GetNamespaceEndpoints(id string) ([]string, error) { + ns, err := issueNamespaceRequest(&id, "GET", "", nil) + if err != nil { + return nil, err + } + var endpoints []string + for _, rsrc := range ns.ResourceList { + if rsrc.Type == "Endpoint" { + var endpoint namespaceEndpointRequest + err = json.Unmarshal(rsrc.Data, &endpoint) + if err != nil { + return nil, fmt.Errorf("unmarshal endpoint: %s", err) + } + endpoints = append(endpoints, endpoint.ID) + } + } + return endpoints, nil +} + +func AddNamespaceEndpoint(id string, endpointID string) error { + resource := namespaceResourceRequest{ + Type: "Endpoint", + Data: namespaceEndpointRequest{endpointID}, + } + _, err := issueNamespaceRequest(&id, "POST", "addresource", &resource) + return err +} + +func RemoveNamespaceEndpoint(id string, endpointID string) error { + resource := namespaceResourceRequest{ + Type: "Endpoint", + Data: namespaceEndpointRequest{endpointID}, + } + _, err := issueNamespaceRequest(&id, "POST", "removeresource", &resource) + return err +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go new file mode 100644 index 0000000000..a35ee945db --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go @@ -0,0 +1,80 @@ +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. + +package hns + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
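+	// errnoErr is part of the standard mkwinsyscall prologue. Wrappers whose
+	// underlying API reports failure via GetLastError typically box the errno
+	// roughly as (sketch, not part of this patch):
+	//
+	//	if e != 0 {
+	//		err = errnoErr(e)
+	//	}
+	//
+	// The HNSCall wrapper below instead converts the returned HRESULT itself.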
+ return e +} + +var ( + modvmcompute = windows.NewLazySystemDLL("vmcompute.dll") + + procHNSCall = modvmcompute.NewProc("HNSCall") +) + +func _hnsCall(method string, path string, object string, response **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(method) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + var _p2 *uint16 + _p2, hr = syscall.UTF16PtrFromString(object) + if hr != nil { + return + } + return __hnsCall(_p0, _p1, _p2, response) +} + +func __hnsCall(method *uint16, path *uint16, object *uint16, response **uint16) (hr error) { + hr = procHNSCall.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHNSCall.Addr(), 4, uintptr(unsafe.Pointer(method)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(object)), uintptr(unsafe.Pointer(response)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/interop/doc.go b/vendor/github.com/Microsoft/hcsshim/internal/interop/doc.go new file mode 100644 index 0000000000..cb554867fe --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/interop/doc.go @@ -0,0 +1 @@ +package interop diff --git a/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go b/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go new file mode 100644 index 0000000000..a564696568 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go @@ -0,0 +1,25 @@ +//go:build windows + +package interop + +import ( + "syscall" + "unsafe" +) + +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go interop.go + +//sys coTaskMemFree(buffer unsafe.Pointer) = api_ms_win_core_com_l1_1_0.CoTaskMemFree + +func ConvertAndFreeCoTaskMemString(buffer *uint16) string { + str := syscall.UTF16ToString((*[1 << 29]uint16)(unsafe.Pointer(buffer))[:]) + coTaskMemFree(unsafe.Pointer(buffer)) + return str +} + +func Win32FromHresult(hr uintptr) syscall.Errno { + if hr&0x1fff0000 == 0x00070000 { + return syscall.Errno(hr & 0xffff) + } + return syscall.Errno(hr) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go new file mode 100644 index 0000000000..a17a112508 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go @@ -0,0 +1,51 @@ +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. + +package interop + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
+ return e +} + +var ( + modapi_ms_win_core_com_l1_1_0 = windows.NewLazySystemDLL("api-ms-win-core-com-l1-1-0.dll") + + procCoTaskMemFree = modapi_ms_win_core_com_l1_1_0.NewProc("CoTaskMemFree") +) + +func coTaskMemFree(buffer unsafe.Pointer) { + syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(buffer), 0, 0) + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/doc.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/doc.go new file mode 100644 index 0000000000..34b53d6e48 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/doc.go @@ -0,0 +1,8 @@ +// This package provides higher level constructs for the win32 job object API. +// Most of the core creation and management functions are already present in "golang.org/x/sys/windows" +// (CreateJobObject, AssignProcessToJobObject, etc.) as well as most of the limit information +// structs and associated limit flags. Whatever is not present from the job object API +// in golang.org/x/sys/windows is located in /internal/winapi. +// +// https://docs.microsoft.com/en-us/windows/win32/procthread/job-objects +package jobobject diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go new file mode 100644 index 0000000000..bcca84b0da --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go @@ -0,0 +1,113 @@ +//go:build windows + +package jobobject + +import ( + "context" + "fmt" + "sync" + "unsafe" + + "github.com/Microsoft/hcsshim/internal/log" + "github.com/Microsoft/hcsshim/internal/queue" + "github.com/Microsoft/hcsshim/internal/winapi" + "github.com/sirupsen/logrus" + "golang.org/x/sys/windows" +) + +var ( + ioInitOnce sync.Once + initIOErr error + // Global iocp handle that will be re-used for every job object + ioCompletionPort windows.Handle + // Mapping of job handle to queue to place notifications in. + jobMap sync.Map +) + +// MsgAllProcessesExited is a type representing a message that every process in a job has exited. +type MsgAllProcessesExited struct{} + +// MsgUnimplemented represents a message that we are aware of, but that isn't implemented currently. +// This should not be treated as an error. +type MsgUnimplemented struct{} + +// pollIOCP polls the io completion port forever. +func pollIOCP(ctx context.Context, iocpHandle windows.Handle) { + var ( + overlapped uintptr + code uint32 + key uintptr + ) + + for { + err := windows.GetQueuedCompletionStatus(iocpHandle, &code, &key, (**windows.Overlapped)(unsafe.Pointer(&overlapped)), windows.INFINITE) + if err != nil { + log.G(ctx).WithError(err).Error("failed to poll for job object message") + continue + } + if val, ok := jobMap.Load(key); ok { + msq, ok := val.(*queue.MessageQueue) + if !ok { + log.G(ctx).WithField("value", msq).Warn("encountered non queue type in job map") + continue + } + notification, err := parseMessage(code, overlapped) + if err != nil { + log.G(ctx).WithFields(logrus.Fields{ + "code": code, + "overlapped": overlapped, + }).Warn("failed to parse job object message") + continue + } + if err := msq.Enqueue(notification); err == queue.ErrQueueClosed { + // Write will only return an error when the queue is closed. + // The only time a queue would ever be closed is when we call `Close` on + // the job it belongs to which also removes it from the jobMap, so something + // went wrong here. 
We can't return as this is reading messages for all jobs + // so just log it and move on. + log.G(ctx).WithFields(logrus.Fields{ + "code": code, + "overlapped": overlapped, + }).Warn("tried to write to a closed queue") + continue + } + } else { + log.G(ctx).Warn("received a message for a job not present in the mapping") + } + } +} + +func parseMessage(code uint32, overlapped uintptr) (interface{}, error) { + // Check code and parse out relevant information related to that notification + // that we care about. For now all we handle is the message that all processes + // in the job have exited. + switch code { + case winapi.JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO: + return MsgAllProcessesExited{}, nil + // Other messages for completeness and a check to make sure that if we fall + // into the default case that this is a code we don't know how to handle. + case winapi.JOB_OBJECT_MSG_END_OF_JOB_TIME: + case winapi.JOB_OBJECT_MSG_END_OF_PROCESS_TIME: + case winapi.JOB_OBJECT_MSG_ACTIVE_PROCESS_LIMIT: + case winapi.JOB_OBJECT_MSG_NEW_PROCESS: + case winapi.JOB_OBJECT_MSG_EXIT_PROCESS: + case winapi.JOB_OBJECT_MSG_ABNORMAL_EXIT_PROCESS: + case winapi.JOB_OBJECT_MSG_PROCESS_MEMORY_LIMIT: + case winapi.JOB_OBJECT_MSG_JOB_MEMORY_LIMIT: + case winapi.JOB_OBJECT_MSG_NOTIFICATION_LIMIT: + default: + return nil, fmt.Errorf("unknown job notification type: %d", code) + } + return MsgUnimplemented{}, nil +} + +// Assigns an IO completion port to get notified of events for the registered job +// object. +func attachIOCP(job windows.Handle, iocp windows.Handle) error { + info := winapi.JOBOBJECT_ASSOCIATE_COMPLETION_PORT{ + CompletionKey: job, + CompletionPort: iocp, + } + _, err := windows.SetInformationJobObject(job, windows.JobObjectAssociateCompletionPortInformation, uintptr(unsafe.Pointer(&info)), uint32(unsafe.Sizeof(info))) + return err +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go new file mode 100644 index 0000000000..4a224fbece --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go @@ -0,0 +1,669 @@ +//go:build windows + +package jobobject + +import ( + "context" + "errors" + "fmt" + "os" + "path/filepath" + "sync" + "sync/atomic" + "unsafe" + + "github.com/Microsoft/hcsshim/internal/queue" + "github.com/Microsoft/hcsshim/internal/winapi" + "golang.org/x/sys/windows" +) + +// JobObject is a high level wrapper around a Windows job object. Holds a handle to +// the job, a queue to receive iocp notifications about the lifecycle +// of the job and a mutex for synchronized handle access. +type JobObject struct { + handle windows.Handle + // All accesses to this MUST be done atomically except in `Open` as the object + // is being created in the function. 1 signifies that this job is currently a silo. + silo uint32 + mq *queue.MessageQueue + handleLock sync.RWMutex +} + +// JobLimits represents the resource constraints that can be applied to a job object. 
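+//
+// A minimal sketch of populating limits (values are illustrative, not from
+// this patch; zero-valued fields are skipped by SetResourceLimits):
+//
+//	limits := &JobLimits{
+//		CPUWeight:          5, // weight-based control, valid range 1-9
+//		MemoryLimitInBytes: 64 * 1024 * 1024,
+//	}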
+type JobLimits struct { + CPULimit uint32 + CPUWeight uint32 + MemoryLimitInBytes uint64 + MaxIOPS int64 + MaxBandwidth int64 +} + +type CPURateControlType uint32 + +const ( + WeightBased CPURateControlType = iota + RateBased +) + +// Processor resource controls +const ( + cpuLimitMin = 1 + cpuLimitMax = 10000 + cpuWeightMin = 1 + cpuWeightMax = 9 +) + +var ( + ErrAlreadyClosed = errors.New("the handle has already been closed") + ErrNotRegistered = errors.New("job is not registered to receive notifications") + ErrNotSilo = errors.New("job is not a silo") +) + +// Options represents the set of configurable options when making or opening a job object. +type Options struct { + // `Name` specifies the name of the job object if a named job object is desired. + Name string + // `Notifications` specifies if the job will be registered to receive notifications. + // Defaults to false. + Notifications bool + // `UseNTVariant` specifies if we should use the `Nt` variant of Open/CreateJobObject. + // Defaults to false. + UseNTVariant bool + // `Silo` specifies to promote the job to a silo. This additionally sets the flag + // JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE as it is required for the upgrade to complete. + Silo bool + // `IOTracking` enables tracking I/O statistics on the job object. More specifically this + // calls SetInformationJobObject with the JobObjectIoAttribution class. + EnableIOTracking bool +} + +// Create creates a job object. +// +// If options.Name is an empty string, the job will not be assigned a name. +// +// If options.Notifications are not enabled `PollNotifications` will return immediately with error `errNotRegistered`. +// +// If `options` is nil, use default option values. +// +// Returns a JobObject structure and an error if there is one. +func Create(ctx context.Context, options *Options) (_ *JobObject, err error) { + if options == nil { + options = &Options{} + } + + var jobName *winapi.UnicodeString + if options.Name != "" { + jobName, err = winapi.NewUnicodeString(options.Name) + if err != nil { + return nil, err + } + } + + var jobHandle windows.Handle + if options.UseNTVariant { + oa := winapi.ObjectAttributes{ + Length: unsafe.Sizeof(winapi.ObjectAttributes{}), + ObjectName: jobName, + Attributes: 0, + } + status := winapi.NtCreateJobObject(&jobHandle, winapi.JOB_OBJECT_ALL_ACCESS, &oa) + if status != 0 { + return nil, winapi.RtlNtStatusToDosError(status) + } + } else { + var jobNameBuf *uint16 + if jobName != nil && jobName.Buffer != nil { + jobNameBuf = jobName.Buffer + } + jobHandle, err = windows.CreateJobObject(nil, jobNameBuf) + if err != nil { + return nil, err + } + } + + defer func() { + if err != nil { + windows.Close(jobHandle) + } + }() + + job := &JobObject{ + handle: jobHandle, + } + + // If the IOCP we'll be using to receive messages for all jobs hasn't been + // created, create it and start polling. + if options.Notifications { + mq, err := setupNotifications(ctx, job) + if err != nil { + return nil, err + } + job.mq = mq + } + + if options.EnableIOTracking { + if err := enableIOTracking(jobHandle); err != nil { + return nil, err + } + } + + if options.Silo { + // This is a required setting for upgrading to a silo. + if err := job.SetTerminateOnLastHandleClose(); err != nil { + return nil, err + } + if err := job.PromoteToSilo(); err != nil { + return nil, err + } + } + + return job, nil +} + +// Open opens an existing job object with name provided in `options`. If no name is provided +// return an error since we need to know what job object to open. 
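+//
+// A usage sketch (the job name is illustrative):
+//
+//	job, err := Open(ctx, &Options{Name: "sandbox-job"})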
+// +// If options.Notifications is false `PollNotifications` will return immediately with error `errNotRegistered`. +// +// Returns a JobObject structure and an error if there is one. +func Open(ctx context.Context, options *Options) (_ *JobObject, err error) { + if options == nil || options.Name == "" { + return nil, errors.New("no job object name specified to open") + } + + unicodeJobName, err := winapi.NewUnicodeString(options.Name) + if err != nil { + return nil, err + } + + var jobHandle windows.Handle + if options.UseNTVariant { + oa := winapi.ObjectAttributes{ + Length: unsafe.Sizeof(winapi.ObjectAttributes{}), + ObjectName: unicodeJobName, + Attributes: 0, + } + status := winapi.NtOpenJobObject(&jobHandle, winapi.JOB_OBJECT_ALL_ACCESS, &oa) + if status != 0 { + return nil, winapi.RtlNtStatusToDosError(status) + } + } else { + jobHandle, err = winapi.OpenJobObject(winapi.JOB_OBJECT_ALL_ACCESS, 0, unicodeJobName.Buffer) + if err != nil { + return nil, err + } + } + + defer func() { + if err != nil { + windows.Close(jobHandle) + } + }() + + job := &JobObject{ + handle: jobHandle, + } + + if isJobSilo(jobHandle) { + job.silo = 1 + } + + // If the IOCP we'll be using to receive messages for all jobs hasn't been + // created, create it and start polling. + if options.Notifications { + mq, err := setupNotifications(ctx, job) + if err != nil { + return nil, err + } + job.mq = mq + } + + return job, nil +} + +// helper function to setup notifications for creating/opening a job object +func setupNotifications(ctx context.Context, job *JobObject) (*queue.MessageQueue, error) { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return nil, ErrAlreadyClosed + } + + ioInitOnce.Do(func() { + h, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0xffffffff) + if err != nil { + initIOErr = err + return + } + ioCompletionPort = h + go pollIOCP(ctx, h) + }) + + if initIOErr != nil { + return nil, initIOErr + } + + mq := queue.NewMessageQueue() + jobMap.Store(uintptr(job.handle), mq) + if err := attachIOCP(job.handle, ioCompletionPort); err != nil { + jobMap.Delete(uintptr(job.handle)) + return nil, fmt.Errorf("failed to attach job to IO completion port: %w", err) + } + return mq, nil +} + +// PollNotification will poll for a job object notification. This call should only be called once +// per job (ideally in a goroutine loop) and will block if there is not a notification ready. +// This call will return immediately with error `ErrNotRegistered` if the job was not registered +// to receive notifications during `Create`. Internally, messages will be queued and there +// is no worry of messages being dropped. +func (job *JobObject) PollNotification() (interface{}, error) { + if job.mq == nil { + return nil, ErrNotRegistered + } + return job.mq.Dequeue() +} + +// UpdateProcThreadAttribute updates the passed in ProcThreadAttributeList to contain what is necessary to +// launch a process in a job at creation time. This can be used to avoid having to call Assign() after a process +// has already started running. 
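+//
+// A rough sketch of wiring this into process creation (error handling elided;
+// the attribute list type comes from golang.org/x/sys/windows):
+//
+//	attrs, _ := windows.NewProcThreadAttributeList(1)
+//	_ = job.UpdateProcThreadAttribute(attrs)
+//	// pass attrs in the extended startup info when creating the process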
+func (job *JobObject) UpdateProcThreadAttribute(attrList *windows.ProcThreadAttributeListContainer) error { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return ErrAlreadyClosed + } + + if err := attrList.Update( + winapi.PROC_THREAD_ATTRIBUTE_JOB_LIST, + unsafe.Pointer(&job.handle), + unsafe.Sizeof(job.handle), + ); err != nil { + return fmt.Errorf("failed to update proc thread attributes for job object: %w", err) + } + + return nil +} + +// Close closes the job object handle. +func (job *JobObject) Close() error { + job.handleLock.Lock() + defer job.handleLock.Unlock() + + if job.handle == 0 { + return ErrAlreadyClosed + } + + if err := windows.Close(job.handle); err != nil { + return err + } + + if job.mq != nil { + job.mq.Close() + } + // Handles now invalid so if the map entry to receive notifications for this job still + // exists remove it so we can stop receiving notifications. + if _, ok := jobMap.Load(uintptr(job.handle)); ok { + jobMap.Delete(uintptr(job.handle)) + } + + job.handle = 0 + return nil +} + +// Assign assigns a process to the job object. +func (job *JobObject) Assign(pid uint32) error { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return ErrAlreadyClosed + } + + if pid == 0 { + return errors.New("invalid pid: 0") + } + hProc, err := windows.OpenProcess(winapi.PROCESS_ALL_ACCESS, true, pid) + if err != nil { + return err + } + defer windows.Close(hProc) + return windows.AssignProcessToJobObject(job.handle, hProc) +} + +// Terminate terminates the job, essentially calls TerminateProcess on every process in the +// job. +func (job *JobObject) Terminate(exitCode uint32) error { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + if job.handle == 0 { + return ErrAlreadyClosed + } + return windows.TerminateJobObject(job.handle, exitCode) +} + +// Pids returns all of the process IDs in the job object. +func (job *JobObject) Pids() ([]uint32, error) { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return nil, ErrAlreadyClosed + } + + info := winapi.JOBOBJECT_BASIC_PROCESS_ID_LIST{} + err := winapi.QueryInformationJobObject( + job.handle, + winapi.JobObjectBasicProcessIdList, + unsafe.Pointer(&info), + uint32(unsafe.Sizeof(info)), + nil, + ) + + // This is either the case where there is only one process or no processes in + // the job. Any other case will result in ERROR_MORE_DATA. Check if info.NumberOfProcessIdsInList + // is 1 and just return this, otherwise return an empty slice. + if err == nil { + if info.NumberOfProcessIdsInList == 1 { + return []uint32{uint32(info.ProcessIdList[0])}, nil + } + // Return empty slice instead of nil to play well with the caller of this. 
+ // Do not return an error if no processes are running inside the job + return []uint32{}, nil + } + + if err != winapi.ERROR_MORE_DATA { + return nil, fmt.Errorf("failed initial query for PIDs in job object: %w", err) + } + + jobBasicProcessIDListSize := unsafe.Sizeof(info) + (unsafe.Sizeof(info.ProcessIdList[0]) * uintptr(info.NumberOfAssignedProcesses-1)) + buf := make([]byte, jobBasicProcessIDListSize) + if err = winapi.QueryInformationJobObject( + job.handle, + winapi.JobObjectBasicProcessIdList, + unsafe.Pointer(&buf[0]), + uint32(len(buf)), + nil, + ); err != nil { + return nil, fmt.Errorf("failed to query for PIDs in job object: %w", err) + } + + bufInfo := (*winapi.JOBOBJECT_BASIC_PROCESS_ID_LIST)(unsafe.Pointer(&buf[0])) + pids := make([]uint32, bufInfo.NumberOfProcessIdsInList) + for i, bufPid := range bufInfo.AllPids() { + pids[i] = uint32(bufPid) + } + return pids, nil +} + +// QueryMemoryStats gets the memory stats for the job object. +func (job *JobObject) QueryMemoryStats() (*winapi.JOBOBJECT_MEMORY_USAGE_INFORMATION, error) { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return nil, ErrAlreadyClosed + } + + info := winapi.JOBOBJECT_MEMORY_USAGE_INFORMATION{} + if err := winapi.QueryInformationJobObject( + job.handle, + winapi.JobObjectMemoryUsageInformation, + unsafe.Pointer(&info), + uint32(unsafe.Sizeof(info)), + nil, + ); err != nil { + return nil, fmt.Errorf("failed to query for job object memory stats: %w", err) + } + return &info, nil +} + +// QueryProcessorStats gets the processor stats for the job object. +func (job *JobObject) QueryProcessorStats() (*winapi.JOBOBJECT_BASIC_ACCOUNTING_INFORMATION, error) { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return nil, ErrAlreadyClosed + } + + info := winapi.JOBOBJECT_BASIC_ACCOUNTING_INFORMATION{} + if err := winapi.QueryInformationJobObject( + job.handle, + winapi.JobObjectBasicAccountingInformation, + unsafe.Pointer(&info), + uint32(unsafe.Sizeof(info)), + nil, + ); err != nil { + return nil, fmt.Errorf("failed to query for job object process stats: %w", err) + } + return &info, nil +} + +// QueryStorageStats gets the storage (I/O) stats for the job object. This call will error +// if either `EnableIOTracking` wasn't set to true on creation of the job, or SetIOTracking() +// hasn't been called since creation of the job. +func (job *JobObject) QueryStorageStats() (*winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION, error) { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return nil, ErrAlreadyClosed + } + + info := winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION{ + ControlFlags: winapi.JOBOBJECT_IO_ATTRIBUTION_CONTROL_ENABLE, + } + if err := winapi.QueryInformationJobObject( + job.handle, + winapi.JobObjectIoAttribution, + unsafe.Pointer(&info), + uint32(unsafe.Sizeof(info)), + nil, + ); err != nil { + return nil, fmt.Errorf("failed to query for job object storage stats: %w", err) + } + return &info, nil +} + +// ApplyFileBinding makes a file binding using the Bind Filter from target to root. If the job has +// not been upgraded to a silo this call will fail. The binding is only applied and visible for processes +// running in the job, any processes on the host or in another job will not be able to see the binding. 
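+//
+// For example (paths illustrative), exposing C:\host\data read-only at C:\data
+// for processes inside the silo:
+//
+//	err := job.ApplyFileBinding(`C:\data`, `C:\host\data`, true)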
+func (job *JobObject) ApplyFileBinding(root, target string, readOnly bool) error { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return ErrAlreadyClosed + } + + if !job.isSilo() { + return ErrNotSilo + } + + // The parent directory needs to exist for the bind to work. MkdirAll stats and + // returns nil if the directory exists internally so we should be fine to mkdirall + // every time. + if err := os.MkdirAll(filepath.Dir(root), 0); err != nil { + return err + } + + rootPtr, err := windows.UTF16PtrFromString(root) + if err != nil { + return err + } + + targetPtr, err := windows.UTF16PtrFromString(target) + if err != nil { + return err + } + + flags := winapi.BINDFLT_FLAG_USE_CURRENT_SILO_MAPPING + if readOnly { + flags |= winapi.BINDFLT_FLAG_READ_ONLY_MAPPING + } + + if err := winapi.BfSetupFilter( + job.handle, + flags, + rootPtr, + targetPtr, + nil, + 0, + ); err != nil { + return fmt.Errorf("failed to bind target %q to root %q for job object: %w", target, root, err) + } + return nil +} + +// isJobSilo is a helper to determine if a job object that was opened is a silo. This should ONLY be called +// from `Open` and any callers in this package afterwards should use `job.isSilo()` +func isJobSilo(h windows.Handle) bool { + // None of the information from the structure that this info class expects will be used, this is just used as + // the call will fail if the job hasn't been upgraded to a silo so we can use this to tell when we open a job + // if it's a silo or not. Because none of the info matters simply define a dummy struct with the size that the call + // expects which is 16 bytes. + type isSiloObj struct { + _ [16]byte + } + var siloInfo isSiloObj + err := winapi.QueryInformationJobObject( + h, + winapi.JobObjectSiloBasicInformation, + unsafe.Pointer(&siloInfo), + uint32(unsafe.Sizeof(siloInfo)), + nil, + ) + return err == nil +} + +// PromoteToSilo promotes a job object to a silo. There must be no running processess +// in the job for this to succeed. If the job is already a silo this is a no-op. +func (job *JobObject) PromoteToSilo() error { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return ErrAlreadyClosed + } + + if job.isSilo() { + return nil + } + + pids, err := job.Pids() + if err != nil { + return err + } + + if len(pids) != 0 { + return fmt.Errorf("job cannot have running processes to be promoted to a silo, found %d running processes", len(pids)) + } + + _, err = windows.SetInformationJobObject( + job.handle, + winapi.JobObjectCreateSilo, + 0, + 0, + ) + if err != nil { + return fmt.Errorf("failed to promote job to silo: %w", err) + } + + atomic.StoreUint32(&job.silo, 1) + return nil +} + +// isSilo returns if the job object is a silo. +func (job *JobObject) isSilo() bool { + return atomic.LoadUint32(&job.silo) == 1 +} + +// QueryPrivateWorkingSet returns the private working set size for the job. This is calculated by adding up the +// private working set for every process running in the job. +func (job *JobObject) QueryPrivateWorkingSet() (uint64, error) { + pids, err := job.Pids() + if err != nil { + return 0, err + } + + openAndQueryWorkingSet := func(pid uint32) (uint64, error) { + h, err := windows.OpenProcess(windows.PROCESS_QUERY_LIMITED_INFORMATION, false, pid) + if err != nil { + // Continue to the next if OpenProcess doesn't return a valid handle (fails). Handles a + // case where one of the pids in the job exited before we open. 
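+		// Treat a process that vanished between listing and opening as
+		// contributing zero bytes rather than failing the whole query.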
+ return 0, nil + } + defer func() { + _ = windows.Close(h) + }() + // Check if the process is actually running in the job still. There's a small chance + // that the process could have exited and had its pid re-used between grabbing the pids + // in the job and opening the handle to it above. + var inJob int32 + if err := winapi.IsProcessInJob(h, job.handle, &inJob); err != nil { + // This shouldn't fail unless we have incorrect access rights which we control + // here so probably best to error out if this failed. + return 0, err + } + // Don't report stats for this process as it's not running in the job. This shouldn't be + // an error condition though. + if inJob == 0 { + return 0, nil + } + + var vmCounters winapi.VM_COUNTERS_EX2 + status := winapi.NtQueryInformationProcess( + h, + winapi.ProcessVmCounters, + unsafe.Pointer(&vmCounters), + uint32(unsafe.Sizeof(vmCounters)), + nil, + ) + if !winapi.NTSuccess(status) { + return 0, fmt.Errorf("failed to query information for process: %w", winapi.RtlNtStatusToDosError(status)) + } + return uint64(vmCounters.PrivateWorkingSetSize), nil + } + + var jobWorkingSetSize uint64 + for _, pid := range pids { + workingSet, err := openAndQueryWorkingSet(pid) + if err != nil { + return 0, err + } + jobWorkingSetSize += workingSet + } + + return jobWorkingSetSize, nil +} + +// SetIOTracking enables IO tracking for processes in the job object. +// This enables use of the QueryStorageStats method. +func (job *JobObject) SetIOTracking() error { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return ErrAlreadyClosed + } + + return enableIOTracking(job.handle) +} + +func enableIOTracking(job windows.Handle) error { + info := winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION{ + ControlFlags: winapi.JOBOBJECT_IO_ATTRIBUTION_CONTROL_ENABLE, + } + if _, err := windows.SetInformationJobObject( + job, + winapi.JobObjectIoAttribution, + uintptr(unsafe.Pointer(&info)), + uint32(unsafe.Sizeof(info)), + ); err != nil { + return fmt.Errorf("failed to enable IO tracking on job object: %w", err) + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go new file mode 100644 index 0000000000..03f71d9a42 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go @@ -0,0 +1,317 @@ +//go:build windows + +package jobobject + +import ( + "errors" + "fmt" + "unsafe" + + "github.com/Microsoft/hcsshim/internal/winapi" + "golang.org/x/sys/windows" +) + +const ( + memoryLimitMax uint64 = 0xffffffffffffffff +) + +func isFlagSet(flag, controlFlags uint32) bool { + return (flag & controlFlags) == flag +} + +// SetResourceLimits sets resource limits on the job object (cpu, memory, storage). +func (job *JobObject) SetResourceLimits(limits *JobLimits) error { + // Go through and check what limits were specified and apply them to the job. 
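+	// Zero-valued fields are treated as "not specified" and skipped; when both
+	// CPULimit and CPUWeight are set, CPULimit takes precedence (note the
+	// else-if below).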
+ if limits.MemoryLimitInBytes != 0 { + if err := job.SetMemoryLimit(limits.MemoryLimitInBytes); err != nil { + return fmt.Errorf("failed to set job object memory limit: %w", err) + } + } + + if limits.CPULimit != 0 { + if err := job.SetCPULimit(RateBased, limits.CPULimit); err != nil { + return fmt.Errorf("failed to set job object cpu limit: %w", err) + } + } else if limits.CPUWeight != 0 { + if err := job.SetCPULimit(WeightBased, limits.CPUWeight); err != nil { + return fmt.Errorf("failed to set job object cpu limit: %w", err) + } + } + + if limits.MaxBandwidth != 0 || limits.MaxIOPS != 0 { + if err := job.SetIOLimit(limits.MaxBandwidth, limits.MaxIOPS); err != nil { + return fmt.Errorf("failed to set io limit on job object: %w", err) + } + } + return nil +} + +// SetTerminateOnLastHandleClose sets the job object flag that specifies that the job should terminate +// all processes in the job on the last open handle being closed. +func (job *JobObject) SetTerminateOnLastHandleClose() error { + info, err := job.getExtendedInformation() + if err != nil { + return err + } + info.BasicLimitInformation.LimitFlags |= windows.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE + return job.setExtendedInformation(info) +} + +// SetMemoryLimit sets the memory limit of the job object based on the given `memoryLimitInBytes`. +func (job *JobObject) SetMemoryLimit(memoryLimitInBytes uint64) error { + if memoryLimitInBytes >= memoryLimitMax { + return errors.New("memory limit specified exceeds the max size") + } + + info, err := job.getExtendedInformation() + if err != nil { + return err + } + + info.JobMemoryLimit = uintptr(memoryLimitInBytes) + info.BasicLimitInformation.LimitFlags |= windows.JOB_OBJECT_LIMIT_JOB_MEMORY + return job.setExtendedInformation(info) +} + +// GetMemoryLimit gets the memory limit in bytes of the job object. +func (job *JobObject) GetMemoryLimit() (uint64, error) { + info, err := job.getExtendedInformation() + if err != nil { + return 0, err + } + return uint64(info.JobMemoryLimit), nil +} + +// SetCPULimit sets the CPU limit depending on the specified `CPURateControlType` to +// `rateControlValue` for the job object. +func (job *JobObject) SetCPULimit(rateControlType CPURateControlType, rateControlValue uint32) error { + cpuInfo, err := job.getCPURateControlInformation() + if err != nil { + return err + } + switch rateControlType { + case WeightBased: + if rateControlValue < cpuWeightMin || rateControlValue > cpuWeightMax { + return fmt.Errorf("processor weight value of `%d` is invalid", rateControlValue) + } + cpuInfo.ControlFlags |= winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE | winapi.JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED + cpuInfo.Value = rateControlValue + case RateBased: + if rateControlValue < cpuLimitMin || rateControlValue > cpuLimitMax { + return fmt.Errorf("processor rate of `%d` is invalid", rateControlValue) + } + cpuInfo.ControlFlags |= winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE | winapi.JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP + cpuInfo.Value = rateControlValue + default: + return errors.New("invalid job object cpu rate control type") + } + return job.setCPURateControlInfo(cpuInfo) +} + +// GetCPULimit gets the cpu limits for the job object. +// `rateControlType` is used to indicate what type of cpu limit to query for. 
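+//
+// For example, reading back a hard cap applied earlier with
+// SetCPULimit(RateBased, n) (sketch):
+//
+//	rate, err := job.GetCPULimit(RateBased)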
+func (job *JobObject) GetCPULimit(rateControlType CPURateControlType) (uint32, error) { + info, err := job.getCPURateControlInformation() + if err != nil { + return 0, err + } + + if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE, info.ControlFlags) { + return 0, errors.New("the job does not have cpu rate control enabled") + } + + switch rateControlType { + case WeightBased: + if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED, info.ControlFlags) { + return 0, errors.New("cannot get cpu weight for job object without cpu weight option set") + } + case RateBased: + if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP, info.ControlFlags) { + return 0, errors.New("cannot get cpu rate hard cap for job object without cpu rate hard cap option set") + } + default: + return 0, errors.New("invalid job object cpu rate control type") + } + return info.Value, nil +} + +// SetCPUAffinity sets the processor affinity for the job object. +// The affinity is passed in as a bitmask. +func (job *JobObject) SetCPUAffinity(affinityBitMask uint64) error { + info, err := job.getExtendedInformation() + if err != nil { + return err + } + info.BasicLimitInformation.LimitFlags |= uint32(windows.JOB_OBJECT_LIMIT_AFFINITY) + info.BasicLimitInformation.Affinity = uintptr(affinityBitMask) + return job.setExtendedInformation(info) +} + +// GetCPUAffinity gets the processor affinity for the job object. +// The returned affinity is a bitmask. +func (job *JobObject) GetCPUAffinity() (uint64, error) { + info, err := job.getExtendedInformation() + if err != nil { + return 0, err + } + return uint64(info.BasicLimitInformation.Affinity), nil +} + +// SetIOLimit sets the IO limits specified on the job object. +func (job *JobObject) SetIOLimit(maxBandwidth, maxIOPS int64) error { + ioInfo, err := job.getIOLimit() + if err != nil { + return err + } + ioInfo.ControlFlags |= winapi.JOB_OBJECT_IO_RATE_CONTROL_ENABLE + if maxBandwidth != 0 { + ioInfo.MaxBandwidth = maxBandwidth + } + if maxIOPS != 0 { + ioInfo.MaxIops = maxIOPS + } + return job.setIORateControlInfo(ioInfo) +} + +// GetIOMaxBandwidthLimit gets the max bandwidth for the job object. +func (job *JobObject) GetIOMaxBandwidthLimit() (int64, error) { + info, err := job.getIOLimit() + if err != nil { + return 0, err + } + return info.MaxBandwidth, nil +} + +// GetIOMaxIopsLimit gets the max iops for the job object. +func (job *JobObject) GetIOMaxIopsLimit() (int64, error) { + info, err := job.getIOLimit() + if err != nil { + return 0, err + } + return info.MaxIops, nil +} + +// Helper function for getting a job object's extended information. +func (job *JobObject) getExtendedInformation() (*windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION, error) { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return nil, ErrAlreadyClosed + } + + info := windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION{} + if err := winapi.QueryInformationJobObject( + job.handle, + windows.JobObjectExtendedLimitInformation, + unsafe.Pointer(&info), + uint32(unsafe.Sizeof(info)), + nil, + ); err != nil { + return nil, fmt.Errorf("query %v returned error: %w", info, err) + } + return &info, nil +} + +// Helper function for getting a job object's CPU rate control information. 
+func (job *JobObject) getCPURateControlInformation() (*winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION, error) { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return nil, ErrAlreadyClosed + } + + info := winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION{} + if err := winapi.QueryInformationJobObject( + job.handle, + windows.JobObjectCpuRateControlInformation, + unsafe.Pointer(&info), + uint32(unsafe.Sizeof(info)), + nil, + ); err != nil { + return nil, fmt.Errorf("query %v returned error: %w", info, err) + } + return &info, nil +} + +// Helper function for setting a job object's extended information. +func (job *JobObject) setExtendedInformation(info *windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION) error { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return ErrAlreadyClosed + } + + if _, err := windows.SetInformationJobObject( + job.handle, + windows.JobObjectExtendedLimitInformation, + uintptr(unsafe.Pointer(info)), + uint32(unsafe.Sizeof(*info)), + ); err != nil { + return fmt.Errorf("failed to set Extended info %v on job object: %w", info, err) + } + return nil +} + +// Helper function for querying job handle for IO limit information. +func (job *JobObject) getIOLimit() (*winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION, error) { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return nil, ErrAlreadyClosed + } + + ioInfo := &winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION{} + var blockCount uint32 = 1 + + if _, err := winapi.QueryIoRateControlInformationJobObject( + job.handle, + nil, + &ioInfo, + &blockCount, + ); err != nil { + return nil, fmt.Errorf("query %v returned error: %w", ioInfo, err) + } + + if !isFlagSet(winapi.JOB_OBJECT_IO_RATE_CONTROL_ENABLE, ioInfo.ControlFlags) { + return nil, fmt.Errorf("query %v cannot get IO limits for job object without IO rate control option set", ioInfo) + } + return ioInfo, nil +} + +// Helper function for setting a job object's IO rate control information. +func (job *JobObject) setIORateControlInfo(ioInfo *winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION) error { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return ErrAlreadyClosed + } + + if _, err := winapi.SetIoRateControlInformationJobObject(job.handle, ioInfo); err != nil { + return fmt.Errorf("failed to set IO limit info %v on job object: %w", ioInfo, err) + } + return nil +} + +// Helper function for setting a job object's CPU rate control information. 
+func (job *JobObject) setCPURateControlInfo(cpuInfo *winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION) error {
+ job.handleLock.RLock()
+ defer job.handleLock.RUnlock()
+
+ if job.handle == 0 {
+ return ErrAlreadyClosed
+ }
+ if _, err := windows.SetInformationJobObject(
+ job.handle,
+ windows.JobObjectCpuRateControlInformation,
+ uintptr(unsafe.Pointer(cpuInfo)),
+ uint32(unsafe.Sizeof(*cpuInfo)),
+ ); err != nil {
+ return fmt.Errorf("failed to set cpu limit info %v on job object: %w", cpuInfo, err)
+ }
+ return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/context.go b/vendor/github.com/Microsoft/hcsshim/internal/log/context.go
new file mode 100644
index 0000000000..d17d909d93
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/log/context.go
@@ -0,0 +1,118 @@
+package log
+
+import (
+ "context"
+
+ "github.com/sirupsen/logrus"
+ "go.opencensus.io/trace"
+)
+
+type entryContextKeyType int
+
+const _entryContextKey entryContextKeyType = iota
+
+var (
+ // L is the default, blank logging entry. WithField and co. all return a copy
+ // of the original entry, so this will not leak fields between calls.
+ //
+ // Do NOT modify fields directly, as that will corrupt state for all users and
+ // is not thread safe.
+ // Instead, use `L.With*` or `L.Dup()`. Or `G(context.Background())`.
+ L = logrus.NewEntry(logrus.StandardLogger())
+
+ // G is an alias for GetEntry
+ G = GetEntry
+
+ // S is an alias for SetEntry
+ S = SetEntry
+
+ // U is an alias for UpdateContext
+ U = UpdateContext
+)
+
+// GetEntry returns a `logrus.Entry` stored in the context, if one exists.
+// Otherwise, it returns a default entry that points to the current context.
+//
+// Note: if a new entry is returned, it will reference the passed-in context.
+// However, existing entries may be stored in parent contexts and additionally reference
+// earlier contexts.
+// Use `UpdateContext` to update the entry and context.
+func GetEntry(ctx context.Context) *logrus.Entry {
+ entry := fromContext(ctx)
+
+ if entry == nil {
+ entry = L.WithContext(ctx)
+ }
+
+ return entry
+}
+
+// SetEntry updates the log entry in the context with the provided fields, and
+// returns both. It is equivalent to:
+//
+// entry := GetEntry(ctx).WithFields(fields)
+// ctx = WithContext(ctx, entry)
+//
+// See WithContext for more information.
+func SetEntry(ctx context.Context, fields logrus.Fields) (context.Context, *logrus.Entry) {
+ e := GetEntry(ctx)
+ if len(fields) > 0 {
+ e = e.WithFields(fields)
+ }
+ return WithContext(ctx, e)
+}
+
+// UpdateContext extracts the log entry from the context, and, if the entry's
+// context points to a parent of the current context, adds the entry
+// to the most recent context. It is equivalent to:
+//
+// entry := GetEntry(ctx)
+// ctx = WithContext(ctx, entry)
+//
+// This allows the entry to reference the most recent context and any new
+// values (such as span contexts) added to it.
+//
+// See WithContext for more information.
+func UpdateContext(ctx context.Context) context.Context {
+ // there is no way to check that it is ctx (and not one of its parents) that contains `e`,
+ // so, at a slight cost, force add `e` to the context
+ ctx, _ = WithContext(ctx, GetEntry(ctx))
+ return ctx
+}
+
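
// exampleEntryFlow is an editor-added sketch (not part of upstream hcsshim)
// of the intended pattern for the helpers above: attach fields once with S
// (SetEntry), then any downstream code retrieves the enriched entry with G
// (GetEntry). The "cid" field is illustrative only.
func exampleEntryFlow(ctx context.Context) {
	ctx, _ = S(ctx, logrus.Fields{"cid": "example-container"})
	// ... later, possibly after new spans or values were added to ctx ...
	ctx = U(ctx) // re-point the stored entry at the most recent context
	G(ctx).Info("operation complete")
}
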
+// WithContext returns a context that contains the provided log entry.
+// The entry can be extracted with `GetEntry` (`G`).
+//
+// The entry in the context is a copy of `entry` (generated by `entry.WithContext`).
+func WithContext(ctx context.Context, entry *logrus.Entry) (context.Context, *logrus.Entry) {
+ // regardless of the order, entry.Context != GetEntry(ctx)
+ // here, the returned entry will reference the supplied context
+ entry = entry.WithContext(ctx)
+ ctx = context.WithValue(ctx, _entryContextKey, entry)
+
+ return ctx, entry
+}
+
+// Copy extracts the tracing Span and logging entry from the src Context, if they
+// exist, and adds them to the dst Context.
+//
+// This is useful to share tracing and logging between contexts, but not the
+// cancellation. For example, if the src Context has been cancelled but cleanup
+// operations triggered by the cancellation require a non-cancelled context to
+// execute.
+func Copy(dst context.Context, src context.Context) context.Context {
+ if s := trace.FromContext(src); s != nil {
+ dst = trace.NewContext(dst, s)
+ }
+
+ if e := fromContext(src); e != nil {
+ dst, _ = WithContext(dst, e)
+ }
+
+ return dst
+}
+
+func fromContext(ctx context.Context) *logrus.Entry {
+ e, _ := ctx.Value(_entryContextKey).(*logrus.Entry)
+ return e
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/format.go b/vendor/github.com/Microsoft/hcsshim/internal/log/format.go
new file mode 100644
index 0000000000..6d69c15b97
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/log/format.go
@@ -0,0 +1,114 @@
+package log
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "net"
+ "reflect"
+ "time"
+
+ "github.com/sirupsen/logrus"
+ "google.golang.org/protobuf/encoding/protojson"
+ "google.golang.org/protobuf/proto"
+)
+
+// TimeFormat is [time.RFC3339Nano] with nanoseconds padded using
+// zeros to ensure the formatted time is always the same number of
+// characters.
+// Based on RFC3339NanoFixed from github.com/containerd/log
+const TimeFormat = "2006-01-02T15:04:05.000000000Z07:00"
+
+func FormatTime(t time.Time) string {
+ return t.Format(TimeFormat)
+}
+
+// DurationFormat formats a [time.Duration] log entry.
+//
+// A nil value signals an error with the formatting.
+type DurationFormat func(time.Duration) interface{}
+
+func DurationFormatString(d time.Duration) interface{} { return d.String() }
+func DurationFormatSeconds(d time.Duration) interface{} { return d.Seconds() }
+func DurationFormatMilliseconds(d time.Duration) interface{} { return d.Milliseconds() }
+
+// FormatIO formats net.Conn and other types that have an `Addr()` or `Name()`.
+//
+// See FormatEnabled for more information.
+func FormatIO(ctx context.Context, v interface{}) string {
+ m := make(map[string]string)
+ m["type"] = reflect.TypeOf(v).String()
+
+ switch t := v.(type) {
+ case net.Conn:
+ m["localAddress"] = formatAddr(t.LocalAddr())
+ m["remoteAddress"] = formatAddr(t.RemoteAddr())
+ case interface{ Addr() net.Addr }:
+ m["address"] = formatAddr(t.Addr())
+ default:
+ return Format(ctx, t)
+ }
+
+ return Format(ctx, m)
+}
+
+func formatAddr(a net.Addr) string {
+ return a.Network() + "://" + a.String()
+}
+
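// exampleCleanupLogging is an editor-added sketch (not part of upstream
// hcsshim): Copy (defined in context.go above) lets cleanup that runs after
// cancellation keep the span and log entry of the original request context
// while using a fresh, non-cancelled context.
func exampleCleanupLogging(reqCtx context.Context) {
	cleanupCtx := Copy(context.Background(), reqCtx)
	G(cleanupCtx).Info("running cleanup for cancelled request")
}
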
+// Format formats an object into a JSON string, without any indentation or
+// HTML escapes.
+// Context is used to output a log warning if the conversion fails.
+//
+// This is intended primarily for `trace.StringAttribute()`
+func Format(ctx context.Context, v interface{}) string {
+ b, err := encode(v)
+ if err != nil {
+ // logging errors aren't really warning worthy, and can potentially spam a lot of logs out
+ G(ctx).WithFields(logrus.Fields{
+ logrus.ErrorKey: err,
+ "type": fmt.Sprintf("%T", v),
+ }).Debug("could not format value")
+ return ""
+ }
+
+ return string(b)
+}
+
+func encode(v interface{}) (_ []byte, err error) {
+ if m, ok := v.(proto.Message); ok {
+ // use canonical JSON encoding for protobufs (instead of [encoding/json])
+ // https://protobuf.dev/programming-guides/proto3/#json
+ var b []byte
+ b, err = protojson.MarshalOptions{
+ AllowPartial: true,
+ // protobuf defaults to camel case for JSON encoding; use proto field name instead (snake case)
+ UseProtoNames: true,
+ }.Marshal(m)
+ if err == nil {
+ // the protojson marshaller tries to unmarshal anypb.Any fields, which can
+ // fail for types encoded with "github.com/containerd/typeurl/v2"
+ // we could try creating a dedicated protoregistry.MessageTypeResolver that uses typeurl, but it's
+ // more robust to fall back on json marshalling for errors in general
+ return b, nil
+ }
+
+ }
+
+ buf := &bytes.Buffer{}
+ enc := json.NewEncoder(buf)
+ enc.SetEscapeHTML(false)
+ enc.SetIndent("", "")
+
+ if jErr := enc.Encode(v); jErr != nil {
+ if err != nil {
+ // TODO (go1.20): use multierror via fmt.Errorf("...: %w; ...: %w", ...)
+ return nil, fmt.Errorf("protojson encoding: %v; json encoding: %w", err, jErr)
+ }
+ return nil, fmt.Errorf("json encoding: %w", jErr)
+ }
+
+ // encoder.Encode appends a newline to the end
+ return bytes.TrimSpace(buf.Bytes()), nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/hook.go b/vendor/github.com/Microsoft/hcsshim/internal/log/hook.go
new file mode 100644
index 0000000000..bb547a329f
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/log/hook.go
@@ -0,0 +1,173 @@
+package log
+
+import (
+ "bytes"
+ "reflect"
+ "time"
+
+ "github.com/Microsoft/hcsshim/internal/logfields"
+ "github.com/sirupsen/logrus"
+ "go.opencensus.io/trace"
+)
+
+const nullString = "null"
+
+// Hook intercepts and formats a [logrus.Entry] before it is logged.
+//
+// The shim either outputs the logs through an ETW hook, discarding the (formatted) output,
+// or logs output to a pipe for logging binaries to consume.
+// The Linux GCS outputs logrus entries over stdout, which is then consumed and re-output
+// by the shim.
+type Hook struct {
+ // EncodeAsJSON formats structs, maps, arrays, slices, and [bytes.Buffer] as JSON.
+ // Variables of [bytes.Buffer] will be converted to []byte.
+ //
+ // Default is false.
+ EncodeAsJSON bool
+
+ // TimeFormat specifies the format for [time.Time] variables.
+ // An empty string disables formatting.
+ // When disabled, the fallback will be the JSON encoding, if enabled.
+ //
+ // Default is [TimeFormat].
+ TimeFormat string
+
+ // DurationFormat converts [time.Duration] fields to an appropriate encoding.
+ // nil disables formatting.
+ // When disabled, the fallback will be the JSON encoding, if enabled.
+ //
+ // Default is [DurationFormatString], which appends a duration unit after the value.
+ DurationFormat DurationFormat
+
+ // AddSpanContext adds [logfields.TraceID] and [logfields.SpanID] fields to
+ // the entry from the span context stored in [logrus.Entry.Context], if it exists.
+ AddSpanContext bool +} + +var _ logrus.Hook = &Hook{} + +func NewHook() *Hook { + return &Hook{ + TimeFormat: TimeFormat, + DurationFormat: DurationFormatString, + AddSpanContext: true, + } +} + +func (h *Hook) Levels() []logrus.Level { + return logrus.AllLevels +} + +func (h *Hook) Fire(e *logrus.Entry) (err error) { + // JSON encode, if necessary, then add span information + h.encode(e) + h.addSpanContext(e) + + return nil +} + +// encode loops through all the fields in the [logrus.Entry] and encodes them according to +// the settings in [Hook]. +// If [Hook.TimeFormat] is non-empty, it will be passed to [time.Time.Format] for +// fields of type [time.Time]. +// +// If [Hook.EncodeAsJSON] is true, then fields that are not numeric, boolean, strings, or +// errors will be encoded via a [json.Marshal] (with HTML escaping disabled). +// Chanel- and function-typed fields, as well as unsafe pointers are left alone and not encoded. +// +// If [Hook.TimeFormat] and [Hook.DurationFormat] are empty and [Hook.EncodeAsJSON] is false, +// then this is a no-op. +func (h *Hook) encode(e *logrus.Entry) { + d := e.Data + + formatTime := h.TimeFormat != "" + formatDuration := h.DurationFormat != nil + if !(h.EncodeAsJSON || formatTime || formatDuration) { + return + } + + for k, v := range d { + // encode types with dedicated formatting options first + + if vv, ok := v.(time.Time); formatTime && ok { + d[k] = vv.Format(h.TimeFormat) + continue + } + + if vv, ok := v.(time.Duration); formatDuration && ok { + d[k] = h.DurationFormat(vv) + continue + } + + // general case JSON encoding + + if !h.EncodeAsJSON { + continue + } + + switch vv := v.(type) { + // built in types + // "json" marshals errors as "{}", so leave alone here + case bool, string, error, uintptr, + int8, int16, int32, int64, int, + uint8, uint32, uint64, uint, + float32, float64: + continue + + // Rather than setting d[k] = vv.String(), JSON encode []byte value, since it + // may be a binary payload and not representable as a string. + // `case bytes.Buffer,*bytes.Buffer:` resolves `vv` to `interface{}`, + // so cannot use `vv.Bytes`. + // Could move to below the `reflect.Indirect()` call below, but + // that would require additional typematching and dereferencing. + // Easier to keep these duplicate branches here. + case bytes.Buffer: + v = vv.Bytes() + case *bytes.Buffer: + v = vv.Bytes() + } + + // dereference pointer or interface variables + rv := reflect.Indirect(reflect.ValueOf(v)) + // check if `v` is a null pointer + if !rv.IsValid() { + d[k] = nullString + continue + } + + switch rv.Kind() { + case reflect.Map, reflect.Struct, reflect.Array, reflect.Slice: + default: + // Bool, [U]?Int*, Float*, Complex*, Uintptr, String: encoded as normal + // Chan, Func: not supported by json + // Interface, Pointer: dereferenced above + // UnsafePointer: not supported by json, not safe to de-reference; leave alone + continue + } + + b, err := encode(v) + if err != nil { + // Errors are written to stderr (ie, to `panic.log`) and stops the remaining + // hooks (ie, exporting to ETW) from firing. So add encoding errors to + // the entry data to be written out, but keep on processing. 
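+ // For example, a field "spec" whose value fails to encode is reported under the derived key "spec-error".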
+ d[k+"-"+logrus.ErrorKey] = err.Error() + // keep the original `v` as the value, + continue + } + d[k] = string(b) + } +} + +func (h *Hook) addSpanContext(e *logrus.Entry) { + ctx := e.Context + if !h.AddSpanContext || ctx == nil { + return + } + span := trace.FromContext(ctx) + if span == nil { + return + } + sctx := span.SpanContext() + e.Data[logfields.TraceID] = sctx.TraceID.String() + e.Data[logfields.SpanID] = sctx.SpanID.String() +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/nopformatter.go b/vendor/github.com/Microsoft/hcsshim/internal/log/nopformatter.go new file mode 100644 index 0000000000..909ba68b28 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/log/nopformatter.go @@ -0,0 +1,12 @@ +package log + +import ( + "github.com/sirupsen/logrus" +) + +type NopFormatter struct{} + +var _ logrus.Formatter = NopFormatter{} + +// Format does nothing and returns a nil slice. +func (NopFormatter) Format(*logrus.Entry) ([]byte, error) { return nil, nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go b/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go new file mode 100644 index 0000000000..5a960e0d35 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go @@ -0,0 +1,184 @@ +package log + +import ( + "bytes" + "encoding/json" + "errors" + "sync/atomic" + + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" +) + +// This package scrubs objects of potentially sensitive information to pass to logging + +type genMap = map[string]interface{} +type scrubberFunc func(genMap) error + +const _scrubbedReplacement = "" + +var ( + ErrUnknownType = errors.New("encoded object is of unknown type") + + // case sensitive keywords, so "env" is not a substring on "Environment" + _scrubKeywords = [][]byte{[]byte("env"), []byte("Environment")} + + _scrub int32 +) + +// SetScrubbing enables scrubbing +func SetScrubbing(enable bool) { + v := int32(0) // cant convert from bool to int32 directly + if enable { + v = 1 + } + atomic.StoreInt32(&_scrub, v) +} + +// IsScrubbingEnabled checks if scrubbing is enabled +func IsScrubbingEnabled() bool { + v := atomic.LoadInt32(&_scrub) + return v != 0 +} + +// ScrubProcessParameters scrubs HCS Create Process requests with config parameters of +// type internal/hcs/schema2.ScrubProcessParameters (aka hcsshema.ScrubProcessParameters) +func ScrubProcessParameters(s string) (string, error) { + // todo: deal with v1 ProcessConfig + b := []byte(s) + if !IsScrubbingEnabled() || !hasKeywords(b) || !json.Valid(b) { + return s, nil + } + + pp := hcsschema.ProcessParameters{} + if err := json.Unmarshal(b, &pp); err != nil { + return "", err + } + pp.Environment = map[string]string{_scrubbedReplacement: _scrubbedReplacement} + + b, err := encode(pp) + if err != nil { + return "", err + } + return string(b), nil +} + +// ScrubBridgeCreate scrubs requests sent over the bridge of type +// internal/gcs/protocol.containerCreate wrapping an internal/hcsoci.linuxHostedSystem +func ScrubBridgeCreate(b []byte) ([]byte, error) { + return scrubBytes(b, scrubBridgeCreate) +} + +func scrubBridgeCreate(m genMap) error { + if !isRequestBase(m) { + return ErrUnknownType + } + if ss, ok := m["ContainerConfig"]; ok { + // ContainerConfig is a json encoded struct passed as a regular string field + s, ok := ss.(string) + if !ok { + return ErrUnknownType + } + b, err := scrubBytes([]byte(s), scrubLinuxHostedSystem) + if err != nil { + return err 
+ } + m["ContainerConfig"] = string(b) + return nil + } + return ErrUnknownType +} + +func scrubLinuxHostedSystem(m genMap) error { + if m, ok := index(m, "OciSpecification"); ok { //nolint:govet // shadow + if _, ok := m["annotations"]; ok { + m["annotations"] = map[string]string{_scrubbedReplacement: _scrubbedReplacement} + } + if m, ok := index(m, "process"); ok { //nolint:govet // shadow + if _, ok := m["env"]; ok { + m["env"] = []string{_scrubbedReplacement} + return nil + } + } + } + return ErrUnknownType +} + +// ScrubBridgeExecProcess scrubs requests sent over the bridge of type +// internal/gcs/protocol.containerExecuteProcess +func ScrubBridgeExecProcess(b []byte) ([]byte, error) { + return scrubBytes(b, scrubExecuteProcess) +} + +func scrubExecuteProcess(m genMap) error { + if !isRequestBase(m) { + return ErrUnknownType + } + if m, ok := index(m, "Settings"); ok { //nolint:govet // shadow + if ss, ok := m["ProcessParameters"]; ok { + // ProcessParameters is a json encoded struct passed as a regular sting field + s, ok := ss.(string) + if !ok { + return ErrUnknownType + } + + s, err := ScrubProcessParameters(s) + if err != nil { + return err + } + + m["ProcessParameters"] = s + return nil + } + } + return ErrUnknownType +} + +func scrubBytes(b []byte, scrub scrubberFunc) ([]byte, error) { + if !IsScrubbingEnabled() || !hasKeywords(b) || !json.Valid(b) { + return b, nil + } + + m := make(genMap) + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + + // could use regexp, but if the env strings contain braces, the regexp fails + // parsing into individual structs would require access to private structs + if err := scrub(m); err != nil { + return nil, err + } + + b, err := encode(m) + if err != nil { + return nil, err + } + + return b, nil +} + +func isRequestBase(m genMap) bool { + // neither of these are (currently) `omitempty` + _, a := m["ActivityId"] + _, c := m["ContainerId"] + return a && c +} + +// combination `m, ok := m[s]` and `m, ok := m.(genMap)` +func index(m genMap, s string) (genMap, bool) { + if m, ok := m[s]; ok { + mm, ok := m.(genMap) + return mm, ok + } + + return m, false +} + +func hasKeywords(b []byte) bool { + for _, bb := range _scrubKeywords { + if bytes.Contains(b, bb) { + return true + } + } + return false +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go b/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go new file mode 100644 index 0000000000..3e175e5222 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go @@ -0,0 +1,61 @@ +package logfields + +const ( + // Identifiers + + Name = "name" + Namespace = "namespace" + Operation = "operation" + + ID = "id" + SandboxID = "sid" + ContainerID = "cid" + ExecID = "eid" + ProcessID = "pid" + TaskID = "tid" + UVMID = "uvm-id" + + // networking and IO + + File = "file" + Path = "path" + Bytes = "bytes" + Pipe = "pipe" + + // Common Misc + + Attempt = "attemptNo" + JSON = "json" + + // Time + + StartTime = "startTime" + EndTime = "endTime" + Duration = "duration" + Timeout = "timeout" + + // Keys/values + + Field = "field" + Key = "key" + OCIAnnotation = "oci-annotation" + Value = "value" + Options = "options" + + // Golang type's + + ExpectedType = "expected-type" + Bool = "bool" + Uint32 = "uint32" + Uint64 = "uint64" + + // runhcs + + VMShimOperation = "vmshim-op" + + // logging and tracing + + TraceID = "traceID" + SpanID = "spanID" + ParentSpanID = "parentSpanID" +) diff --git 
a/vendor/github.com/Microsoft/hcsshim/internal/longpath/longpath.go b/vendor/github.com/Microsoft/hcsshim/internal/longpath/longpath.go new file mode 100644 index 0000000000..e5b8b85e09 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/longpath/longpath.go @@ -0,0 +1,24 @@ +package longpath + +import ( + "path/filepath" + "strings" +) + +// LongAbs makes a path absolute and returns it in NT long path form. +func LongAbs(path string) (string, error) { + if strings.HasPrefix(path, `\\?\`) || strings.HasPrefix(path, `\\.\`) { + return path, nil + } + if !filepath.IsAbs(path) { + absPath, err := filepath.Abs(path) + if err != nil { + return "", err + } + path = absPath + } + if strings.HasPrefix(path, `\\`) { + return `\\?\UNC\` + path[2:], nil + } + return `\\?\` + path, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/memory/pool.go b/vendor/github.com/Microsoft/hcsshim/internal/memory/pool.go new file mode 100644 index 0000000000..1ef5814d7e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/memory/pool.go @@ -0,0 +1,316 @@ +package memory + +import ( + "github.com/pkg/errors" +) + +const ( + minimumClassSize = MiB + maximumClassSize = 4 * GiB + memoryClassNumber = 7 +) + +var ( + ErrInvalidMemoryClass = errors.New("invalid memory class") + ErrEarlyMerge = errors.New("not all children have been freed") + ErrEmptyPoolOperation = errors.New("operation on empty pool") +) + +// GetMemoryClassType returns the minimum memory class type that can hold a device of +// a given size. The smallest class is 1MB and the largest one is 4GB with 2 bit offset +// intervals in between, for a total of 7 different classes. This function does not +// do a validity check +func GetMemoryClassType(s uint64) classType { + s = (s - 1) >> 20 + memCls := uint32(0) + for s > 0 { + s = s >> 2 + memCls++ + } + return classType(memCls) +} + +// GetMemoryClassSize returns size in bytes for a given memory class +func GetMemoryClassSize(memCls classType) (uint64, error) { + if memCls >= memoryClassNumber { + return 0, ErrInvalidMemoryClass + } + return minimumClassSize << (2 * memCls), nil +} + +// region represents a contiguous memory block +type region struct { + // parent region that has been split into 4 + parent *region + class classType + // offset represents offset in bytes + offset uint64 +} + +// memoryPool tracks free and busy (used) memory regions +type memoryPool struct { + free map[uint64]*region + busy map[uint64]*region +} + +// PoolAllocator implements a memory allocation strategy similar to buddy-malloc https://github.com/evanw/buddy-malloc/blob/master/buddy-malloc.c +// We borrow the idea of spanning a tree of fixed size regions on top of a contiguous memory +// space. +// +// There are a total of 7 different region sizes that can be allocated, with the smallest +// being 1MB and the largest 4GB (the default maximum size of a Virtual PMem device). +// +// For efficiency and to reduce fragmentation an entire region is allocated when requested. +// When there's no available region of requested size, we try to allocate more memory for +// this particular size by splitting the next available larger region into smaller ones, e.g. +// if there's no region available for size class 0, we try splitting a region from class 1, +// then class 2 etc, until we are able to do so or hit the upper limit. 
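
// exampleClassMath is an editor-added sketch (not part of upstream hcsshim)
// of the size-class arithmetic above: class sizes grow by two bits (a factor
// of 4) per class, so 1MiB is class 0, 4MiB is class 1, ..., 4GiB is class 6.
// The PoolAllocator type follows below.
func exampleClassMath() {
	cls := GetMemoryClassType(3 * MiB) // 3MiB rounds up to the 4MiB class (class 1)
	if sz, err := GetMemoryClassSize(cls); err == nil {
		_ = sz // 4 * MiB
	}
}
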
+type PoolAllocator struct { + pools [memoryClassNumber]*memoryPool +} + +var _ MappedRegion = ®ion{} +var _ Allocator = &PoolAllocator{} + +func (r *region) Offset() uint64 { + return r.offset +} + +func (r *region) Size() uint64 { + sz, err := GetMemoryClassSize(r.class) + if err != nil { + panic(err) + } + return sz +} + +func (r *region) Type() classType { + return r.class +} + +func newEmptyMemoryPool() *memoryPool { + return &memoryPool{ + free: make(map[uint64]*region), + busy: make(map[uint64]*region), + } +} + +func NewPoolMemoryAllocator() PoolAllocator { + pa := PoolAllocator{} + p := newEmptyMemoryPool() + // by default we allocate a single region with maximum possible size (class type) + p.free[0] = ®ion{ + class: memoryClassNumber - 1, + offset: 0, + } + pa.pools[memoryClassNumber-1] = p + return pa +} + +// Allocate checks memory region pool for the given `size` and returns a free region with +// minimal offset, if none available tries expanding matched memory pool. +// +// Internally it's done via moving a region from free pool into a busy pool +func (pa *PoolAllocator) Allocate(size uint64) (MappedRegion, error) { + memCls := GetMemoryClassType(size) + if memCls >= memoryClassNumber { + return nil, ErrInvalidMemoryClass + } + + // find region with the smallest offset + nextCls, nextOffset, err := pa.findNextOffset(memCls) + if err != nil { + return nil, err + } + + // this means that there are no more regions for the current class, try expanding + if nextCls != memCls { + if err := pa.split(memCls); err != nil { + if err == ErrInvalidMemoryClass { + return nil, ErrNotEnoughSpace + } + return nil, err + } + } + + if err := pa.markBusy(memCls, nextOffset); err != nil { + return nil, err + } + + // by this point memory pool for memCls should have been created, + // either prior or during split call + if r := pa.pools[memCls].busy[nextOffset]; r != nil { + return r, nil + } + + return nil, ErrNotEnoughSpace +} + +// Release marks a memory region of class `memCls` and offset `offset` as free and tries to merge smaller regions into +// a bigger one +func (pa *PoolAllocator) Release(reg MappedRegion) error { + mp := pa.pools[reg.Type()] + if mp == nil { + return ErrEmptyPoolOperation + } + + err := pa.markFree(reg.Type(), reg.Offset()) + if err != nil { + return err + } + + n := mp.free[reg.Offset()] + if n == nil { + return ErrNotAllocated + } + if err := pa.merge(n.parent); err != nil { + if err != ErrEarlyMerge { + return err + } + } + return nil +} + +// findNextOffset finds next region location for a given memCls +func (pa *PoolAllocator) findNextOffset(memCls classType) (classType, uint64, error) { + for mc := memCls; mc < memoryClassNumber; mc++ { + pi := pa.pools[mc] + if pi == nil || len(pi.free) == 0 { + continue + } + + target := uint64(maximumClassSize) + for offset := range pi.free { + if offset < target { + target = offset + } + } + return mc, target, nil + } + return 0, 0, ErrNotEnoughSpace +} + +// split tries to recursively split a bigger memory region into smaller ones until it succeeds or hits the upper limit +func (pa *PoolAllocator) split(clsType classType) error { + nextClsType := clsType + 1 + if nextClsType >= memoryClassNumber { + return ErrInvalidMemoryClass + } + + nextPool := pa.pools[nextClsType] + if nextPool == nil { + nextPool = newEmptyMemoryPool() + pa.pools[nextClsType] = nextPool + } + + cls, offset, err := pa.findNextOffset(nextClsType) + if err != nil { + return err + } + // not enough memory in the next class, try to recursively expand + if 
cls != nextClsType { + if err := pa.split(nextClsType); err != nil { + return err + } + } + + if err := pa.markBusy(nextClsType, offset); err != nil { + return err + } + + // memCls validity has been checked already, we can ignore the error + clsSize, _ := GetMemoryClassSize(clsType) + + nextReg := nextPool.busy[offset] + if nextReg == nil { + return ErrNotAllocated + } + + // expand memCls + cp := pa.pools[clsType] + if cp == nil { + cp = newEmptyMemoryPool() + pa.pools[clsType] = cp + } + // create 4 smaller regions + for i := uint64(0); i < 4; i++ { + offset := nextReg.offset + i*clsSize + reg := ®ion{ + parent: nextReg, + class: clsType, + offset: offset, + } + cp.free[offset] = reg + } + return nil +} + +func (pa *PoolAllocator) merge(parent *region) error { + // nothing to merge + if parent == nil { + return nil + } + + childCls := parent.class - 1 + childPool := pa.pools[childCls] + // no child nodes to merge, try to merge parent + if childPool == nil { + return pa.merge(parent.parent) + } + + childSize, err := GetMemoryClassSize(childCls) + if err != nil { + return err + } + + // check if all the child nodes are free + var children []*region + for i := uint64(0); i < 4; i++ { + child, free := childPool.free[parent.offset+i*childSize] + if !free { + return ErrEarlyMerge + } + children = append(children, child) + } + + // at this point all the child nodes will be free and we can merge + for _, child := range children { + delete(childPool.free, child.offset) + } + + if err := pa.markFree(parent.class, parent.offset); err != nil { + return err + } + + return pa.merge(parent.parent) +} + +// markFree internally moves a region with `offset` from busy to free map +func (pa *PoolAllocator) markFree(memCls classType, offset uint64) error { + clsPool := pa.pools[memCls] + if clsPool == nil { + return ErrEmptyPoolOperation + } + + if reg, exists := clsPool.busy[offset]; exists { + clsPool.free[offset] = reg + delete(clsPool.busy, offset) + return nil + } + return ErrNotAllocated +} + +// markBusy internally moves a region with `offset` from free to busy map +func (pa *PoolAllocator) markBusy(memCls classType, offset uint64) error { + clsPool := pa.pools[memCls] + if clsPool == nil { + return ErrEmptyPoolOperation + } + + if reg, exists := clsPool.free[offset]; exists { + clsPool.busy[offset] = reg + delete(clsPool.free, offset) + return nil + } + return ErrNotAllocated +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/memory/types.go b/vendor/github.com/Microsoft/hcsshim/internal/memory/types.go new file mode 100644 index 0000000000..d6cdb8cc4c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/memory/types.go @@ -0,0 +1,28 @@ +package memory + +import "github.com/pkg/errors" + +type classType uint32 + +const ( + MiB = 1024 * 1024 + GiB = 1024 * MiB +) + +var ( + ErrNotEnoughSpace = errors.New("not enough space") + ErrNotAllocated = errors.New("no memory allocated at the given offset") +) + +// MappedRegion represents a memory block with an offset +type MappedRegion interface { + Offset() uint64 + Size() uint64 + Type() classType +} + +// Allocator is an interface for memory allocation +type Allocator interface { + Allocate(uint64) (MappedRegion, error) + Release(MappedRegion) error +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/mergemaps/merge.go b/vendor/github.com/Microsoft/hcsshim/internal/mergemaps/merge.go new file mode 100644 index 0000000000..7e95efb30d --- /dev/null +++ 
b/vendor/github.com/Microsoft/hcsshim/internal/mergemaps/merge.go @@ -0,0 +1,52 @@ +package mergemaps + +import "encoding/json" + +// Merge recursively merges map `fromMap` into map `ToMap`. Any pre-existing values +// in ToMap are overwritten. Values in fromMap are added to ToMap. +// From http://stackoverflow.com/questions/40491438/merging-two-json-strings-in-golang +func Merge(fromMap, ToMap interface{}) interface{} { + switch fromMap := fromMap.(type) { + case map[string]interface{}: + ToMap, ok := ToMap.(map[string]interface{}) + if !ok { + return fromMap + } + for keyToMap, valueToMap := range ToMap { + if valueFromMap, ok := fromMap[keyToMap]; ok { + fromMap[keyToMap] = Merge(valueFromMap, valueToMap) + } else { + fromMap[keyToMap] = valueToMap + } + } + case nil: + // merge(nil, map[string]interface{...}) -> map[string]interface{...} + ToMap, ok := ToMap.(map[string]interface{}) + if ok { + return ToMap + } + } + return fromMap +} + +// MergeJSON merges the contents of a JSON string into an object representation, +// returning a new object suitable for translating to JSON. +func MergeJSON(object interface{}, additionalJSON []byte) (interface{}, error) { + if len(additionalJSON) == 0 { + return object, nil + } + objectJSON, err := json.Marshal(object) + if err != nil { + return nil, err + } + var objectMap, newMap map[string]interface{} + err = json.Unmarshal(objectJSON, &objectMap) + if err != nil { + return nil, err + } + err = json.Unmarshal(additionalJSON, &newMap) + if err != nil { + return nil, err + } + return Merge(newMap, objectMap), nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go b/vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go new file mode 100644 index 0000000000..71df25b8df --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go @@ -0,0 +1,69 @@ +package oc + +import ( + "errors" + "io" + "net" + "os" + + "github.com/containerd/containerd/errdefs" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// todo: break import cycle with "internal/hcs/errors.go" and reference errors defined there +// todo: add errors defined in "internal/guest/gcserror" (Hresult does not implement error) + +func toStatusCode(err error) codes.Code { + // checks if err implements GRPCStatus() *"google.golang.org/grpc/status".Status, + // wraps an error defined in "github.com/containerd/containerd/errdefs", or is a + // context timeout or cancelled error + if s, ok := status.FromError(errdefs.ToGRPC(err)); ok { + return s.Code() + } + + switch { + // case isAny(err): + // return codes.Cancelled + case isAny(err, os.ErrInvalid): + return codes.InvalidArgument + case isAny(err, os.ErrDeadlineExceeded): + return codes.DeadlineExceeded + case isAny(err, os.ErrNotExist): + return codes.NotFound + case isAny(err, os.ErrExist): + return codes.AlreadyExists + case isAny(err, os.ErrPermission): + return codes.PermissionDenied + // case isAny(err): + // return codes.ResourceExhausted + case isAny(err, os.ErrClosed, net.ErrClosed, io.ErrClosedPipe, io.ErrShortBuffer): + return codes.FailedPrecondition + // case isAny(err): + // return codes.Aborted + // case isAny(err): + // return codes.OutOfRange + // case isAny(err): + // return codes.Unimplemented + case isAny(err, io.ErrNoProgress): + return codes.Internal + // case isAny(err): + // return codes.Unavailable + case isAny(err, io.ErrShortWrite, io.ErrUnexpectedEOF): + return codes.DataLoss + // case isAny(err): + // return 
codes.Unauthenticated
+ default:
+ return codes.Unknown
+ }
+}
+
+// isAny returns true if errors.Is is true for any of the provided errors, errs.
+func isAny(err error, errs ...error) bool {
+ for _, e := range errs {
+ if errors.Is(err, e) {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go b/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go
new file mode 100644
index 0000000000..28f8f43a93
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go
@@ -0,0 +1,86 @@
+package oc
+
+import (
+ "github.com/sirupsen/logrus"
+ "go.opencensus.io/trace"
+ "google.golang.org/grpc/codes"
+
+ "github.com/Microsoft/hcsshim/internal/log"
+ "github.com/Microsoft/hcsshim/internal/logfields"
+)
+
+const spanMessage = "Span"
+
+var _errorCodeKey = logrus.ErrorKey + "Code"
+
+// LogrusExporter is an OpenCensus `trace.Exporter` that exports
+// `trace.SpanData` to logrus output.
+type LogrusExporter struct{}
+
+var _ trace.Exporter = &LogrusExporter{}
+
+// ExportSpan exports `s` based on the following rules:
+//
+// 1. All output will contain `s.Attributes`, `s.SpanKind`, `s.TraceID`,
+// `s.SpanID`, and `s.ParentSpanID` for correlation
+//
+// 2. Any calls to .Annotate are not supported.
+//
+// 3. The span itself will be written at `logrus.InfoLevel` unless
+// `s.Status.Code != 0` in which case it will be written at `logrus.ErrorLevel`
+// providing `s.Status.Message` as the error value.
+func (le *LogrusExporter) ExportSpan(s *trace.SpanData) {
+ if s.DroppedAnnotationCount > 0 {
+ logrus.WithFields(logrus.Fields{
+ "name": s.Name,
+ logfields.TraceID: s.TraceID.String(),
+ logfields.SpanID: s.SpanID.String(),
+ "dropped": s.DroppedAttributeCount,
+ "maxAttributes": len(s.Attributes),
+ }).Warning("span had dropped attributes")
+ }
+
+ entry := log.L.Dup()
+ // Combine all span annotations with span data (e.g., trace ID, span ID, parent span ID,
+ // error, status code)
+ // (OC) Span attributes are guaranteed to be strings, bools, or int64s, so we can
+ // skip overhead in entry.WithFields() and add them directly to entry.Data.
+ // Preallocate ahead of time, since we should add, at most, 10 additional entries
+ data := make(logrus.Fields, len(entry.Data)+len(s.Attributes)+10)
+
+ // Default log entry may have preexisting/application-wide data
+ for k, v := range entry.Data {
+ data[k] = v
+ }
+ for k, v := range s.Attributes {
+ data[k] = v
+ }
+
+ data[logfields.Name] = s.Name
+ data[logfields.TraceID] = s.TraceID.String()
+ data[logfields.SpanID] = s.SpanID.String()
+ data[logfields.ParentSpanID] = s.ParentSpanID.String()
+ data[logfields.StartTime] = s.StartTime
+ data[logfields.EndTime] = s.EndTime
+ data[logfields.Duration] = s.EndTime.Sub(s.StartTime)
+ if sk := spanKindToString(s.SpanKind); sk != "" {
+ data["spanKind"] = sk
+ }
+
+ level := logrus.InfoLevel
+ if s.Status.Code != 0 {
+ level = logrus.ErrorLevel
+
+ // don't overwrite existing "error" or "errorCode" attributes
+ if _, ok := data[logrus.ErrorKey]; !ok {
+ data[logrus.ErrorKey] = s.Status.Message
+ }
+ if _, ok := data[_errorCodeKey]; !ok {
+ data[_errorCodeKey] = codes.Code(s.Status.Code).String()
+ }
+ }
+
+ entry.Data = data
+ entry.Time = s.StartTime
+ entry.Log(level, spanMessage)
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go b/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go
new file mode 100644
index 0000000000..7260784326
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go
@@ -0,0 +1,58 @@
+package oc
+
+import (
+ "context"
+
+ "github.com/Microsoft/hcsshim/internal/log"
+ "go.opencensus.io/trace"
+)
+
+var DefaultSampler = trace.AlwaysSample()
+
+// SetSpanStatus sets `span.SetStatus` to the proper status depending on `err`. If
+// `err` is `nil` assumes `trace.StatusCodeOk`.
+func SetSpanStatus(span *trace.Span, err error) {
+ status := trace.Status{}
+ if err != nil {
+ status.Code = int32(toStatusCode(err))
+ status.Message = err.Error()
+ }
+ span.SetStatus(status)
+}
+
+// StartSpan wraps "go.opencensus.io/trace".StartSpan, but, if the span is sampling,
+// adds a log entry to the context that points to the newly created span.
+func StartSpan(ctx context.Context, name string, o ...trace.StartOption) (context.Context, *trace.Span) {
+ ctx, s := trace.StartSpan(ctx, name, o...)
+ return update(ctx, s)
+}
+
+// StartSpanWithRemoteParent wraps "go.opencensus.io/trace".StartSpanWithRemoteParent.
+//
+// See StartSpan for more information.
+func StartSpanWithRemoteParent(ctx context.Context, name string, parent trace.SpanContext, o ...trace.StartOption) (context.Context, *trace.Span) {
+ ctx, s := trace.StartSpanWithRemoteParent(ctx, name, parent, o...)
+ return update(ctx, s) +} + +func update(ctx context.Context, s *trace.Span) (context.Context, *trace.Span) { + if s.IsRecordingEvents() { + ctx = log.UpdateContext(ctx) + } + + return ctx, s +} + +var WithServerSpanKind = trace.WithSpanKind(trace.SpanKindServer) +var WithClientSpanKind = trace.WithSpanKind(trace.SpanKindClient) + +func spanKindToString(sk int) string { + switch sk { + case trace.SpanKindClient: + return "client" + case trace.SpanKindServer: + return "server" + default: + return "" + } +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/protocol/guestrequest/types.go b/vendor/github.com/Microsoft/hcsshim/internal/protocol/guestrequest/types.go new file mode 100644 index 0000000000..4f441803b7 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/protocol/guestrequest/types.go @@ -0,0 +1,76 @@ +package guestrequest + +// These are constants for v2 schema modify requests. + +type RequestType string +type ResourceType string + +// RequestType const. +const ( + RequestTypeAdd RequestType = "Add" + RequestTypeRemove RequestType = "Remove" + RequestTypePreAdd RequestType = "PreAdd" // For networking + RequestTypeUpdate RequestType = "Update" +) + +type SignalValueWCOW string + +const ( + SignalValueWCOWCtrlC SignalValueWCOW = "CtrlC" + SignalValueWCOWCtrlBreak SignalValueWCOW = "CtrlBreak" + SignalValueWCOWCtrlClose SignalValueWCOW = "CtrlClose" + SignalValueWCOWCtrlLogOff SignalValueWCOW = "CtrlLogOff" + SignalValueWCOWCtrlShutdown SignalValueWCOW = "CtrlShutdown" +) + +// ModificationRequest is for modify commands passed to the guest. +type ModificationRequest struct { + RequestType RequestType `json:"RequestType,omitempty"` + ResourceType ResourceType `json:"ResourceType,omitempty"` + Settings interface{} `json:"Settings,omitempty"` +} + +type NetworkModifyRequest struct { + AdapterId string `json:"AdapterId,omitempty"` //nolint:stylecheck + RequestType RequestType `json:"RequestType,omitempty"` + Settings interface{} `json:"Settings,omitempty"` +} + +type RS4NetworkModifyRequest struct { + AdapterInstanceId string `json:"AdapterInstanceId,omitempty"` //nolint:stylecheck + RequestType RequestType `json:"RequestType,omitempty"` + Settings interface{} `json:"Settings,omitempty"` +} + +var ( + // V5 GUIDs for SCSI controllers + // These GUIDs are created with namespace GUID "d422512d-2bf2-4752-809d-7b82b5fcb1b4" + // and index as names. For example, first GUID is created like this: + // guid.NewV5("d422512d-2bf2-4752-809d-7b82b5fcb1b4", []byte("0")) + ScsiControllerGuids = []string{ + "df6d0690-79e5-55b6-a5ec-c1e2f77f580a", + "0110f83b-de10-5172-a266-78bca56bf50a", + "b5d2d8d4-3a75-51bf-945b-3444dc6b8579", + "305891a9-b251-5dfe-91a2-c25d9212275b", + } +) + +// constants for v2 schema ProcessModifyRequest + +// Operation type for [hcsschema.ProcessModifyRequest]. +type ProcessModifyOperation string + +const ( + ModifyProcessConsoleSize ProcessModifyOperation = "ConsoleSize" + CloseProcessHandle ProcessModifyOperation = "CloseHandle" +) + +// Standard IO handle(s) to close for [hcsschema.CloseHandle] in [hcsschema.ProcessModifyRequest]. 
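
// exampleScsiGUID is an editor-added sketch (not part of upstream hcsshim)
// reproducing the V5 GUID derivation described above for ScsiControllerGuids;
// it assumes an extra import of "github.com/Microsoft/go-winio/pkg/guid".
func exampleScsiGUID() (string, error) {
	ns, err := guid.FromString("d422512d-2bf2-4752-809d-7b82b5fcb1b4")
	if err != nil {
		return "", err
	}
	g, err := guid.NewV5(ns, []byte("0"))
	if err != nil {
		return "", err
	}
	// expected to equal ScsiControllerGuids[0]: "df6d0690-79e5-55b6-a5ec-c1e2f77f580a"
	return g.String(), nil
}
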
+type STDIOHandle string + +const ( + STDInHandle STDIOHandle = "StdIn" + STDOutHandle STDIOHandle = "StdOut" + STDErrHandle STDIOHandle = "StdErr" + AllHandles STDIOHandle = "All" +) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go b/vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go new file mode 100644 index 0000000000..4eb9bb9f1f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go @@ -0,0 +1,92 @@ +package queue + +import ( + "errors" + "sync" +) + +var ErrQueueClosed = errors.New("the queue is closed for reading and writing") + +// MessageQueue represents a threadsafe message queue to be used to retrieve or +// write messages to. +type MessageQueue struct { + m *sync.RWMutex + c *sync.Cond + messages []interface{} + closed bool +} + +// NewMessageQueue returns a new MessageQueue. +func NewMessageQueue() *MessageQueue { + m := &sync.RWMutex{} + return &MessageQueue{ + m: m, + c: sync.NewCond(m), + messages: []interface{}{}, + } +} + +// Enqueue writes `msg` to the queue. +func (mq *MessageQueue) Enqueue(msg interface{}) error { + mq.m.Lock() + defer mq.m.Unlock() + + if mq.closed { + return ErrQueueClosed + } + mq.messages = append(mq.messages, msg) + // Signal a waiter that there is now a value available in the queue. + mq.c.Signal() + return nil +} + +// Dequeue will read a value from the queue and remove it. If the queue +// is empty, this will block until the queue is closed or a value gets enqueued. +func (mq *MessageQueue) Dequeue() (interface{}, error) { + mq.m.Lock() + defer mq.m.Unlock() + + for !mq.closed && mq.size() == 0 { + mq.c.Wait() + } + + // We got woken up, check if it's because the queue got closed. + if mq.closed { + return nil, ErrQueueClosed + } + + val := mq.messages[0] + mq.messages[0] = nil + mq.messages = mq.messages[1:] + return val, nil +} + +// Size returns the size of the queue. +func (mq *MessageQueue) Size() int { + mq.m.RLock() + defer mq.m.RUnlock() + return mq.size() +} + +// Nonexported size check to check if the queue is empty inside already locked functions. +func (mq *MessageQueue) size() int { + return len(mq.messages) +} + +// Close closes the queue for future writes or reads. Any attempts to read or write from the +// queue after close will return ErrQueueClosed. This is safe to call multiple times. +func (mq *MessageQueue) Close() { + mq.m.Lock() + defer mq.m.Unlock() + + // Already closed, noop + if mq.closed { + return + } + + mq.messages = nil + mq.closed = true + // If there's anybody currently waiting on a value from Dequeue, we need to + // broadcast so the read(s) can return ErrQueueClosed. 
+ mq.c.Broadcast() +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/safefile/do.go b/vendor/github.com/Microsoft/hcsshim/internal/safefile/do.go new file mode 100644 index 0000000000..f211d25e72 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/safefile/do.go @@ -0,0 +1 @@ +package safefile diff --git a/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go b/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go new file mode 100644 index 0000000000..70368533b4 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go @@ -0,0 +1,403 @@ +//go:build windows + +package safefile + +import ( + "errors" + "io" + "os" + "path/filepath" + "strings" + "syscall" + "unicode/utf16" + "unsafe" + + "github.com/Microsoft/hcsshim/internal/longpath" + "github.com/Microsoft/hcsshim/internal/winapi" + + winio "github.com/Microsoft/go-winio" +) + +func OpenRoot(path string) (*os.File, error) { + longpath, err := longpath.LongAbs(path) + if err != nil { + return nil, err + } + return winio.OpenForBackup(longpath, syscall.GENERIC_READ, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, syscall.OPEN_EXISTING) +} + +func cleanGoStringRelativePath(path string) (string, error) { + path = filepath.Clean(path) + if strings.Contains(path, ":") { + // Since alternate data streams must follow the file they + // are attached to, finding one here (out of order) is invalid. + return "", errors.New("path contains invalid character `:`") + } + fspath := filepath.FromSlash(path) + if len(fspath) > 0 && fspath[0] == '\\' { + return "", errors.New("expected relative path") + } + return fspath, nil +} + +func ntRelativePath(path string) ([]uint16, error) { + fspath, err := cleanGoStringRelativePath(path) + if err != nil { + return nil, err + } + + path16 := utf16.Encode(([]rune)(fspath)) + if len(path16) > 32767 { + return nil, syscall.ENAMETOOLONG + } + + return path16, nil +} + +// openRelativeInternal opens a relative path from the given root, failing if +// any of the intermediate path components are reparse points. +func openRelativeInternal(path string, root *os.File, accessMask uint32, shareFlags uint32, createDisposition uint32, flags uint32) (*os.File, error) { + var ( + h uintptr + iosb winapi.IOStatusBlock + oa winapi.ObjectAttributes + ) + + cleanRelativePath, err := cleanGoStringRelativePath(path) + if err != nil { + return nil, err + } + + if root == nil || root.Fd() == 0 { + return nil, errors.New("missing root directory") + } + + pathUnicode, err := winapi.NewUnicodeString(cleanRelativePath) + if err != nil { + return nil, err + } + + oa.Length = unsafe.Sizeof(oa) + oa.ObjectName = pathUnicode + oa.RootDirectory = uintptr(root.Fd()) + oa.Attributes = winapi.OBJ_DONT_REPARSE + status := winapi.NtCreateFile( + &h, + accessMask|syscall.SYNCHRONIZE, + &oa, + &iosb, + nil, + 0, + shareFlags, + createDisposition, + winapi.FILE_OPEN_FOR_BACKUP_INTENT|winapi.FILE_SYNCHRONOUS_IO_NONALERT|flags, + nil, + 0, + ) + if status != 0 { + return nil, winapi.RtlNtStatusToDosError(status) + } + + fullPath, err := longpath.LongAbs(filepath.Join(root.Name(), path)) + if err != nil { + syscall.Close(syscall.Handle(h)) + return nil, err + } + + return os.NewFile(h, fullPath), nil +} + +// OpenRelative opens a relative path from the given root, failing if +// any of the intermediate path components are reparse points. 
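
// exampleOpenUnderRoot is an editor-added sketch (not part of upstream
// hcsshim): open a root directory once, then resolve untrusted relative
// paths beneath it without following reparse points. The path is
// hypothetical; OpenRelative is defined just below.
func exampleOpenUnderRoot() error {
	root, err := OpenRoot(`C:\layer`)
	if err != nil {
		return err
	}
	defer root.Close()
	f, err := OpenRelative(
		`dir\file.txt`,
		root,
		syscall.GENERIC_READ,
		syscall.FILE_SHARE_READ,
		winapi.FILE_OPEN,
		0,
	)
	if err != nil {
		return err
	}
	return f.Close()
}
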
+func OpenRelative(path string, root *os.File, accessMask uint32, shareFlags uint32, createDisposition uint32, flags uint32) (*os.File, error) { + f, err := openRelativeInternal(path, root, accessMask, shareFlags, createDisposition, flags) + if err != nil { + err = &os.PathError{Op: "open", Path: filepath.Join(root.Name(), path), Err: err} + } + return f, err +} + +// LinkRelative creates a hard link from oldname to newname (relative to oldroot +// and newroot), failing if any of the intermediate path components are reparse +// points. +func LinkRelative(oldname string, oldroot *os.File, newname string, newroot *os.File) error { + // Open the old file. + oldf, err := openRelativeInternal( + oldname, + oldroot, + syscall.FILE_WRITE_ATTRIBUTES, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + winapi.FILE_OPEN, + 0, + ) + if err != nil { + return &os.LinkError{Op: "link", Old: filepath.Join(oldroot.Name(), oldname), New: filepath.Join(newroot.Name(), newname), Err: err} + } + defer oldf.Close() + + // Open the parent of the new file. + var parent *os.File + parentPath := filepath.Dir(newname) + if parentPath != "." { + parent, err = openRelativeInternal( + parentPath, + newroot, + syscall.GENERIC_READ, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + winapi.FILE_OPEN, + winapi.FILE_DIRECTORY_FILE) + if err != nil { + return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(newroot.Name(), newname), Err: err} + } + defer parent.Close() + + fi, err := winio.GetFileBasicInfo(parent) + if err != nil { + return err + } + if (fi.FileAttributes & syscall.FILE_ATTRIBUTE_REPARSE_POINT) != 0 { + return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(newroot.Name(), newname), Err: winapi.RtlNtStatusToDosError(winapi.STATUS_REPARSE_POINT_ENCOUNTERED)} + } + } else { + parent = newroot + } + + // Issue an NT call to create the link. This will be safe because NT will + // not open any more directories to create the link, so it cannot walk any + // more reparse points. + newbase := filepath.Base(newname) + newbase16, err := ntRelativePath(newbase) + if err != nil { + return err + } + + size := int(unsafe.Offsetof(winapi.FileLinkInformation{}.FileName)) + len(newbase16)*2 + linkinfoBuffer := winapi.LocalAlloc(0, size) + defer winapi.LocalFree(linkinfoBuffer) + + linkinfo := (*winapi.FileLinkInformation)(unsafe.Pointer(linkinfoBuffer)) + linkinfo.RootDirectory = parent.Fd() + linkinfo.FileNameLength = uint32(len(newbase16) * 2) + copy(winapi.Uint16BufferToSlice(&linkinfo.FileName[0], len(newbase16)), newbase16) + + var iosb winapi.IOStatusBlock + status := winapi.NtSetInformationFile( + oldf.Fd(), + &iosb, + linkinfoBuffer, + uint32(size), + winapi.FileLinkInformationClass, + ) + if status != 0 { + return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(parent.Name(), newbase), Err: winapi.RtlNtStatusToDosError(status)} + } + + return nil +} + +// deleteOnClose marks a file to be deleted when the handle is closed. +func deleteOnClose(f *os.File) error { + disposition := winapi.FileDispositionInformationEx{Flags: winapi.FILE_DISPOSITION_DELETE} + var iosb winapi.IOStatusBlock + status := winapi.NtSetInformationFile( + f.Fd(), + &iosb, + uintptr(unsafe.Pointer(&disposition)), + uint32(unsafe.Sizeof(disposition)), + winapi.FileDispositionInformationExClass, + ) + if status != 0 { + return winapi.RtlNtStatusToDosError(status) + } + return nil +} + +// clearReadOnly clears the readonly attribute on a file. 
+func clearReadOnly(f *os.File) error { + bi, err := winio.GetFileBasicInfo(f) + if err != nil { + return err + } + if bi.FileAttributes&syscall.FILE_ATTRIBUTE_READONLY == 0 { + return nil + } + sbi := winio.FileBasicInfo{ + FileAttributes: bi.FileAttributes &^ syscall.FILE_ATTRIBUTE_READONLY, + } + if sbi.FileAttributes == 0 { + sbi.FileAttributes = syscall.FILE_ATTRIBUTE_NORMAL + } + return winio.SetFileBasicInfo(f, &sbi) +} + +// RemoveRelative removes a file or directory relative to a root, failing if any +// intermediate path components are reparse points. +func RemoveRelative(path string, root *os.File) error { + f, err := openRelativeInternal( + path, + root, + winapi.FILE_READ_ATTRIBUTES|winapi.FILE_WRITE_ATTRIBUTES|winapi.DELETE, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + winapi.FILE_OPEN, + winapi.FILE_OPEN_REPARSE_POINT) + if err == nil { + defer f.Close() + err = deleteOnClose(f) + if err == syscall.ERROR_ACCESS_DENIED { + // Maybe the file is marked readonly. Clear the bit and retry. + _ = clearReadOnly(f) + err = deleteOnClose(f) + } + } + if err != nil { + return &os.PathError{Op: "remove", Path: filepath.Join(root.Name(), path), Err: err} + } + return nil +} + +// RemoveAllRelative removes a directory tree relative to a root, failing if any +// intermediate path components are reparse points. +func RemoveAllRelative(path string, root *os.File) error { + fi, err := LstatRelative(path, root) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + fileAttributes := fi.Sys().(*syscall.Win32FileAttributeData).FileAttributes + if fileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY == 0 || fileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT != 0 { + // If this is a reparse point, it can't have children. Simple remove will do. + err := RemoveRelative(path, root) + if err == nil || os.IsNotExist(err) { + return nil + } + return err + } + + // It is necessary to use os.Open as Readdirnames does not work with + // OpenRelative. This is safe because the above LstatRelative fails + // if the target is outside the root, and we know this is not a + // symlink from the above FILE_ATTRIBUTE_REPARSE_POINT check. + fd, err := os.Open(filepath.Join(root.Name(), path)) + if err != nil { + if os.IsNotExist(err) { + // Race. It was deleted between the Lstat and Open. + // Return nil per RemoveAll's docs. + return nil + } + return err + } + + // Remove contents & return first error. + for { + names, err1 := fd.Readdirnames(100) + for _, name := range names { + if err2 := RemoveAllRelative(path+string(os.PathSeparator)+name, root); err == nil { + err = err2 + } + } + if err1 == io.EOF { + // Readdirnames has no more files to return + break + } + // If Readdirnames returned an error, use it. + if err == nil { + err = err1 + } + if len(names) == 0 { + break + } + } + fd.Close() + + // Remove directory. + err1 := RemoveRelative(path, root) + if err1 == nil || os.IsNotExist(err1) { + return nil + } + if err == nil { + err = err1 + } + return err +} + +// MkdirRelative creates a directory relative to a root, failing if any +// intermediate path components are reparse points. 
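
// exampleDirLifecycle is an editor-added sketch (not part of upstream
// hcsshim) pairing the directory helpers here: build a nested tree under a
// root, then tear it down, with reparse points rejected at every step.
// MkdirAllRelative is defined further below.
func exampleDirLifecycle(root *os.File) error {
	if err := MkdirAllRelative(`a\b\c`, root); err != nil {
		return err
	}
	return RemoveAllRelative(`a`, root)
}
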
+func MkdirRelative(path string, root *os.File) error { + f, err := openRelativeInternal( + path, + root, + 0, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + winapi.FILE_CREATE, + winapi.FILE_DIRECTORY_FILE) + if err == nil { + f.Close() + } else { + err = &os.PathError{Op: "mkdir", Path: filepath.Join(root.Name(), path), Err: err} + } + return err +} + +// MkdirAllRelative creates each directory in the path relative to a root, failing if +// any existing intermediate path components are reparse points. +func MkdirAllRelative(path string, root *os.File) error { + pathParts := strings.Split(filepath.Clean(path), (string)(filepath.Separator)) + for index := range pathParts { + partialPath := filepath.Join(pathParts[0 : index+1]...) + stat, err := LstatRelative(partialPath, root) + + if err != nil { + if os.IsNotExist(err) { + if err := MkdirRelative(partialPath, root); err != nil { + return err + } + continue + } + return err + } + + if !stat.IsDir() { + fullPath := filepath.Join(root.Name(), partialPath) + return &os.PathError{Op: "mkdir", Path: fullPath, Err: syscall.ENOTDIR} + } + } + + return nil +} + +// LstatRelative performs a stat operation on a file relative to a root, failing +// if any intermediate path components are reparse points. +func LstatRelative(path string, root *os.File) (os.FileInfo, error) { + f, err := openRelativeInternal( + path, + root, + winapi.FILE_READ_ATTRIBUTES, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + winapi.FILE_OPEN, + winapi.FILE_OPEN_REPARSE_POINT) + if err != nil { + return nil, &os.PathError{Op: "stat", Path: filepath.Join(root.Name(), path), Err: err} + } + defer f.Close() + return f.Stat() +} + +// EnsureNotReparsePointRelative validates that a given file (relative to a +// root) and all intermediate path components are not a reparse points. +func EnsureNotReparsePointRelative(path string, root *os.File) error { + // Perform an open with OBJ_DONT_REPARSE but without specifying FILE_OPEN_REPARSE_POINT. 
+	f, err := OpenRelative(
+		path,
+		root,
+		0,
+		syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
+		winapi.FILE_OPEN,
+		0)
+	if err != nil {
+		return err
+	}
+	f.Close()
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/security/grantvmgroupaccess.go b/vendor/github.com/Microsoft/hcsshim/internal/security/grantvmgroupaccess.go
new file mode 100644
index 0000000000..7dfa1e594c
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/security/grantvmgroupaccess.go
@@ -0,0 +1,186 @@
+//go:build windows
+// +build windows
+
+package security
+
+import (
+	"fmt"
+	"os"
+	"syscall"
+	"unsafe"
+)
+
+type (
+	accessMask          uint32
+	accessMode          uint32
+	desiredAccess       uint32
+	inheritMode         uint32
+	objectType          uint32
+	shareMode           uint32
+	securityInformation uint32
+	trusteeForm         uint32
+	trusteeType         uint32
+)
+
+type explicitAccess struct {
+	accessPermissions accessMask
+	accessMode        accessMode
+	inheritance       inheritMode
+	trustee           trustee
+}
+
+type trustee struct {
+	multipleTrustee          *trustee
+	multipleTrusteeOperation int32
+	trusteeForm              trusteeForm
+	trusteeType              trusteeType
+	name                     uintptr
+}
+
+const (
+	AccessMaskNone    accessMask = 0
+	AccessMaskRead    accessMask = 1 << 31 // GENERIC_READ
+	AccessMaskWrite   accessMask = 1 << 30 // GENERIC_WRITE
+	AccessMaskExecute accessMask = 1 << 29 // GENERIC_EXECUTE
+	AccessMaskAll     accessMask = 1 << 28 // GENERIC_ALL
+
+	accessMaskDesiredPermission = AccessMaskRead
+
+	accessModeGrant accessMode = 1
+
+	desiredAccessReadControl desiredAccess = 0x20000
+	desiredAccessWriteDac    desiredAccess = 0x40000
+
+	gvmga = "GrantVmGroupAccess:"
+
+	inheritModeNoInheritance                  inheritMode = 0x0
+	inheritModeSubContainersAndObjectsInherit inheritMode = 0x3
+
+	objectTypeFileObject objectType = 0x1
+
+	securityInformationDACL securityInformation = 0x4
+
+	shareModeRead  shareMode = 0x1
+	shareModeWrite shareMode = 0x2
+
+	//nolint:stylecheck // ST1003
+	sidVmGroup = "S-1-5-83-0"
+
+	trusteeFormIsSid trusteeForm = 0
+
+	trusteeTypeWellKnownGroup trusteeType = 5
+)
+
+// GrantVmGroupAccess sets the DACL for a specified file or directory to
+// include Grant ACE entries for the VM Group SID. This is a Go
+// re-implementation of the same function in vmcompute, which is not
+// exported in RS5.
+func GrantVmGroupAccess(name string) error { //nolint:stylecheck // ST1003
+	return GrantVmGroupAccessWithMask(name, accessMaskDesiredPermission)
+}
+
+// GrantVmGroupAccessWithMask sets the desired DACL for a specified file or
+// directory.
+func GrantVmGroupAccessWithMask(name string, access accessMask) error { //nolint:stylecheck // ST1003
+	if access == 0 || access<<4 != 0 {
+		return fmt.Errorf("invalid access mask: 0x%08x", access)
+	}
+	// Stat (to determine if `name` is a directory).
+	s, err := os.Stat(name)
+	if err != nil {
+		return fmt.Errorf("%s os.Stat %s: %w", gvmga, name, err)
+	}
+
+	// Get a handle to the file/directory. Must defer Close on success.
+	fd, err := createFile(name, s.IsDir())
+	if err != nil {
+		return err // Already wrapped
+	}
+	defer func() {
+		_ = syscall.CloseHandle(fd)
+	}()
+
+	// Get the current DACL and Security Descriptor. Must defer LocalFree on success.
+ ot := objectTypeFileObject + si := securityInformationDACL + sd := uintptr(0) + origDACL := uintptr(0) + if err := getSecurityInfo(fd, uint32(ot), uint32(si), nil, nil, &origDACL, nil, &sd); err != nil { + return fmt.Errorf("%s GetSecurityInfo %s: %w", gvmga, name, err) + } + defer func() { + _, _ = syscall.LocalFree((syscall.Handle)(unsafe.Pointer(sd))) + }() + + // Generate a new DACL which is the current DACL with the required ACEs added. + // Must defer LocalFree on success. + newDACL, err := generateDACLWithAcesAdded(name, s.IsDir(), access, origDACL) + if err != nil { + return err // Already wrapped + } + defer func() { + _, _ = syscall.LocalFree((syscall.Handle)(unsafe.Pointer(newDACL))) + }() + + // And finally use SetSecurityInfo to apply the updated DACL. + if err := setSecurityInfo(fd, uint32(ot), uint32(si), uintptr(0), uintptr(0), newDACL, uintptr(0)); err != nil { + return fmt.Errorf("%s SetSecurityInfo %s: %w", gvmga, name, err) + } + + return nil +} + +// createFile is a helper function to call [Nt]CreateFile to get a handle to +// the file or directory. +func createFile(name string, isDir bool) (syscall.Handle, error) { + namep, err := syscall.UTF16FromString(name) + if err != nil { + return 0, fmt.Errorf("syscall.UTF16FromString %s: %w", name, err) + } + da := uint32(desiredAccessReadControl | desiredAccessWriteDac) + sm := uint32(shareModeRead | shareModeWrite) + fa := uint32(syscall.FILE_ATTRIBUTE_NORMAL) + if isDir { + fa = uint32(fa | syscall.FILE_FLAG_BACKUP_SEMANTICS) + } + fd, err := syscall.CreateFile(&namep[0], da, sm, nil, syscall.OPEN_EXISTING, fa, 0) + if err != nil { + return 0, fmt.Errorf("%s syscall.CreateFile %s: %w", gvmga, name, err) + } + return fd, nil +} + +// generateDACLWithAcesAdded generates a new DACL with the two needed ACEs added. +// The caller is responsible for LocalFree of the returned DACL on success. 
+func generateDACLWithAcesAdded(name string, isDir bool, desiredAccess accessMask, origDACL uintptr) (uintptr, error) { + // Generate pointers to the SIDs based on the string SIDs + sid, err := syscall.StringToSid(sidVmGroup) + if err != nil { + return 0, fmt.Errorf("%s syscall.StringToSid %s %s: %w", gvmga, name, sidVmGroup, err) + } + + inheritance := inheritModeNoInheritance + if isDir { + inheritance = inheritModeSubContainersAndObjectsInherit + } + + eaArray := []explicitAccess{ + { + accessPermissions: desiredAccess, + accessMode: accessModeGrant, + inheritance: inheritance, + trustee: trustee{ + trusteeForm: trusteeFormIsSid, + trusteeType: trusteeTypeWellKnownGroup, + name: uintptr(unsafe.Pointer(sid)), + }, + }, + } + + modifiedDACL := uintptr(0) + if err := setEntriesInAcl(uintptr(uint32(1)), uintptr(unsafe.Pointer(&eaArray[0])), origDACL, &modifiedDACL); err != nil { + return 0, fmt.Errorf("%s SetEntriesInAcl %s: %w", gvmga, name, err) + } + + return modifiedDACL, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/security/syscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/security/syscall_windows.go new file mode 100644 index 0000000000..71326e4e46 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/security/syscall_windows.go @@ -0,0 +1,7 @@ +package security + +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go syscall_windows.go + +//sys getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (win32err error) = advapi32.GetSecurityInfo +//sys setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (win32err error) = advapi32.SetSecurityInfo +//sys setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (win32err error) = advapi32.SetEntriesInAclW diff --git a/vendor/github.com/Microsoft/hcsshim/internal/security/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/security/zsyscall_windows.go new file mode 100644 index 0000000000..26c986b88f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/security/zsyscall_windows.go @@ -0,0 +1,72 @@ +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. + +package security + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
+ return e +} + +var ( + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + + procGetSecurityInfo = modadvapi32.NewProc("GetSecurityInfo") + procSetEntriesInAclW = modadvapi32.NewProc("SetEntriesInAclW") + procSetSecurityInfo = modadvapi32.NewProc("SetSecurityInfo") +) + +func getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (win32err error) { + r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(unsafe.Pointer(ppsidOwner)), uintptr(unsafe.Pointer(ppsidGroup)), uintptr(unsafe.Pointer(ppDacl)), uintptr(unsafe.Pointer(ppSacl)), uintptr(unsafe.Pointer(ppSecurityDescriptor)), 0) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (win32err error) { + r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(count), uintptr(pListOfEEs), uintptr(oldAcl), uintptr(unsafe.Pointer(newAcl)), 0, 0) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (win32err error) { + r0, _, _ := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(psidOwner), uintptr(psidGroup), uintptr(pDacl), uintptr(pSacl), 0, 0) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/timeout/timeout.go b/vendor/github.com/Microsoft/hcsshim/internal/timeout/timeout.go new file mode 100644 index 0000000000..eaf39fa513 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/timeout/timeout.go @@ -0,0 +1,74 @@ +package timeout + +import ( + "os" + "strconv" + "time" +) + +var ( + // defaultTimeout is the timeout for most operations that is not overridden. + defaultTimeout = 4 * time.Minute + + // defaultTimeoutTestdRetry is the retry loop timeout for testd to respond + // for a disk to come online in LCOW. + defaultTimeoutTestdRetry = 5 * time.Second +) + +// External variables for HCSShim consumers to use. +var ( + // SystemCreate is the timeout for creating a compute system + SystemCreate time.Duration = defaultTimeout + + // SystemStart is the timeout for starting a compute system + SystemStart time.Duration = defaultTimeout + + // SystemPause is the timeout for pausing a compute system + SystemPause time.Duration = defaultTimeout + + // SystemResume is the timeout for resuming a compute system + SystemResume time.Duration = defaultTimeout + + // SystemSave is the timeout for saving a compute system + SystemSave time.Duration = defaultTimeout + + // SyscallWatcher is the timeout before warning of a potential stuck platform syscall. + SyscallWatcher time.Duration = defaultTimeout + + // Tar2VHD is the timeout for the tar2vhd operation to complete + Tar2VHD time.Duration = defaultTimeout + + // ExternalCommandToStart is the timeout for external commands to start + ExternalCommandToStart = defaultTimeout + + // ExternalCommandToComplete is the timeout for external commands to complete. + // Generally this means copying data from their stdio pipes. 
+ ExternalCommandToComplete = defaultTimeout + + // TestDRetryLoop is the timeout for testd retry loop when onlining a SCSI disk in LCOW + TestDRetryLoop = defaultTimeoutTestdRetry +) + +func init() { + SystemCreate = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMCREATE", SystemCreate) + SystemStart = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMSTART", SystemStart) + SystemPause = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMPAUSE", SystemPause) + SystemResume = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMRESUME", SystemResume) + SystemSave = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMSAVE", SystemSave) + SyscallWatcher = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSCALLWATCHER", SyscallWatcher) + Tar2VHD = durationFromEnvironment("HCSSHIM_TIMEOUT_TAR2VHD", Tar2VHD) + ExternalCommandToStart = durationFromEnvironment("HCSSHIM_TIMEOUT_EXTERNALCOMMANDSTART", ExternalCommandToStart) + ExternalCommandToComplete = durationFromEnvironment("HCSSHIM_TIMEOUT_EXTERNALCOMMANDCOMPLETE", ExternalCommandToComplete) + TestDRetryLoop = durationFromEnvironment("HCSSHIM_TIMEOUT_TESTDRETRYLOOP", TestDRetryLoop) +} + +func durationFromEnvironment(env string, defaultValue time.Duration) time.Duration { + envTimeout := os.Getenv(env) + if len(envTimeout) > 0 { + e, err := strconv.Atoi(envTimeout) + if err == nil && e > 0 { + return time.Second * time.Duration(e) + } + } + return defaultValue +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/doc.go b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/doc.go new file mode 100644 index 0000000000..9dd00c8128 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/doc.go @@ -0,0 +1 @@ +package vmcompute diff --git a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go new file mode 100644 index 0000000000..79b14ef972 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go @@ -0,0 +1,637 @@ +//go:build windows + +package vmcompute + +import ( + gcontext "context" + "syscall" + "time" + + "github.com/sirupsen/logrus" + "go.opencensus.io/trace" + + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/Microsoft/hcsshim/internal/log" + "github.com/Microsoft/hcsshim/internal/logfields" + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/Microsoft/hcsshim/internal/timeout" +) + +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go vmcompute.go + +//sys hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) = vmcompute.HcsEnumerateComputeSystems? +//sys hcsCreateComputeSystem(id string, configuration string, identity syscall.Handle, computeSystem *HcsSystem, result **uint16) (hr error) = vmcompute.HcsCreateComputeSystem? +//sys hcsOpenComputeSystem(id string, computeSystem *HcsSystem, result **uint16) (hr error) = vmcompute.HcsOpenComputeSystem? +//sys hcsCloseComputeSystem(computeSystem HcsSystem) (hr error) = vmcompute.HcsCloseComputeSystem? +//sys hcsStartComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsStartComputeSystem? +//sys hcsShutdownComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsShutdownComputeSystem? 
+//sys hcsTerminateComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsTerminateComputeSystem? +//sys hcsPauseComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsPauseComputeSystem? +//sys hcsResumeComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsResumeComputeSystem? +//sys hcsGetComputeSystemProperties(computeSystem HcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetComputeSystemProperties? +//sys hcsModifyComputeSystem(computeSystem HcsSystem, configuration string, result **uint16) (hr error) = vmcompute.HcsModifyComputeSystem? +//sys hcsModifyServiceSettings(settings string, result **uint16) (hr error) = vmcompute.HcsModifyServiceSettings? +//sys hcsRegisterComputeSystemCallback(computeSystem HcsSystem, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) = vmcompute.HcsRegisterComputeSystemCallback? +//sys hcsUnregisterComputeSystemCallback(callbackHandle HcsCallback) (hr error) = vmcompute.HcsUnregisterComputeSystemCallback? +//sys hcsSaveComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsSaveComputeSystem? + +//sys hcsCreateProcess(computeSystem HcsSystem, processParameters string, processInformation *HcsProcessInformation, process *HcsProcess, result **uint16) (hr error) = vmcompute.HcsCreateProcess? +//sys hcsOpenProcess(computeSystem HcsSystem, pid uint32, process *HcsProcess, result **uint16) (hr error) = vmcompute.HcsOpenProcess? +//sys hcsCloseProcess(process HcsProcess) (hr error) = vmcompute.HcsCloseProcess? +//sys hcsTerminateProcess(process HcsProcess, result **uint16) (hr error) = vmcompute.HcsTerminateProcess? +//sys hcsSignalProcess(process HcsProcess, options string, result **uint16) (hr error) = vmcompute.HcsSignalProcess? +//sys hcsGetProcessInfo(process HcsProcess, processInformation *HcsProcessInformation, result **uint16) (hr error) = vmcompute.HcsGetProcessInfo? +//sys hcsGetProcessProperties(process HcsProcess, processProperties **uint16, result **uint16) (hr error) = vmcompute.HcsGetProcessProperties? +//sys hcsModifyProcess(process HcsProcess, settings string, result **uint16) (hr error) = vmcompute.HcsModifyProcess? +//sys hcsGetServiceProperties(propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetServiceProperties? +//sys hcsRegisterProcessCallback(process HcsProcess, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) = vmcompute.HcsRegisterProcessCallback? +//sys hcsUnregisterProcessCallback(callbackHandle HcsCallback) (hr error) = vmcompute.HcsUnregisterProcessCallback? + +// errVmcomputeOperationPending is an error encountered when the operation is being completed asynchronously +const errVmcomputeOperationPending = syscall.Errno(0xC0370103) + +// HcsSystem is the handle associated with a created compute system. +type HcsSystem syscall.Handle + +// HcsProcess is the handle associated with a created process in a compute +// system. +type HcsProcess syscall.Handle + +// HcsCallback is the handle associated with the function to call when events +// occur. +type HcsCallback syscall.Handle + +// HcsProcessInformation is the structure used when creating or getting process +// info. +type HcsProcessInformation struct { + // ProcessId is the pid of the created process. 
+ ProcessId uint32 + _ uint32 // reserved padding + // StdInput is the handle associated with the stdin of the process. + StdInput syscall.Handle + // StdOutput is the handle associated with the stdout of the process. + StdOutput syscall.Handle + // StdError is the handle associated with the stderr of the process. + StdError syscall.Handle +} + +func execute(ctx gcontext.Context, timeout time.Duration, f func() error) error { + now := time.Now() + if timeout > 0 { + var cancel gcontext.CancelFunc + ctx, cancel = gcontext.WithTimeout(ctx, timeout) + defer cancel() + } + + // if ctx already has prior deadlines, the shortest timeout takes precedence and is used. + // find the true timeout for reporting + // + // this is mostly an issue with (*UtilityVM).Start(context.Context), which sets its + // own (2 minute) timeout. + deadline, ok := ctx.Deadline() + trueTimeout := timeout + if ok { + trueTimeout = deadline.Sub(now) + log.G(ctx).WithFields(logrus.Fields{ + logfields.Timeout: trueTimeout, + "desiredTimeout": timeout, + }).Trace("Executing syscall with deadline") + } + + done := make(chan error, 1) + go func() { + done <- f() + }() + select { + case <-ctx.Done(): + if ctx.Err() == gcontext.DeadlineExceeded { + log.G(ctx).WithField(logfields.Timeout, trueTimeout). + Warning("Syscall did not complete within operation timeout. This may indicate a platform issue. " + + "If it appears to be making no forward progress, obtain the stacks and see if there is a syscall " + + "stuck in the platform API for a significant length of time.") + } + return ctx.Err() + case err := <-done: + return err + } +} + +func HcsEnumerateComputeSystems(ctx gcontext.Context, query string) (computeSystems, result string, hr error) { + ctx, span := oc.StartSpan(ctx, "HcsEnumerateComputeSystems") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("query", query)) + + return computeSystems, result, execute(ctx, timeout.SyscallWatcher, func() error { + var ( + computeSystemsp *uint16 + resultp *uint16 + ) + err := hcsEnumerateComputeSystems(query, &computeSystemsp, &resultp) + if computeSystemsp != nil { + computeSystems = interop.ConvertAndFreeCoTaskMemString(computeSystemsp) + } + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsCreateComputeSystem(ctx gcontext.Context, id string, configuration string, identity syscall.Handle) (computeSystem HcsSystem, result string, hr error) { + ctx, span := oc.StartSpan(ctx, "HcsCreateComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + if hr != errVmcomputeOperationPending { + oc.SetSpanStatus(span, hr) + } + }() + span.AddAttributes( + trace.StringAttribute("id", id), + trace.StringAttribute("configuration", configuration)) + + return computeSystem, result, execute(ctx, timeout.SystemCreate, func() error { + var resultp *uint16 + err := hcsCreateComputeSystem(id, configuration, identity, &computeSystem, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsOpenComputeSystem(ctx gcontext.Context, id string) (computeSystem HcsSystem, result string, hr error) { + ctx, span := oc.StartSpan(ctx, "HcsOpenComputeSystem") + defer span.End() + defer func() { + if result != "" { + 
span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + + return computeSystem, result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsOpenComputeSystem(id, &computeSystem, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsCloseComputeSystem(ctx gcontext.Context, computeSystem HcsSystem) (hr error) { + ctx, span := oc.StartSpan(ctx, "HcsCloseComputeSystem") + defer span.End() + defer func() { oc.SetSpanStatus(span, hr) }() + + return execute(ctx, timeout.SyscallWatcher, func() error { + return hcsCloseComputeSystem(computeSystem) + }) +} + +func HcsStartComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { + ctx, span := oc.StartSpan(ctx, "HcsStartComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + if hr != errVmcomputeOperationPending { + oc.SetSpanStatus(span, hr) + } + }() + span.AddAttributes(trace.StringAttribute("options", options)) + + return result, execute(ctx, timeout.SystemStart, func() error { + var resultp *uint16 + err := hcsStartComputeSystem(computeSystem, options, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsShutdownComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { + ctx, span := oc.StartSpan(ctx, "HcsShutdownComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + if hr != errVmcomputeOperationPending { + oc.SetSpanStatus(span, hr) + } + }() + span.AddAttributes(trace.StringAttribute("options", options)) + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsShutdownComputeSystem(computeSystem, options, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsTerminateComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { + ctx, span := oc.StartSpan(ctx, "HcsTerminateComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + if hr != errVmcomputeOperationPending { + oc.SetSpanStatus(span, hr) + } + }() + span.AddAttributes(trace.StringAttribute("options", options)) + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsTerminateComputeSystem(computeSystem, options, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsPauseComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { + ctx, span := oc.StartSpan(ctx, "HcsPauseComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + if hr != errVmcomputeOperationPending { + oc.SetSpanStatus(span, hr) + } + }() + span.AddAttributes(trace.StringAttribute("options", options)) + + return result, execute(ctx, timeout.SystemPause, func() error { + var resultp *uint16 + err := hcsPauseComputeSystem(computeSystem, options, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} 
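+
+// The exported Hcs* wrappers in this file share a common shape: run the raw
+// vmcompute call through execute() so it is bounded by the configured
+// timeout (with a warning logged if the platform call appears stuck), then
+// convert any result document the platform returned into a Go string. A
+// minimal sketch of that shape, using a hypothetical hcsFoo //sys stub for
+// illustration:
+//
+//	func HcsFoo(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) {
+//		return result, execute(ctx, timeout.SyscallWatcher, func() error {
+//			var resultp *uint16
+//			err := hcsFoo(computeSystem, options, &resultp) // hypothetical stub
+//			if resultp != nil {
+//				// The result document is CoTaskMem-allocated; convert and free it.
+//				result = interop.ConvertAndFreeCoTaskMemString(resultp)
+//			}
+//			return err
+//		})
+//	}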
+ +func HcsResumeComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { + ctx, span := oc.StartSpan(ctx, "HcsResumeComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + if hr != errVmcomputeOperationPending { + oc.SetSpanStatus(span, hr) + } + }() + span.AddAttributes(trace.StringAttribute("options", options)) + + return result, execute(ctx, timeout.SystemResume, func() error { + var resultp *uint16 + err := hcsResumeComputeSystem(computeSystem, options, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsGetComputeSystemProperties(ctx gcontext.Context, computeSystem HcsSystem, propertyQuery string) (properties, result string, hr error) { + ctx, span := oc.StartSpan(ctx, "HcsGetComputeSystemProperties") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("propertyQuery", propertyQuery)) + + return properties, result, execute(ctx, timeout.SyscallWatcher, func() error { + var ( + propertiesp *uint16 + resultp *uint16 + ) + err := hcsGetComputeSystemProperties(computeSystem, propertyQuery, &propertiesp, &resultp) + if propertiesp != nil { + properties = interop.ConvertAndFreeCoTaskMemString(propertiesp) + } + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsModifyComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, configuration string) (result string, hr error) { + ctx, span := oc.StartSpan(ctx, "HcsModifyComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("configuration", configuration)) + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsModifyComputeSystem(computeSystem, configuration, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsModifyServiceSettings(ctx gcontext.Context, settings string) (result string, hr error) { + ctx, span := oc.StartSpan(ctx, "HcsModifyServiceSettings") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("settings", settings)) + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsModifyServiceSettings(settings, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsRegisterComputeSystemCallback(ctx gcontext.Context, computeSystem HcsSystem, callback uintptr, context uintptr) (callbackHandle HcsCallback, hr error) { + ctx, span := oc.StartSpan(ctx, "HcsRegisterComputeSystemCallback") + defer span.End() + defer func() { oc.SetSpanStatus(span, hr) }() + + return callbackHandle, execute(ctx, timeout.SyscallWatcher, func() error { + return hcsRegisterComputeSystemCallback(computeSystem, callback, context, &callbackHandle) + }) +} + +func HcsUnregisterComputeSystemCallback(ctx gcontext.Context, callbackHandle HcsCallback) (hr error) { + ctx, span := oc.StartSpan(ctx, 
"HcsUnregisterComputeSystemCallback") + defer span.End() + defer func() { oc.SetSpanStatus(span, hr) }() + + return execute(ctx, timeout.SyscallWatcher, func() error { + return hcsUnregisterComputeSystemCallback(callbackHandle) + }) +} + +func HcsCreateProcess(ctx gcontext.Context, computeSystem HcsSystem, processParameters string) (processInformation HcsProcessInformation, process HcsProcess, result string, hr error) { + ctx, span := oc.StartSpan(ctx, "HcsCreateProcess") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + if span.IsRecordingEvents() { + // wont handle v1 process parameters + if s, err := log.ScrubProcessParameters(processParameters); err == nil { + span.AddAttributes(trace.StringAttribute("processParameters", s)) + } + } + + return processInformation, process, result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsCreateProcess(computeSystem, processParameters, &processInformation, &process, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsOpenProcess(ctx gcontext.Context, computeSystem HcsSystem, pid uint32) (process HcsProcess, result string, hr error) { + ctx, span := oc.StartSpan(ctx, "HcsOpenProcess") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.Int64Attribute("pid", int64(pid))) + + return process, result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsOpenProcess(computeSystem, pid, &process, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsCloseProcess(ctx gcontext.Context, process HcsProcess) (hr error) { + ctx, span := oc.StartSpan(ctx, "HcsCloseProcess") + defer span.End() + defer func() { oc.SetSpanStatus(span, hr) }() + + return execute(ctx, timeout.SyscallWatcher, func() error { + return hcsCloseProcess(process) + }) +} + +func HcsTerminateProcess(ctx gcontext.Context, process HcsProcess) (result string, hr error) { + ctx, span := oc.StartSpan(ctx, "HcsTerminateProcess") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsTerminateProcess(process, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsSignalProcess(ctx gcontext.Context, process HcsProcess, options string) (result string, hr error) { + ctx, span := oc.StartSpan(ctx, "HcsSignalProcess") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("options", options)) + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsSignalProcess(process, options, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsGetProcessInfo(ctx gcontext.Context, process HcsProcess) (processInformation HcsProcessInformation, result string, hr error) { + ctx, span := oc.StartSpan(ctx, "HcsGetProcessInfo") + defer 
span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + + return processInformation, result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsGetProcessInfo(process, &processInformation, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsGetProcessProperties(ctx gcontext.Context, process HcsProcess) (processProperties, result string, hr error) { + ctx, span := oc.StartSpan(ctx, "HcsGetProcessProperties") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + + return processProperties, result, execute(ctx, timeout.SyscallWatcher, func() error { + var ( + processPropertiesp *uint16 + resultp *uint16 + ) + err := hcsGetProcessProperties(process, &processPropertiesp, &resultp) + if processPropertiesp != nil { + processProperties = interop.ConvertAndFreeCoTaskMemString(processPropertiesp) + } + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsModifyProcess(ctx gcontext.Context, process HcsProcess, settings string) (result string, hr error) { + ctx, span := oc.StartSpan(ctx, "HcsModifyProcess") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("settings", settings)) + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsModifyProcess(process, settings, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsGetServiceProperties(ctx gcontext.Context, propertyQuery string) (properties, result string, hr error) { + ctx, span := oc.StartSpan(ctx, "HcsGetServiceProperties") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("propertyQuery", propertyQuery)) + + return properties, result, execute(ctx, timeout.SyscallWatcher, func() error { + var ( + propertiesp *uint16 + resultp *uint16 + ) + err := hcsGetServiceProperties(propertyQuery, &propertiesp, &resultp) + if propertiesp != nil { + properties = interop.ConvertAndFreeCoTaskMemString(propertiesp) + } + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsRegisterProcessCallback(ctx gcontext.Context, process HcsProcess, callback uintptr, context uintptr) (callbackHandle HcsCallback, hr error) { + ctx, span := oc.StartSpan(ctx, "HcsRegisterProcessCallback") + defer span.End() + defer func() { oc.SetSpanStatus(span, hr) }() + + return callbackHandle, execute(ctx, timeout.SyscallWatcher, func() error { + return hcsRegisterProcessCallback(process, callback, context, &callbackHandle) + }) +} + +func HcsUnregisterProcessCallback(ctx gcontext.Context, callbackHandle HcsCallback) (hr error) { + ctx, span := oc.StartSpan(ctx, "HcsUnregisterProcessCallback") + defer span.End() + defer func() { oc.SetSpanStatus(span, hr) }() + + return execute(ctx, timeout.SyscallWatcher, func() error { + return hcsUnregisterProcessCallback(callbackHandle) + }) +} + +func HcsSaveComputeSystem(ctx gcontext.Context, computeSystem 
HcsSystem, options string) (result string, hr error) { + ctx, span := oc.StartSpan(ctx, "HcsSaveComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + if hr != errVmcomputeOperationPending { + oc.SetSpanStatus(span, hr) + } + }() + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsSaveComputeSystem(computeSystem, options, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go new file mode 100644 index 0000000000..42368872b7 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go @@ -0,0 +1,610 @@ +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. + +package vmcompute + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
+ return e +} + +var ( + modvmcompute = windows.NewLazySystemDLL("vmcompute.dll") + + procHcsCloseComputeSystem = modvmcompute.NewProc("HcsCloseComputeSystem") + procHcsCloseProcess = modvmcompute.NewProc("HcsCloseProcess") + procHcsCreateComputeSystem = modvmcompute.NewProc("HcsCreateComputeSystem") + procHcsCreateProcess = modvmcompute.NewProc("HcsCreateProcess") + procHcsEnumerateComputeSystems = modvmcompute.NewProc("HcsEnumerateComputeSystems") + procHcsGetComputeSystemProperties = modvmcompute.NewProc("HcsGetComputeSystemProperties") + procHcsGetProcessInfo = modvmcompute.NewProc("HcsGetProcessInfo") + procHcsGetProcessProperties = modvmcompute.NewProc("HcsGetProcessProperties") + procHcsGetServiceProperties = modvmcompute.NewProc("HcsGetServiceProperties") + procHcsModifyComputeSystem = modvmcompute.NewProc("HcsModifyComputeSystem") + procHcsModifyProcess = modvmcompute.NewProc("HcsModifyProcess") + procHcsModifyServiceSettings = modvmcompute.NewProc("HcsModifyServiceSettings") + procHcsOpenComputeSystem = modvmcompute.NewProc("HcsOpenComputeSystem") + procHcsOpenProcess = modvmcompute.NewProc("HcsOpenProcess") + procHcsPauseComputeSystem = modvmcompute.NewProc("HcsPauseComputeSystem") + procHcsRegisterComputeSystemCallback = modvmcompute.NewProc("HcsRegisterComputeSystemCallback") + procHcsRegisterProcessCallback = modvmcompute.NewProc("HcsRegisterProcessCallback") + procHcsResumeComputeSystem = modvmcompute.NewProc("HcsResumeComputeSystem") + procHcsSaveComputeSystem = modvmcompute.NewProc("HcsSaveComputeSystem") + procHcsShutdownComputeSystem = modvmcompute.NewProc("HcsShutdownComputeSystem") + procHcsSignalProcess = modvmcompute.NewProc("HcsSignalProcess") + procHcsStartComputeSystem = modvmcompute.NewProc("HcsStartComputeSystem") + procHcsTerminateComputeSystem = modvmcompute.NewProc("HcsTerminateComputeSystem") + procHcsTerminateProcess = modvmcompute.NewProc("HcsTerminateProcess") + procHcsUnregisterComputeSystemCallback = modvmcompute.NewProc("HcsUnregisterComputeSystemCallback") + procHcsUnregisterProcessCallback = modvmcompute.NewProc("HcsUnregisterProcessCallback") +) + +func hcsCloseComputeSystem(computeSystem HcsSystem) (hr error) { + hr = procHcsCloseComputeSystem.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsCloseComputeSystem.Addr(), 1, uintptr(computeSystem), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsCloseProcess(process HcsProcess) (hr error) { + hr = procHcsCloseProcess.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsCloseProcess.Addr(), 1, uintptr(process), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsCreateComputeSystem(id string, configuration string, identity syscall.Handle, computeSystem *HcsSystem, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(configuration) + if hr != nil { + return + } + return _hcsCreateComputeSystem(_p0, _p1, identity, computeSystem, result) +} + +func _hcsCreateComputeSystem(id *uint16, configuration *uint16, identity syscall.Handle, computeSystem *HcsSystem, result **uint16) (hr error) { + hr = procHcsCreateComputeSystem.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsCreateComputeSystem.Addr(), 5, uintptr(unsafe.Pointer(id)), 
uintptr(unsafe.Pointer(configuration)), uintptr(identity), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsCreateProcess(computeSystem HcsSystem, processParameters string, processInformation *HcsProcessInformation, process *HcsProcess, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(processParameters) + if hr != nil { + return + } + return _hcsCreateProcess(computeSystem, _p0, processInformation, process, result) +} + +func _hcsCreateProcess(computeSystem HcsSystem, processParameters *uint16, processInformation *HcsProcessInformation, process *HcsProcess, result **uint16) (hr error) { + hr = procHcsCreateProcess.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsCreateProcess.Addr(), 5, uintptr(computeSystem), uintptr(unsafe.Pointer(processParameters)), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(query) + if hr != nil { + return + } + return _hcsEnumerateComputeSystems(_p0, computeSystems, result) +} + +func _hcsEnumerateComputeSystems(query *uint16, computeSystems **uint16, result **uint16) (hr error) { + hr = procHcsEnumerateComputeSystems.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsEnumerateComputeSystems.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(computeSystems)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsGetComputeSystemProperties(computeSystem HcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(propertyQuery) + if hr != nil { + return + } + return _hcsGetComputeSystemProperties(computeSystem, _p0, properties, result) +} + +func _hcsGetComputeSystemProperties(computeSystem HcsSystem, propertyQuery *uint16, properties **uint16, result **uint16) (hr error) { + hr = procHcsGetComputeSystemProperties.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsGetComputeSystemProperties.Addr(), 4, uintptr(computeSystem), uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsGetProcessInfo(process HcsProcess, processInformation *HcsProcessInformation, result **uint16) (hr error) { + hr = procHcsGetProcessInfo.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsGetProcessInfo.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsGetProcessProperties(process HcsProcess, processProperties **uint16, result **uint16) (hr error) { + hr = procHcsGetProcessProperties.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsGetProcessProperties.Addr(), 3, uintptr(process), 
uintptr(unsafe.Pointer(processProperties)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsGetServiceProperties(propertyQuery string, properties **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(propertyQuery) + if hr != nil { + return + } + return _hcsGetServiceProperties(_p0, properties, result) +} + +func _hcsGetServiceProperties(propertyQuery *uint16, properties **uint16, result **uint16) (hr error) { + hr = procHcsGetServiceProperties.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsGetServiceProperties.Addr(), 3, uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsModifyComputeSystem(computeSystem HcsSystem, configuration string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(configuration) + if hr != nil { + return + } + return _hcsModifyComputeSystem(computeSystem, _p0, result) +} + +func _hcsModifyComputeSystem(computeSystem HcsSystem, configuration *uint16, result **uint16) (hr error) { + hr = procHcsModifyComputeSystem.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsModifyComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(configuration)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsModifyProcess(process HcsProcess, settings string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcsModifyProcess(process, _p0, result) +} + +func _hcsModifyProcess(process HcsProcess, settings *uint16, result **uint16) (hr error) { + hr = procHcsModifyProcess.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsModifyProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsModifyServiceSettings(settings string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcsModifyServiceSettings(_p0, result) +} + +func _hcsModifyServiceSettings(settings *uint16, result **uint16) (hr error) { + hr = procHcsModifyServiceSettings.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsModifyServiceSettings.Addr(), 2, uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsOpenComputeSystem(id string, computeSystem *HcsSystem, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _hcsOpenComputeSystem(_p0, computeSystem, result) +} + +func _hcsOpenComputeSystem(id *uint16, computeSystem *HcsSystem, result **uint16) (hr error) { + hr = procHcsOpenComputeSystem.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsOpenComputeSystem.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(computeSystem)), 
uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsOpenProcess(computeSystem HcsSystem, pid uint32, process *HcsProcess, result **uint16) (hr error) { + hr = procHcsOpenProcess.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsOpenProcess.Addr(), 4, uintptr(computeSystem), uintptr(pid), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsPauseComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsPauseComputeSystem(computeSystem, _p0, result) +} + +func _hcsPauseComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { + hr = procHcsPauseComputeSystem.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsPauseComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsRegisterComputeSystemCallback(computeSystem HcsSystem, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) { + hr = procHcsRegisterComputeSystemCallback.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsRegisterComputeSystemCallback.Addr(), 4, uintptr(computeSystem), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsRegisterProcessCallback(process HcsProcess, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) { + hr = procHcsRegisterProcessCallback.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsRegisterProcessCallback.Addr(), 4, uintptr(process), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsResumeComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsResumeComputeSystem(computeSystem, _p0, result) +} + +func _hcsResumeComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { + hr = procHcsResumeComputeSystem.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsResumeComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsSaveComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsSaveComputeSystem(computeSystem, _p0, result) +} + +func _hcsSaveComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { + hr = procHcsSaveComputeSystem.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsSaveComputeSystem.Addr(), 3, 
uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsShutdownComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsShutdownComputeSystem(computeSystem, _p0, result) +} + +func _hcsShutdownComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { + hr = procHcsShutdownComputeSystem.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsShutdownComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsSignalProcess(process HcsProcess, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsSignalProcess(process, _p0, result) +} + +func _hcsSignalProcess(process HcsProcess, options *uint16, result **uint16) (hr error) { + hr = procHcsSignalProcess.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsSignalProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsStartComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsStartComputeSystem(computeSystem, _p0, result) +} + +func _hcsStartComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { + hr = procHcsStartComputeSystem.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsStartComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsTerminateComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsTerminateComputeSystem(computeSystem, _p0, result) +} + +func _hcsTerminateComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { + hr = procHcsTerminateComputeSystem.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsTerminateComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsTerminateProcess(process HcsProcess, result **uint16) (hr error) { + hr = procHcsTerminateProcess.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsTerminateProcess.Addr(), 2, uintptr(process), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsUnregisterComputeSystemCallback(callbackHandle HcsCallback) (hr error) { + hr = procHcsUnregisterComputeSystemCallback.Find() + if hr != 
nil { + return + } + r0, _, _ := syscall.Syscall(procHcsUnregisterComputeSystemCallback.Addr(), 1, uintptr(callbackHandle), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsUnregisterProcessCallback(callbackHandle HcsCallback) (hr error) { + hr = procHcsUnregisterProcessCallback.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsUnregisterProcessCallback.Addr(), 1, uintptr(callbackHandle), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go new file mode 100644 index 0000000000..e12253c947 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go @@ -0,0 +1,29 @@ +//go:build windows + +package wclayer + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) + +// ActivateLayer will find the layer with the given id and mount it's filesystem. +// For a read/write layer, the mounted filesystem will appear as a volume on the +// host, while a read-only layer is generally expected to be a no-op. +// An activated layer must later be deactivated via DeactivateLayer. +func ActivateLayer(ctx context.Context, path string) (err error) { + title := "hcsshim::ActivateLayer" + ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) + + err = activateLayer(&stdDriverInfo, path) + if err != nil { + return hcserror.New(err, title, "") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayerreader.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayerreader.go new file mode 100644 index 0000000000..792f13f598 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayerreader.go @@ -0,0 +1,216 @@ +package wclayer + +import ( + "errors" + "io" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/Microsoft/go-winio" + "github.com/Microsoft/hcsshim/internal/longpath" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) + +type baseLayerReader struct { + s *trace.Span + root string + result chan *fileEntry + proceed chan bool + currentFile *os.File + backupReader *winio.BackupFileReader +} + +func newBaseLayerReader(root string, s *trace.Span) (r *baseLayerReader) { + r = &baseLayerReader{ + s: s, + root: root, + result: make(chan *fileEntry), + proceed: make(chan bool), + } + go r.walk() + return r +} + +func (r *baseLayerReader) walkUntilCancelled() error { + root, err := longpath.LongAbs(r.root) + if err != nil { + return err + } + + r.root = root + + err = filepath.Walk(filepath.Join(r.root, filesPath), func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + // Indirect fix for https://github.com/moby/moby/issues/32838#issuecomment-343610048. + // Handle failure from what may be a golang bug in the conversion of + // UTF16 to UTF8 in files which are left in the recycle bin. Os.Lstat + // which is called by filepath.Walk will fail when a filename contains + // unicode characters. 
Skip the recycle bin regardless which is goodness. + if strings.EqualFold(path, filepath.Join(r.root, `Files\$Recycle.Bin`)) && info.IsDir() { + return filepath.SkipDir + } + + r.result <- &fileEntry{path, info, nil} + if !<-r.proceed { + return errorIterationCanceled + } + + return nil + }) + + if err == errorIterationCanceled { + return nil + } + + if err != nil { + return err + } + + utilityVMAbsPath := filepath.Join(r.root, UtilityVMPath) + utilityVMFilesAbsPath := filepath.Join(r.root, UtilityVMFilesPath) + + // Ignore a UtilityVM without Files, that's not _really_ a UtilityVM + if _, err = os.Lstat(utilityVMFilesAbsPath); err != nil { + if os.IsNotExist(err) { + return io.EOF + } + return err + } + + err = filepath.Walk(utilityVMAbsPath, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + if path != utilityVMAbsPath && path != utilityVMFilesAbsPath && !hasPathPrefix(path, utilityVMFilesAbsPath) { + if info.IsDir() { + return filepath.SkipDir + } + return nil + } + + r.result <- &fileEntry{path, info, nil} + if !<-r.proceed { + return errorIterationCanceled + } + + return nil + }) + + if err == errorIterationCanceled { + return nil + } + + if err != nil { + return err + } + + return io.EOF +} + +func (r *baseLayerReader) walk() { + defer close(r.result) + if !<-r.proceed { + return + } + + err := r.walkUntilCancelled() + if err != nil { + for { + r.result <- &fileEntry{err: err} + if !<-r.proceed { + return + } + } + } +} + +func (r *baseLayerReader) reset() { + if r.backupReader != nil { + r.backupReader.Close() + r.backupReader = nil + } + if r.currentFile != nil { + r.currentFile.Close() + r.currentFile = nil + } +} + +func (r *baseLayerReader) Next() (path string, size int64, fileInfo *winio.FileBasicInfo, err error) { + r.reset() + r.proceed <- true + fe := <-r.result + if fe == nil { + err = errors.New("BaseLayerReader closed") + return + } + if fe.err != nil { + err = fe.err + return + } + + path, err = filepath.Rel(r.root, fe.path) + if err != nil { + return + } + + f, err := openFileOrDir(fe.path, syscall.GENERIC_READ, syscall.OPEN_EXISTING) + if err != nil { + return + } + defer func() { + if f != nil { + f.Close() + } + }() + + fileInfo, err = winio.GetFileBasicInfo(f) + if err != nil { + return + } + + size = fe.fi.Size() + r.backupReader = winio.NewBackupFileReader(f, true) + + r.currentFile = f + f = nil + return +} + +func (r *baseLayerReader) LinkInfo() (uint32, *winio.FileIDInfo, error) { + fileStandardInfo, err := winio.GetFileStandardInfo(r.currentFile) + if err != nil { + return 0, nil, err + } + fileIDInfo, err := winio.GetFileID(r.currentFile) + if err != nil { + return 0, nil, err + } + return fileStandardInfo.NumberOfLinks, fileIDInfo, nil +} + +func (r *baseLayerReader) Read(b []byte) (int, error) { + if r.backupReader == nil { + return 0, io.EOF + } + return r.backupReader.Read(b) +} + +func (r *baseLayerReader) Close() (err error) { + defer r.s.End() + defer func() { + oc.SetSpanStatus(r.s, err) + close(r.proceed) + }() + r.proceed <- false + // The r.result channel will be closed once walk() returns + <-r.result + r.reset() + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayerwriter.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayerwriter.go new file mode 100644 index 0000000000..aea8b421ef --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayerwriter.go @@ -0,0 +1,183 @@ +//go:build windows + +package wclayer +
+import ( + "context" + "errors" + "os" + "path/filepath" + "syscall" + + "github.com/Microsoft/go-winio" + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/Microsoft/hcsshim/internal/safefile" + "github.com/Microsoft/hcsshim/internal/winapi" + "go.opencensus.io/trace" +) + +type baseLayerWriter struct { + ctx context.Context + s *trace.Span + + root *os.File + f *os.File + bw *winio.BackupFileWriter + err error + hasUtilityVM bool + dirInfo []dirInfo +} + +type dirInfo struct { + path string + fileInfo winio.FileBasicInfo +} + +// reapplyDirectoryTimes reapplies directory modification, creation, etc. times +// after processing of the directory tree has completed. The times are expected +// to be ordered such that parent directories come before child directories. +func reapplyDirectoryTimes(root *os.File, dis []dirInfo) error { + for i := range dis { + di := &dis[len(dis)-i-1] // reverse order: process child directories first + f, err := safefile.OpenRelative(di.path, root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, winapi.FILE_OPEN, winapi.FILE_DIRECTORY_FILE|syscall.FILE_FLAG_OPEN_REPARSE_POINT) + if err != nil { + return err + } + + err = winio.SetFileBasicInfo(f, &di.fileInfo) + f.Close() + if err != nil { + return err + } + } + return nil +} + +func (w *baseLayerWriter) closeCurrentFile() error { + if w.f != nil { + err := w.bw.Close() + err2 := w.f.Close() + w.f = nil + w.bw = nil + if err != nil { + return err + } + if err2 != nil { + return err2 + } + } + return nil +} + +func (w *baseLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) (err error) { + defer func() { + if err != nil { + w.err = err + } + }() + + err = w.closeCurrentFile() + if err != nil { + return err + } + + if filepath.ToSlash(name) == `UtilityVM/Files` { + w.hasUtilityVM = true + } + + var f *os.File + defer func() { + if f != nil { + f.Close() + } + }() + + extraFlags := uint32(0) + if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { + extraFlags |= winapi.FILE_DIRECTORY_FILE + w.dirInfo = append(w.dirInfo, dirInfo{name, *fileInfo}) + } + + mode := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | winio.WRITE_DAC | winio.WRITE_OWNER | winio.ACCESS_SYSTEM_SECURITY) + f, err = safefile.OpenRelative(name, w.root, mode, syscall.FILE_SHARE_READ, winapi.FILE_CREATE, extraFlags) + if err != nil { + return hcserror.New(err, "Failed to safefile.OpenRelative", name) + } + + err = winio.SetFileBasicInfo(f, fileInfo) + if err != nil { + return hcserror.New(err, "Failed to SetFileBasicInfo", name) + } + + w.f = f + w.bw = winio.NewBackupFileWriter(f, true) + f = nil + return nil +} + +func (w *baseLayerWriter) AddLink(name string, target string) (err error) { + defer func() { + if err != nil { + w.err = err + } + }() + + err = w.closeCurrentFile() + if err != nil { + return err + } + + return safefile.LinkRelative(target, w.root, name, w.root) +} + +func (w *baseLayerWriter) Remove(name string) error { + return errors.New("base layer cannot have tombstones") +} + +func (w *baseLayerWriter) Write(b []byte) (int, error) { + n, err := w.bw.Write(b) + if err != nil { + w.err = err + } + return n, err +} + +func (w *baseLayerWriter) Close() (err error) { + defer w.s.End() + defer func() { oc.SetSpanStatus(w.s, err) }() + defer func() { + w.root.Close() + w.root = nil + }() + + err = w.closeCurrentFile() + if err != nil { + return err + } + if w.err == nil { + // Restore the file 
times of all the directories, since they may have + // been modified by creating child directories. + err = reapplyDirectoryTimes(w.root, w.dirInfo) + if err != nil { + return err + } + + err = ProcessBaseLayer(w.ctx, w.root.Name()) + if err != nil { + return err + } + + if w.hasUtilityVM { + err := safefile.EnsureNotReparsePointRelative("UtilityVM", w.root) + if err != nil { + return err + } + err = ProcessUtilityVMImage(w.ctx, filepath.Join(w.root.Name(), "UtilityVM")) + if err != nil { + return err + } + } + } + return w.err +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/converttobaselayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/converttobaselayer.go new file mode 100644 index 0000000000..c542f556c0 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/converttobaselayer.go @@ -0,0 +1,157 @@ +package wclayer + +import ( + "context" + "fmt" + "os" + "path/filepath" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/longpath" + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/Microsoft/hcsshim/internal/safefile" + "github.com/Microsoft/hcsshim/internal/winapi" + "github.com/pkg/errors" + "go.opencensus.io/trace" + "golang.org/x/sys/windows" +) + +var hiveNames = []string{"DEFAULT", "SAM", "SECURITY", "SOFTWARE", "SYSTEM"} + +// Ensure the given file exists as an ordinary file, and create a minimal hive file if not. +func ensureHive(path string, root *os.File) (err error) { + _, err = safefile.LstatRelative(path, root) + if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("accessing %s: %w", path, err) + } + + version := windows.RtlGetVersion() + if version == nil { + return fmt.Errorf("failed to get OS version") + } + + var fullPath string + fullPath, err = longpath.LongAbs(filepath.Join(root.Name(), path)) + if err != nil { + return fmt.Errorf("getting path: %w", err) + } + + var key winapi.ORHKey + err = winapi.ORCreateHive(&key) + if err != nil { + return fmt.Errorf("creating hive: %w", err) + } + + defer func() { + closeErr := winapi.ORCloseHive(key) + if closeErr != nil && err == nil { + err = fmt.Errorf("closing hive key: %w", closeErr) + } + }() + + err = winapi.ORSaveHive(key, fullPath, version.MajorVersion, version.MinorVersion) + if err != nil { + return fmt.Errorf("saving hive: %w", err) + } + + return nil +} + +func ensureBaseLayer(root *os.File) (hasUtilityVM bool, err error) { + // The base layer registry hives will be copied from here + const hiveSourcePath = "Files\\Windows\\System32\\config" + if err = safefile.MkdirAllRelative(hiveSourcePath, root); err != nil { + return + } + + for _, hiveName := range hiveNames { + hivePath := filepath.Join(hiveSourcePath, hiveName) + if err = ensureHive(hivePath, root); err != nil { + return + } + } + + stat, err := safefile.LstatRelative(UtilityVMFilesPath, root) + + if os.IsNotExist(err) { + return false, nil + } + + if err != nil { + return + } + + if !stat.Mode().IsDir() { + fullPath := filepath.Join(root.Name(), UtilityVMFilesPath) + return false, errors.Errorf("%s has unexpected file mode %s", fullPath, stat.Mode().String()) + } + + const bcdRelativePath = "EFI\\Microsoft\\Boot\\BCD" + + // Just check that this exists as a regular file. 
If it exists but is not a valid registry hive, + // ProcessUtilityVMImage will complain: + // "The registry could not read in, or write out, or flush, one of the files that contain the system's image of the registry." + bcdPath := filepath.Join(UtilityVMFilesPath, bcdRelativePath) + + stat, err = safefile.LstatRelative(bcdPath, root) + if err != nil { + return false, errors.Wrapf(err, "UtilityVM must contain '%s'", bcdRelativePath) + } + + if !stat.Mode().IsRegular() { + fullPath := filepath.Join(root.Name(), bcdPath) + return false, errors.Errorf("%s has unexpected file mode %s", fullPath, stat.Mode().String()) + } + + return true, nil +} + +func convertToBaseLayer(ctx context.Context, root *os.File) error { + hasUtilityVM, err := ensureBaseLayer(root) + + if err != nil { + return err + } + + if err := ProcessBaseLayer(ctx, root.Name()); err != nil { + return err + } + + if !hasUtilityVM { + return nil + } + + err = safefile.EnsureNotReparsePointRelative(UtilityVMPath, root) + if err != nil { + return err + } + + utilityVMPath := filepath.Join(root.Name(), UtilityVMPath) + return ProcessUtilityVMImage(ctx, utilityVMPath) +} + +// ConvertToBaseLayer processes a candidate base layer, i.e. a directory +// containing the desired file content under Files/, and optionally the +// desired file content for a UtilityVM under UtilityVM/Files/ +func ConvertToBaseLayer(ctx context.Context, path string) (err error) { + title := "hcsshim::ConvertToBaseLayer" + ctx, span := trace.StartSpan(ctx, title) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) + + root, err := safefile.OpenRoot(path) + if err != nil { + return hcserror.New(err, title+" - failed", "") + } + defer func() { + if err2 := root.Close(); err == nil && err2 != nil { + err = hcserror.New(err2, title+" - failed", "") + } + }() + + if err = convertToBaseLayer(ctx, root); err != nil { + return hcserror.New(err, title+" - failed", "") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go new file mode 100644 index 0000000000..932475723a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go @@ -0,0 +1,29 @@ +//go:build windows + +package wclayer + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) + +// CreateLayer creates a new, empty, read-only layer on the filesystem based on +// the parent layer provided. 
+func CreateLayer(ctx context.Context, path, parent string) (err error) { + title := "hcsshim::CreateLayer" + ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("path", path), + trace.StringAttribute("parent", parent)) + + err = createLayer(&stdDriverInfo, path, parent) + if err != nil { + return hcserror.New(err, title, "") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go new file mode 100644 index 0000000000..5c9d5d2507 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go @@ -0,0 +1,36 @@ +//go:build windows + +package wclayer + +import ( + "context" + "strings" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) + +// CreateScratchLayer creates and populates a new read-write layer for use by a container. +// This requires the full list of paths to all parent layers up to the base. +func CreateScratchLayer(ctx context.Context, path string, parentLayerPaths []string) (err error) { + title := "hcsshim::CreateScratchLayer" + ctx, span := oc.StartSpan(ctx, title) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("path", path), + trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) + + // Generate layer descriptors + layers, err := layerPathsToDescriptors(ctx, parentLayerPaths) + if err != nil { + return err + } + + err = createSandboxLayer(&stdDriverInfo, path, 0, layers) + if err != nil { + return hcserror.New(err, title, "") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go new file mode 100644 index 0000000000..e3bc77cbc8 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go @@ -0,0 +1,26 @@ +//go:build windows + +package wclayer + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) + +// DeactivateLayer will dismount a layer that was mounted via ActivateLayer.
+func DeactivateLayer(ctx context.Context, path string) (err error) { + title := "hcsshim::DeactivateLayer" + ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) + + err = deactivateLayer(&stdDriverInfo, path) + if err != nil { + return hcserror.New(err, title+"- failed", "") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go new file mode 100644 index 0000000000..d0a59efe12 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go @@ -0,0 +1,27 @@ +//go:build windows + +package wclayer + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) + +// DestroyLayer will remove the on-disk files representing the layer with the given +// path, including that layer's containing folder, if any. +func DestroyLayer(ctx context.Context, path string) (err error) { + title := "hcsshim::DestroyLayer" + ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) + + err = destroyLayer(&stdDriverInfo, path) + if err != nil { + return hcserror.New(err, title, "") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/doc.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/doc.go new file mode 100644 index 0000000000..dd1d555804 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/doc.go @@ -0,0 +1,4 @@ +// Package wclayer provides bindings to HCS's legacy layer management API and +// provides a higher level interface around these calls for container layer +// management. +package wclayer diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go new file mode 100644 index 0000000000..e2ec27ad08 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go @@ -0,0 +1,142 @@ +//go:build windows + +package wclayer + +import ( + "context" + "os" + "path/filepath" + "syscall" + "unsafe" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/Microsoft/hcsshim/osversion" + "go.opencensus.io/trace" +) + +// ExpandScratchSize expands the size of a layer to at least size bytes. +func ExpandScratchSize(ctx context.Context, path string, size uint64) (err error) { + title := "hcsshim::ExpandScratchSize" + ctx, span := oc.StartSpan(ctx, title) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("path", path), + trace.Int64Attribute("size", int64(size))) + + err = expandSandboxSize(&stdDriverInfo, path, size) + if err != nil { + return hcserror.New(err, title, "") + } + + // Manually expand the volume now in order to work around bugs in 19H1 and + // prerelease versions of Vb. Remove once this is fixed in Windows. 
+ if build := osversion.Build(); build >= osversion.V19H1 && build < 19020 { + err = expandSandboxVolume(ctx, path) + if err != nil { + return err + } + } + return nil +} + +type virtualStorageType struct { + DeviceID uint32 + VendorID [16]byte +} + +type openVersion2 struct { + GetInfoOnly int32 // bool but 4-byte aligned + ReadOnly int32 // bool but 4-byte aligned + ResiliencyGUID [16]byte // GUID +} + +type openVirtualDiskParameters struct { + Version uint32 // Must always be set to 2 + Version2 openVersion2 +} + +func attachVhd(path string) (syscall.Handle, error) { + var ( + defaultType virtualStorageType + handle syscall.Handle + ) + parameters := openVirtualDiskParameters{Version: 2} + err := openVirtualDisk( + &defaultType, + path, + 0, + 0, + ¶meters, + &handle) + if err != nil { + return 0, &os.PathError{Op: "OpenVirtualDisk", Path: path, Err: err} + } + err = attachVirtualDisk(handle, 0, 0, 0, 0, 0) + if err != nil { + syscall.Close(handle) + return 0, &os.PathError{Op: "AttachVirtualDisk", Path: path, Err: err} + } + return handle, nil +} + +func expandSandboxVolume(ctx context.Context, path string) error { + // Mount the sandbox VHD temporarily. + vhdPath := filepath.Join(path, "sandbox.vhdx") + vhd, err := attachVhd(vhdPath) + if err != nil { + return &os.PathError{Op: "OpenVirtualDisk", Path: vhdPath, Err: err} + } + defer syscall.Close(vhd) + + // Open the volume. + volumePath, err := GetLayerMountPath(ctx, path) + if err != nil { + return err + } + if volumePath[len(volumePath)-1] == '\\' { + volumePath = volumePath[:len(volumePath)-1] + } + volume, err := os.OpenFile(volumePath, os.O_RDWR, 0) + if err != nil { + return err + } + defer volume.Close() + + // Get the volume's underlying partition size in NTFS clusters. + var ( + partitionSize int64 + bytes uint32 + ) + const _IOCTL_DISK_GET_LENGTH_INFO = 0x0007405C + err = syscall.DeviceIoControl(syscall.Handle(volume.Fd()), _IOCTL_DISK_GET_LENGTH_INFO, nil, 0, (*byte)(unsafe.Pointer(&partitionSize)), 8, &bytes, nil) + if err != nil { + return &os.PathError{Op: "IOCTL_DISK_GET_LENGTH_INFO", Path: volume.Name(), Err: err} + } + const ( + clusterSize = 4096 + sectorSize = 512 + ) + targetClusters := partitionSize / clusterSize + + // Get the volume's current size in NTFS clusters. + var volumeSize int64 + err = getDiskFreeSpaceEx(volume.Name()+"\\", nil, &volumeSize, nil) + if err != nil { + return &os.PathError{Op: "GetDiskFreeSpaceEx", Path: volume.Name(), Err: err} + } + volumeClusters := volumeSize / clusterSize + + // Only resize the volume if there is space to grow, otherwise this will + // fail with invalid parameter. NTFS reserves one cluster. 
+ if volumeClusters+1 < targetClusters { + targetSectors := targetClusters * (clusterSize / sectorSize) + const _FSCTL_EXTEND_VOLUME = 0x000900F0 + err = syscall.DeviceIoControl(syscall.Handle(volume.Fd()), _FSCTL_EXTEND_VOLUME, (*byte)(unsafe.Pointer(&targetSectors)), 8, nil, 0, &bytes, nil) + if err != nil { + return &os.PathError{Op: "FSCTL_EXTEND_VOLUME", Path: volume.Name(), Err: err} + } + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go new file mode 100644 index 0000000000..d4c677aabf --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go @@ -0,0 +1,107 @@ +//go:build windows + +package wclayer + +import ( + "context" + "os" + "strings" + + "github.com/Microsoft/go-winio" + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) + +// ExportLayer will create a folder at exportFolderPath and fill that folder with +// the transport format version of the layer identified by layerId. This transport +// format includes any metadata required for later importing the layer (using +// ImportLayer), and requires the full list of parent layer paths in order to +// perform the export. +func ExportLayer(ctx context.Context, path string, exportFolderPath string, parentLayerPaths []string) (err error) { + title := "hcsshim::ExportLayer" + ctx, span := oc.StartSpan(ctx, title) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("path", path), + trace.StringAttribute("exportFolderPath", exportFolderPath), + trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) + + // Generate layer descriptors + layers, err := layerPathsToDescriptors(ctx, parentLayerPaths) + if err != nil { + return err + } + + err = exportLayer(&stdDriverInfo, path, exportFolderPath, layers) + if err != nil { + return hcserror.New(err, title, "") + } + return nil +} + +// LayerReader is an interface that supports reading an existing container image layer. +type LayerReader interface { + // Next advances to the next file and returns the name, size, and file info + Next() (string, int64, *winio.FileBasicInfo, error) + // LinkInfo returns the number of links and the file identifier for the current file. + LinkInfo() (uint32, *winio.FileIDInfo, error) + // Read reads data from the current file, in the format of a Win32 backup stream, and + // returns the number of bytes read. + Read(b []byte) (int, error) + // Close finishes the layer reading process and releases any resources. + Close() error +} + +// NewLayerReader returns a new layer reader for reading the contents of an on-disk layer. +// The caller must have taken the SeBackupPrivilege privilege +// to call this and any methods on the resulting LayerReader. +func NewLayerReader(ctx context.Context, path string, parentLayerPaths []string) (_ LayerReader, err error) { + ctx, span := oc.StartSpan(ctx, "hcsshim::NewLayerReader") + defer func() { + if err != nil { + oc.SetSpanStatus(span, err) + span.End() + } + }() + span.AddAttributes( + trace.StringAttribute("path", path), + trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) + + if len(parentLayerPaths) == 0 { + // This is a base layer. It gets exported differently. 
+ return newBaseLayerReader(path, span), nil + } + + exportPath, err := os.MkdirTemp("", "hcs") + if err != nil { + return nil, err + } + err = ExportLayer(ctx, path, exportPath, parentLayerPaths) + if err != nil { + os.RemoveAll(exportPath) + return nil, err + } + return &legacyLayerReaderWrapper{ + ctx: ctx, + s: span, + legacyLayerReader: newLegacyLayerReader(exportPath), + }, nil +} + +type legacyLayerReaderWrapper struct { + ctx context.Context + s *trace.Span + + *legacyLayerReader +} + +func (r *legacyLayerReaderWrapper) Close() (err error) { + defer r.s.End() + defer func() { oc.SetSpanStatus(r.s, err) }() + + err = r.legacyLayerReader.Close() + os.RemoveAll(r.root) + return err +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go new file mode 100644 index 0000000000..715e06e379 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go @@ -0,0 +1,52 @@ +//go:build windows + +package wclayer + +import ( + "context" + "syscall" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/log" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) + +// GetLayerMountPath will look for a mounted layer with the given path and return +// the path at which that layer can be accessed. This path may be a volume path +// if the layer is a mounted read-write layer, otherwise it is expected to be the +// folder path at which the layer is stored. +func GetLayerMountPath(ctx context.Context, path string) (_ string, err error) { + title := "hcsshim::GetLayerMountPath" + ctx, span := oc.StartSpan(ctx, title) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) + + var mountPathLength uintptr = 0 + + // Call the procedure itself. + log.G(ctx).Debug("Calling proc (1)") + err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, nil) + if err != nil { + return "", hcserror.New(err, title, "(first call)") + } + + // Allocate a mount path of the returned length. + if mountPathLength == 0 { + return "", nil + } + mountPathp := make([]uint16, mountPathLength) + mountPathp[0] = 0 + + // Call the procedure again + log.G(ctx).Debug("Calling proc (2)") + err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, &mountPathp[0]) + if err != nil { + return "", hcserror.New(err, title, "(second call)") + } + + mountPath := syscall.UTF16ToString(mountPathp[0:]) + span.AddAttributes(trace.StringAttribute("mountPath", mountPath)) + return mountPath, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go new file mode 100644 index 0000000000..5e400fb209 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go @@ -0,0 +1,31 @@ +//go:build windows + +package wclayer + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) + +// GetSharedBaseImages will enumerate the images stored in the common central +// image store and return descriptive info about those images for the purpose +// of registering them with the graphdriver, graph, and tagstore. 
+func GetSharedBaseImages(ctx context.Context) (_ string, err error) { + title := "hcsshim::GetSharedBaseImages" + ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + + var buffer *uint16 + err = getBaseImages(&buffer) + if err != nil { + return "", hcserror.New(err, title, "") + } + imageData := interop.ConvertAndFreeCoTaskMemString(buffer) + span.AddAttributes(trace.StringAttribute("imageData", imageData)) + return imageData, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go new file mode 100644 index 0000000000..20217ed81b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go @@ -0,0 +1,28 @@ +//go:build windows + +package wclayer + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) + +// GrantVmAccess adds access to a file for a given VM. +func GrantVmAccess(ctx context.Context, vmid string, filepath string) (err error) { + title := "hcsshim::GrantVmAccess" + ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("vm-id", vmid), + trace.StringAttribute("path", filepath)) + + err = grantVmAccess(vmid, filepath) + if err != nil { + return hcserror.New(err, title, "") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go new file mode 100644 index 0000000000..50f669a261 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go @@ -0,0 +1,167 @@ +//go:build windows + +package wclayer + +import ( + "context" + "os" + "path/filepath" + "strings" + + "github.com/Microsoft/go-winio" + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/Microsoft/hcsshim/internal/safefile" + "go.opencensus.io/trace" +) + +// ImportLayer will take the contents of the folder at importFolderPath and import +// that into a layer with the id layerId. Note that in order to correctly populate +// the layer and interpret the transport format, all parent layers must already +// be present on the system at the paths provided in parentLayerPaths. +func ImportLayer(ctx context.Context, path string, importFolderPath string, parentLayerPaths []string) (err error) { + title := "hcsshim::ImportLayer" + ctx, span := oc.StartSpan(ctx, title) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("path", path), + trace.StringAttribute("importFolderPath", importFolderPath), + trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) + + // Generate layer descriptors + layers, err := layerPathsToDescriptors(ctx, parentLayerPaths) + if err != nil { + return err + } + + err = importLayer(&stdDriverInfo, path, importFolderPath, layers) + if err != nil { + return hcserror.New(err, title, "") + } + return nil +} + +// LayerWriter is an interface that supports writing a new container image layer. +type LayerWriter interface { + // Add adds a file to the layer with given metadata.
+ Add(name string, fileInfo *winio.FileBasicInfo) error + // AddLink adds a hard link to the layer. The target must already have been added. + AddLink(name string, target string) error + // Remove removes a file that was present in a parent layer from the layer. + Remove(name string) error + // Write writes data to the current file. The data must be in the format of a Win32 + // backup stream. + Write(b []byte) (int, error) + // Close finishes the layer writing process and releases any resources. + Close() error +} + +type legacyLayerWriterWrapper struct { + ctx context.Context + s *trace.Span + + *legacyLayerWriter + path string + parentLayerPaths []string +} + +func (r *legacyLayerWriterWrapper) Close() (err error) { + defer r.s.End() + defer func() { oc.SetSpanStatus(r.s, err) }() + defer os.RemoveAll(r.root.Name()) + defer r.legacyLayerWriter.CloseRoots() + + err = r.legacyLayerWriter.Close() + if err != nil { + return err + } + + if err = ImportLayer(r.ctx, r.destRoot.Name(), r.path, r.parentLayerPaths); err != nil { + return err + } + for _, name := range r.Tombstones { + if err = safefile.RemoveRelative(name, r.destRoot); err != nil && !os.IsNotExist(err) { + return err + } + } + // Add any hard links that were collected. + for _, lnk := range r.PendingLinks { + if err = safefile.RemoveRelative(lnk.Path, r.destRoot); err != nil && !os.IsNotExist(err) { + return err + } + if err = safefile.LinkRelative(lnk.Target, lnk.TargetRoot, lnk.Path, r.destRoot); err != nil { + return err + } + } + + // The reapplyDirectoryTimes must be called AFTER we are done with Tombstone + // deletion and hard link creation. This is because Tombstone deletion and hard link + // creation updates the directory last write timestamps so that will change the + // timestamps added by the `Add` call. Some container applications depend on the + // correctness of these timestamps and so we should change the timestamps back to + // the original value (i.e the value provided in the Add call) after this + // processing is done. + err = reapplyDirectoryTimes(r.destRoot, r.changedDi) + if err != nil { + return err + } + + // Prepare the utility VM for use if one is present in the layer. + if r.HasUtilityVM { + err := safefile.EnsureNotReparsePointRelative("UtilityVM", r.destRoot) + if err != nil { + return err + } + err = ProcessUtilityVMImage(r.ctx, filepath.Join(r.destRoot.Name(), "UtilityVM")) + if err != nil { + return err + } + } + return nil +} + +// NewLayerWriter returns a new layer writer for creating a layer on disk. +// The caller must have taken the SeBackupPrivilege and SeRestorePrivilege privileges +// to call this and any methods on the resulting LayerWriter. +func NewLayerWriter(ctx context.Context, path string, parentLayerPaths []string) (_ LayerWriter, err error) { + ctx, span := oc.StartSpan(ctx, "hcsshim::NewLayerWriter") + defer func() { + if err != nil { + oc.SetSpanStatus(span, err) + span.End() + } + }() + span.AddAttributes( + trace.StringAttribute("path", path), + trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) + + if len(parentLayerPaths) == 0 { + // This is a base layer. It gets imported differently. 
+ f, err := safefile.OpenRoot(path) + if err != nil { + return nil, err + } + return &baseLayerWriter{ + ctx: ctx, + s: span, + root: f, + }, nil + } + + importPath, err := os.MkdirTemp("", "hcs") + if err != nil { + return nil, err + } + w, err := newLegacyLayerWriter(importPath, parentLayerPaths, path) + if err != nil { + return nil, err + } + return &legacyLayerWriterWrapper{ + ctx: ctx, + s: span, + legacyLayerWriter: w, + path: importPath, + parentLayerPaths: parentLayerPaths, + }, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go new file mode 100644 index 0000000000..4d82977ea1 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go @@ -0,0 +1,30 @@ +//go:build windows + +package wclayer + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) + +// LayerExists will return true if a layer with the given id exists and is known +// to the system. +func LayerExists(ctx context.Context, path string) (_ bool, err error) { + title := "hcsshim::LayerExists" + ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) + + // Call the procedure itself. + var exists uint32 + err = layerExists(&stdDriverInfo, path, &exists) + if err != nil { + return false, hcserror.New(err, title, "") + } + span.AddAttributes(trace.BoolAttribute("layer-exists", exists != 0)) + return exists != 0, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go new file mode 100644 index 0000000000..d4805f1444 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go @@ -0,0 +1,24 @@ +//go:build windows + +package wclayer + +import ( + "context" + "path/filepath" + + "github.com/Microsoft/go-winio/pkg/guid" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) + +// LayerID returns the layer ID of a layer on disk. +func LayerID(ctx context.Context, path string) (_ guid.GUID, err error) { + title := "hcsshim::LayerID" + ctx, span := oc.StartSpan(ctx, title) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) + + _, file := filepath.Split(path) + return NameToGuid(ctx, file) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go new file mode 100644 index 0000000000..ee17dd3d1a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go @@ -0,0 +1,127 @@ +//go:build windows + +package wclayer + +// This file contains utility functions to support storage (graph) related +// functionality. 
+ +import ( + "context" + "fmt" + "os" + "path/filepath" + "strconv" + "syscall" + + "github.com/Microsoft/go-winio/pkg/guid" + "github.com/sirupsen/logrus" +) + +/* +To pass into syscall, we need a struct matching the following: + +enum GraphDriverType +{ + DiffDriver, + FilterDriver +}; + +struct DriverInfo { + GraphDriverType Flavour; + LPCWSTR HomeDir; +}; +*/ + +type driverInfo struct { + Flavour int + HomeDirp *uint16 +} + +var ( + utf16EmptyString uint16 + stdDriverInfo = driverInfo{1, &utf16EmptyString} +) + +/* +To pass into syscall, we need a struct matching the following: + +typedef struct _WC_LAYER_DESCRIPTOR { + + // + // The ID of the layer + // + + GUID LayerId; + + // + // Additional flags + // + + union { + struct { + ULONG Reserved : 31; + ULONG Dirty : 1; // Created from sandbox as a result of snapshot + }; + ULONG Value; + } Flags; + + // + // Path to the layer root directory, null-terminated + // + + PCWSTR Path; + +} WC_LAYER_DESCRIPTOR, *PWC_LAYER_DESCRIPTOR; +*/ +type WC_LAYER_DESCRIPTOR struct { + LayerId guid.GUID + Flags uint32 + Pathp *uint16 +} + +func layerPathsToDescriptors(ctx context.Context, parentLayerPaths []string) ([]WC_LAYER_DESCRIPTOR, error) { + // Array of descriptors that gets constructed. + var layers []WC_LAYER_DESCRIPTOR + + for i := 0; i < len(parentLayerPaths); i++ { + g, err := LayerID(ctx, parentLayerPaths[i]) + if err != nil { + logrus.WithError(err).Debug("Failed to convert name to guid") + return nil, err + } + + p, err := syscall.UTF16PtrFromString(parentLayerPaths[i]) + if err != nil { + logrus.WithError(err).Debug("Failed conversion of parentLayerPath to pointer") + return nil, err + } + + layers = append(layers, WC_LAYER_DESCRIPTOR{ + LayerId: g, + Flags: 0, + Pathp: p, + }) + } + + return layers, nil +} + +// GetLayerUvmBuild looks for a file named `uvmbuildversion` at `layerPath\uvmbuildversion` and returns the +// build number of the UVM from that file. +func GetLayerUvmBuild(layerPath string) (uint16, error) { + data, err := os.ReadFile(filepath.Join(layerPath, UvmBuildFileName)) + if err != nil { + return 0, err + } + ver, err := strconv.ParseUint(string(data), 10, 16) + if err != nil { + return 0, err + } + return uint16(ver), nil +} + +// WriteLayerUvmBuildFile writes a file at path `layerPath\uvmbuildversion` that contains the given `build` +// version for future reference. 
+func WriteLayerUvmBuildFile(layerPath string, build uint16) error { + return os.WriteFile(filepath.Join(layerPath, UvmBuildFileName), []byte(fmt.Sprintf("%d", build)), 0777) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go new file mode 100644 index 0000000000..807d833108 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go @@ -0,0 +1,832 @@ +//go:build windows + +package wclayer + +import ( + "bufio" + "encoding/binary" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/Microsoft/go-winio" + "github.com/Microsoft/hcsshim/internal/longpath" + "github.com/Microsoft/hcsshim/internal/safefile" + "github.com/Microsoft/hcsshim/internal/winapi" +) + +var errorIterationCanceled = errors.New("") + +var mutatedUtilityVMFiles = map[string]bool{ + `EFI\Microsoft\Boot\BCD`: true, + `EFI\Microsoft\Boot\BCD.LOG`: true, + `EFI\Microsoft\Boot\BCD.LOG1`: true, + `EFI\Microsoft\Boot\BCD.LOG2`: true, +} + +const ( + filesPath = `Files` + HivesPath = `Hives` + UtilityVMPath = `UtilityVM` + UtilityVMFilesPath = `UtilityVM\Files` + RegFilesPath = `Files\Windows\System32\config` + BcdFilePath = `UtilityVM\Files\EFI\Microsoft\Boot\BCD` + BootMgrFilePath = `UtilityVM\Files\EFI\Microsoft\Boot\bootmgfw.efi` + ContainerBaseVhd = `blank-base.vhdx` + ContainerScratchVhd = `blank.vhdx` + UtilityVMBaseVhd = `SystemTemplateBase.vhdx` + UtilityVMScratchVhd = `SystemTemplate.vhdx` + LayoutFileName = `layout` + UvmBuildFileName = `uvmbuildversion` +) + +func openFileOrDir(path string, mode uint32, createDisposition uint32) (file *os.File, err error) { + return winio.OpenForBackup(path, mode, syscall.FILE_SHARE_READ, createDisposition) +} + +func hasPathPrefix(p, prefix string) bool { + return strings.HasPrefix(p, prefix) && len(p) > len(prefix) && p[len(prefix)] == '\\' +} + +type fileEntry struct { + path string + fi os.FileInfo + err error +} + +type legacyLayerReader struct { + root string + result chan *fileEntry + proceed chan bool + currentFile *os.File + backupReader *winio.BackupFileReader +} + +// newLegacyLayerReader returns a new LayerReader that can read the Windows +// container layer transport format from disk. 
+func newLegacyLayerReader(root string) *legacyLayerReader { + r := &legacyLayerReader{ + root: root, + result: make(chan *fileEntry), + proceed: make(chan bool), + } + go r.walk() + return r +} + +func readTombstones(path string) (map[string]([]string), error) { + tf, err := os.Open(filepath.Join(path, "tombstones.txt")) + if err != nil { + return nil, err + } + defer tf.Close() + s := bufio.NewScanner(tf) + if !s.Scan() || s.Text() != "\xef\xbb\xbfVersion 1.0" { + return nil, errors.New("invalid tombstones file") + } + + ts := make(map[string]([]string)) + for s.Scan() { + t := filepath.Join(filesPath, s.Text()[1:]) // skip leading `\` + dir := filepath.Dir(t) + ts[dir] = append(ts[dir], t) + } + if err = s.Err(); err != nil { + return nil, err + } + + return ts, nil +} + +func (r *legacyLayerReader) walkUntilCancelled() error { + root, err := longpath.LongAbs(r.root) + if err != nil { + return err + } + + r.root = root + ts, err := readTombstones(r.root) + if err != nil { + return err + } + + err = filepath.Walk(r.root, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + // Indirect fix for https://github.com/moby/moby/issues/32838#issuecomment-343610048. + // Handle failure from what may be a golang bug in the conversion of + // UTF16 to UTF8 in files which are left in the recycle bin. Os.Lstat + // which is called by filepath.Walk will fail when a filename contains + // unicode characters. Skip the recycle bin regardless which is goodness. + if strings.EqualFold(path, filepath.Join(r.root, `Files\$Recycle.Bin`)) && info.IsDir() { + return filepath.SkipDir + } + + if path == r.root || path == filepath.Join(r.root, "tombstones.txt") || strings.HasSuffix(path, ".$wcidirs$") { + return nil + } + + r.result <- &fileEntry{path, info, nil} + if !<-r.proceed { + return errorIterationCanceled + } + + // List all the tombstones. + if info.IsDir() { + relPath, err := filepath.Rel(r.root, path) + if err != nil { + return err + } + if dts, ok := ts[relPath]; ok { + for _, t := range dts { + r.result <- &fileEntry{filepath.Join(r.root, t), nil, nil} + if !<-r.proceed { + return errorIterationCanceled + } + } + } + } + return nil + }) + if err == errorIterationCanceled { + return nil + } + if err == nil { + return io.EOF + } + return err +} + +func (r *legacyLayerReader) walk() { + defer close(r.result) + if !<-r.proceed { + return + } + + err := r.walkUntilCancelled() + if err != nil { + for { + r.result <- &fileEntry{err: err} + if !<-r.proceed { + return + } + } + } +} + +func (r *legacyLayerReader) reset() { + if r.backupReader != nil { + r.backupReader.Close() + r.backupReader = nil + } + if r.currentFile != nil { + r.currentFile.Close() + r.currentFile = nil + } +} + +func findBackupStreamSize(r io.Reader) (int64, error) { + br := winio.NewBackupStreamReader(r) + for { + hdr, err := br.Next() + if err != nil { + if err == io.EOF { + err = nil + } + return 0, err + } + if hdr.Id == winio.BackupData { + return hdr.Size, nil + } + } +} + +func (r *legacyLayerReader) Next() (path string, size int64, fileInfo *winio.FileBasicInfo, err error) { + r.reset() + r.proceed <- true + fe := <-r.result + if fe == nil { + err = errors.New("LegacyLayerReader closed") + return + } + if fe.err != nil { + err = fe.err + return + } + + path, err = filepath.Rel(r.root, fe.path) + if err != nil { + return + } + + if fe.fi == nil { + // This is a tombstone. Return a nil fileInfo. 
+ return + } + + if fe.fi.IsDir() && hasPathPrefix(path, filesPath) { + fe.path += ".$wcidirs$" + } + + f, err := openFileOrDir(fe.path, syscall.GENERIC_READ, syscall.OPEN_EXISTING) + if err != nil { + return + } + defer func() { + if f != nil { + f.Close() + } + }() + + fileInfo, err = winio.GetFileBasicInfo(f) + if err != nil { + return + } + + if !hasPathPrefix(path, filesPath) { + size = fe.fi.Size() + r.backupReader = winio.NewBackupFileReader(f, false) + if path == HivesPath || path == filesPath { + // The Hives directory has a non-deterministic file time because of the + // nature of the import process. Use the times from System_Delta. + var g *os.File + g, err = os.Open(filepath.Join(r.root, HivesPath, `System_Delta`)) + if err != nil { + return + } + attr := fileInfo.FileAttributes + fileInfo, err = winio.GetFileBasicInfo(g) + g.Close() + if err != nil { + return + } + fileInfo.FileAttributes = attr + } + + // The creation time and access time get reset for files outside of the Files path. + fileInfo.CreationTime = fileInfo.LastWriteTime + fileInfo.LastAccessTime = fileInfo.LastWriteTime + } else { + // The file attributes are written before the backup stream. + var attr uint32 + err = binary.Read(f, binary.LittleEndian, &attr) + if err != nil { + return + } + fileInfo.FileAttributes = attr + beginning := int64(4) + + // Find the accurate file size. + if !fe.fi.IsDir() { + size, err = findBackupStreamSize(f) + if err != nil { + err = &os.PathError{Op: "findBackupStreamSize", Path: fe.path, Err: err} + return + } + } + + // Return back to the beginning of the backup stream. + _, err = f.Seek(beginning, 0) + if err != nil { + return + } + } + + r.currentFile = f + f = nil + return +} + +func (r *legacyLayerReader) LinkInfo() (uint32, *winio.FileIDInfo, error) { + fileStandardInfo, err := winio.GetFileStandardInfo(r.currentFile) + if err != nil { + return 0, nil, err + } + fileIDInfo, err := winio.GetFileID(r.currentFile) + if err != nil { + return 0, nil, err + } + return fileStandardInfo.NumberOfLinks, fileIDInfo, nil +} + +func (r *legacyLayerReader) Read(b []byte) (int, error) { + if r.backupReader == nil { + if r.currentFile == nil { + return 0, io.EOF + } + return r.currentFile.Read(b) + } + return r.backupReader.Read(b) +} + +func (r *legacyLayerReader) Seek(offset int64, whence int) (int64, error) { + if r.backupReader == nil { + if r.currentFile == nil { + return 0, errors.New("no current file") + } + return r.currentFile.Seek(offset, whence) + } + return 0, errors.New("seek not supported on this stream") +} + +func (r *legacyLayerReader) Close() error { + r.proceed <- false + <-r.result + r.reset() + return nil +} + +type pendingLink struct { + Path, Target string + TargetRoot *os.File +} + +type pendingDir struct { + Path string + Root *os.File +} + +type legacyLayerWriter struct { + root *os.File + destRoot *os.File + parentRoots []*os.File + currentFile *os.File + bufWriter *bufio.Writer + currentFileName string + currentFileRoot *os.File + backupWriter *winio.BackupFileWriter + Tombstones []string + HasUtilityVM bool + changedDi []dirInfo + addedFiles map[string]bool + PendingLinks []pendingLink + pendingDirs []pendingDir + currentIsDir bool +} + +// newLegacyLayerWriter returns a LayerWriter that can write the container layer +// transport format to disk. 
+func newLegacyLayerWriter(root string, parentRoots []string, destRoot string) (w *legacyLayerWriter, err error) { + w = &legacyLayerWriter{ + addedFiles: make(map[string]bool), + } + defer func() { + if err != nil { + w.CloseRoots() + w = nil + } + }() + w.root, err = safefile.OpenRoot(root) + if err != nil { + return + } + w.destRoot, err = safefile.OpenRoot(destRoot) + if err != nil { + return + } + for _, r := range parentRoots { + f, err := safefile.OpenRoot(r) + if err != nil { + return w, err + } + w.parentRoots = append(w.parentRoots, f) + } + w.bufWriter = bufio.NewWriterSize(io.Discard, 65536) + return +} + +func (w *legacyLayerWriter) CloseRoots() { + if w.root != nil { + w.root.Close() + w.root = nil + } + if w.destRoot != nil { + w.destRoot.Close() + w.destRoot = nil + } + for i := range w.parentRoots { + _ = w.parentRoots[i].Close() + } + w.parentRoots = nil +} + +func (w *legacyLayerWriter) initUtilityVM() error { + if !w.HasUtilityVM { + err := safefile.MkdirRelative(UtilityVMPath, w.destRoot) + if err != nil { + return err + } + // Server 2016 does not support multiple layers for the utility VM, so + // clone the utility VM from the parent layer into this layer. Use hard + // links to avoid unnecessary copying, since most of the files are + // immutable. + err = cloneTree(w.parentRoots[0], w.destRoot, UtilityVMFilesPath, mutatedUtilityVMFiles) + if err != nil { + return fmt.Errorf("cloning the parent utility VM image failed: %s", err) + } + w.HasUtilityVM = true + } + return nil +} + +func (w *legacyLayerWriter) reset() error { + err := w.bufWriter.Flush() + if err != nil { + return err + } + w.bufWriter.Reset(io.Discard) + if w.currentIsDir { + r := w.currentFile + br := winio.NewBackupStreamReader(r) + // Seek to the beginning of the backup stream, skipping the fileattrs + if _, err := r.Seek(4, io.SeekStart); err != nil { + return err + } + + for { + bhdr, err := br.Next() + if err == io.EOF { + // end of backupstream data + break + } + if err != nil { + return err + } + switch bhdr.Id { + case winio.BackupReparseData: + // The current file is a `.$wcidirs$` metadata file that + // describes a directory reparse point. 
Delete the placeholder + // directory to prevent future files being added into the + // destination of the reparse point during the ImportLayer call + if err := safefile.RemoveRelative(w.currentFileName, w.currentFileRoot); err != nil { + return err + } + w.pendingDirs = append(w.pendingDirs, pendingDir{Path: w.currentFileName, Root: w.currentFileRoot}) + default: + // ignore all other stream types, as we only care about directory reparse points + } + } + w.currentIsDir = false + } + if w.backupWriter != nil { + w.backupWriter.Close() + w.backupWriter = nil + } + if w.currentFile != nil { + w.currentFile.Close() + w.currentFile = nil + w.currentFileName = "" + w.currentFileRoot = nil + } + return nil +} + +// copyFileWithMetadata copies a file using the backup/restore APIs in order to preserve metadata +func copyFileWithMetadata(srcRoot, destRoot *os.File, subPath string, isDir bool) (fileInfo *winio.FileBasicInfo, err error) { + src, err := safefile.OpenRelative( + subPath, + srcRoot, + syscall.GENERIC_READ|winio.ACCESS_SYSTEM_SECURITY, + syscall.FILE_SHARE_READ, + winapi.FILE_OPEN, + winapi.FILE_OPEN_REPARSE_POINT) + if err != nil { + return nil, err + } + defer src.Close() + srcr := winio.NewBackupFileReader(src, true) + defer srcr.Close() + + fileInfo, err = winio.GetFileBasicInfo(src) + if err != nil { + return nil, err + } + + extraFlags := uint32(0) + if isDir { + extraFlags |= winapi.FILE_DIRECTORY_FILE + } + dest, err := safefile.OpenRelative( + subPath, + destRoot, + syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY, + syscall.FILE_SHARE_READ, + winapi.FILE_CREATE, + extraFlags) + if err != nil { + return nil, err + } + defer dest.Close() + + err = winio.SetFileBasicInfo(dest, fileInfo) + if err != nil { + return nil, err + } + + destw := winio.NewBackupFileWriter(dest, true) + defer func() { + cerr := destw.Close() + if err == nil { + err = cerr + } + }() + + _, err = io.Copy(destw, srcr) + if err != nil { + return nil, err + } + + return fileInfo, nil +} + +// cloneTree clones a directory tree using hard links. It skips hard links for +// the file names in the provided map and just copies those files. +func cloneTree(srcRoot *os.File, destRoot *os.File, subPath string, mutatedFiles map[string]bool) error { + var di []dirInfo + err := safefile.EnsureNotReparsePointRelative(subPath, srcRoot) + if err != nil { + return err + } + err = filepath.Walk(filepath.Join(srcRoot.Name(), subPath), func(srcFilePath string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + relPath, err := filepath.Rel(srcRoot.Name(), srcFilePath) + if err != nil { + return err + } + + fileAttributes := info.Sys().(*syscall.Win32FileAttributeData).FileAttributes + // Directories, reparse points, and files that will be mutated during + // utility VM import must be copied. All other files can be hard linked. + isReparsePoint := fileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT != 0 + // In go1.9, FileInfo.IsDir() returns false if the directory is also a symlink. 
+ // See: https://github.com/golang/go/commit/1989921aef60c83e6f9127a8448fb5ede10e9acc + // Fixes the problem by checking syscall.FILE_ATTRIBUTE_DIRECTORY directly + isDir := fileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 + + if isDir || isReparsePoint || mutatedFiles[relPath] { + fi, err := copyFileWithMetadata(srcRoot, destRoot, relPath, isDir) + if err != nil { + return err + } + if isDir { + di = append(di, dirInfo{path: relPath, fileInfo: *fi}) + } + } else { + err = safefile.LinkRelative(relPath, srcRoot, relPath, destRoot) + if err != nil { + return err + } + } + + return nil + }) + if err != nil { + return err + } + + return reapplyDirectoryTimes(destRoot, di) +} + +func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) error { + if err := w.reset(); err != nil { + return err + } + + if name == UtilityVMPath { + return w.initUtilityVM() + } + + if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { + w.changedDi = append(w.changedDi, dirInfo{path: name, fileInfo: *fileInfo}) + } + + name = filepath.Clean(name) + if hasPathPrefix(name, UtilityVMPath) { + if !w.HasUtilityVM { + return errors.New("missing UtilityVM directory") + } + if !hasPathPrefix(name, UtilityVMFilesPath) && name != UtilityVMFilesPath { + return errors.New("invalid UtilityVM layer") + } + createDisposition := uint32(winapi.FILE_OPEN) + if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { + st, err := safefile.LstatRelative(name, w.destRoot) + if err != nil && !os.IsNotExist(err) { + return err + } + if st != nil { + // Delete the existing file/directory if it is not the same type as this directory. + existingAttr := st.Sys().(*syscall.Win32FileAttributeData).FileAttributes + if (uint32(fileInfo.FileAttributes)^existingAttr)&(syscall.FILE_ATTRIBUTE_DIRECTORY|syscall.FILE_ATTRIBUTE_REPARSE_POINT) != 0 { + if err = safefile.RemoveAllRelative(name, w.destRoot); err != nil { + return err + } + st = nil + } + } + if st == nil { + if err = safefile.MkdirRelative(name, w.destRoot); err != nil { + return err + } + } + } else { + // Overwrite any existing hard link. 
+ err := safefile.RemoveRelative(name, w.destRoot) + if err != nil && !os.IsNotExist(err) { + return err + } + createDisposition = winapi.FILE_CREATE + } + + f, err := safefile.OpenRelative( + name, + w.destRoot, + syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY, + syscall.FILE_SHARE_READ, + createDisposition, + winapi.FILE_OPEN_REPARSE_POINT, + ) + if err != nil { + return err + } + defer func() { + if f != nil { + f.Close() + _ = safefile.RemoveRelative(name, w.destRoot) + } + }() + + err = winio.SetFileBasicInfo(f, fileInfo) + if err != nil { + return err + } + + w.backupWriter = winio.NewBackupFileWriter(f, true) + w.bufWriter.Reset(w.backupWriter) + w.currentFile = f + w.currentFileName = name + w.currentFileRoot = w.destRoot + w.addedFiles[name] = true + f = nil + return nil + } + + fname := name + if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { + err := safefile.MkdirRelative(name, w.root) + if err != nil { + return err + } + fname += ".$wcidirs$" + w.currentIsDir = true + } + + f, err := safefile.OpenRelative(fname, w.root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, winapi.FILE_CREATE, 0) + if err != nil { + return err + } + defer func() { + if f != nil { + f.Close() + _ = safefile.RemoveRelative(fname, w.root) + } + }() + + strippedFi := *fileInfo + strippedFi.FileAttributes = 0 + err = winio.SetFileBasicInfo(f, &strippedFi) + if err != nil { + return err + } + + if hasPathPrefix(name, HivesPath) { + w.backupWriter = winio.NewBackupFileWriter(f, false) + w.bufWriter.Reset(w.backupWriter) + } else { + w.bufWriter.Reset(f) + // The file attributes are written before the stream. + err = binary.Write(w.bufWriter, binary.LittleEndian, uint32(fileInfo.FileAttributes)) + if err != nil { + w.bufWriter.Reset(io.Discard) + return err + } + } + + w.currentFile = f + w.currentFileName = name + w.currentFileRoot = w.root + w.addedFiles[name] = true + f = nil + return nil +} + +func (w *legacyLayerWriter) AddLink(name string, target string) error { + if err := w.reset(); err != nil { + return err + } + + target = filepath.Clean(target) + var roots []*os.File + if hasPathPrefix(target, filesPath) { + // Look for cross-layer hard link targets in the parent layers, since + // nothing is in the destination path yet. + roots = w.parentRoots + } else if hasPathPrefix(target, UtilityVMFilesPath) { + // Since the utility VM is fully cloned into the destination path + // already, look for cross-layer hard link targets directly in the + // destination path. + roots = []*os.File{w.destRoot} + } + + if roots == nil || (!hasPathPrefix(name, filesPath) && !hasPathPrefix(name, UtilityVMFilesPath)) { + return errors.New("invalid hard link in layer") + } + + // Try to find the target of the link in a previously added file. If that + // fails, search in parent layers. + var selectedRoot *os.File + if _, ok := w.addedFiles[target]; ok { + selectedRoot = w.destRoot + } else { + for _, r := range roots { + if _, err := safefile.LstatRelative(target, r); err != nil { + if !os.IsNotExist(err) { + return err + } + } else { + selectedRoot = r + break + } + } + if selectedRoot == nil { + return fmt.Errorf("failed to find link target for '%s' -> '%s'", name, target) + } + } + + // The link can't be written until after the ImportLayer call. 
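+ // A sketch of that replay, for illustration only (the real loop lives in
+ // the import caller and includes error handling):
+ //
+ //	for _, pl := range w.PendingLinks {
+ //		_ = safefile.LinkRelative(pl.Target, pl.TargetRoot, pl.Path, destRoot)
+ //	}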
+ w.PendingLinks = append(w.PendingLinks, pendingLink{
+ Path: name,
+ Target: target,
+ TargetRoot: selectedRoot,
+ })
+ w.addedFiles[name] = true
+ return nil
+}
+
+func (w *legacyLayerWriter) Remove(name string) error {
+ name = filepath.Clean(name)
+ if hasPathPrefix(name, filesPath) {
+ w.Tombstones = append(w.Tombstones, name)
+ } else if hasPathPrefix(name, UtilityVMFilesPath) {
+ err := w.initUtilityVM()
+ if err != nil {
+ return err
+ }
+ // Make sure the path exists; os.RemoveAll will not fail if the file is
+ // already gone, and this needs to be a fatal error for diagnostics
+ // purposes.
+ if _, err := safefile.LstatRelative(name, w.destRoot); err != nil {
+ return err
+ }
+ err = safefile.RemoveAllRelative(name, w.destRoot)
+ if err != nil {
+ return err
+ }
+ } else {
+ return fmt.Errorf("invalid tombstone %s", name)
+ }
+
+ return nil
+}
+
+func (w *legacyLayerWriter) Write(b []byte) (int, error) {
+ if w.backupWriter == nil && w.currentFile == nil {
+ return 0, errors.New("closed")
+ }
+ return w.bufWriter.Write(b)
+}
+
+func (w *legacyLayerWriter) Close() error {
+ if err := w.reset(); err != nil {
+ return err
+ }
+ if err := safefile.RemoveRelative("tombstones.txt", w.root); err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ for _, pd := range w.pendingDirs {
+ err := safefile.MkdirRelative(pd.Path, pd.Root)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go
new file mode 100644
index 0000000000..c45fa2750c
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go
@@ -0,0 +1,31 @@
+//go:build windows
+
+package wclayer
+
+import (
+ "context"
+
+ "github.com/Microsoft/go-winio/pkg/guid"
+ "github.com/Microsoft/hcsshim/internal/hcserror"
+ "github.com/Microsoft/hcsshim/internal/oc"
+ "go.opencensus.io/trace"
+)
+
+// NameToGuid converts the given string into a GUID using the algorithm in the
+// Host Compute Service, ensuring GUIDs generated with the same string are common
+// across all clients.
+func NameToGuid(ctx context.Context, name string) (_ guid.GUID, err error) {
+ title := "hcsshim::NameToGuid"
+ ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+ defer span.End()
+ defer func() { oc.SetSpanStatus(span, err) }()
+ span.AddAttributes(trace.StringAttribute("objectName", name))
+
+ var id guid.GUID
+ err = nameToGuid(name, &id)
+ if err != nil {
+ return guid.GUID{}, hcserror.New(err, title, "")
+ }
+ span.AddAttributes(trace.StringAttribute("guid", id.String()))
+ return id, nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go
new file mode 100644
index 0000000000..b66e071245
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go
@@ -0,0 +1,46 @@
+//go:build windows
+
+package wclayer
+
+import (
+ "context"
+ "strings"
+ "sync"
+
+ "github.com/Microsoft/hcsshim/internal/hcserror"
+ "github.com/Microsoft/hcsshim/internal/oc"
+ "go.opencensus.io/trace"
+)
+
+var prepareLayerLock sync.Mutex
+
+// PrepareLayer finds a mounted read-write layer matching path and enables the
+// filesystem filter for use on that layer.
This requires the paths to all +// parent layers, and is necessary in order to view or interact with the layer +// as an actual filesystem (reading and writing files, creating directories, etc). +// Disabling the filter must be done via UnprepareLayer. +func PrepareLayer(ctx context.Context, path string, parentLayerPaths []string) (err error) { + title := "hcsshim::PrepareLayer" + ctx, span := oc.StartSpan(ctx, title) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("path", path), + trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) + + // Generate layer descriptors + layers, err := layerPathsToDescriptors(ctx, parentLayerPaths) + if err != nil { + return err + } + + // This lock is a temporary workaround for a Windows bug. Only allowing one + // call to prepareLayer at a time vastly reduces the chance of a timeout. + prepareLayerLock.Lock() + defer prepareLayerLock.Unlock() + err = prepareLayer(&stdDriverInfo, path, layers) + if err != nil { + return hcserror.New(err, title, "") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go new file mode 100644 index 0000000000..7c49cbda45 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go @@ -0,0 +1,43 @@ +//go:build windows + +package wclayer + +import ( + "context" + "os" + + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) + +// ProcessBaseLayer post-processes a base layer that has had its files extracted. +// The files should have been extracted to \Files. +func ProcessBaseLayer(ctx context.Context, path string) (err error) { + title := "hcsshim::ProcessBaseLayer" + ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) + + err = processBaseImage(path) + if err != nil { + return &os.PathError{Op: title, Path: path, Err: err} + } + return nil +} + +// ProcessUtilityVMImage post-processes a utility VM image that has had its files extracted. +// The files should have been extracted to \Files. +func ProcessUtilityVMImage(ctx context.Context, path string) (err error) { + title := "hcsshim::ProcessUtilityVMImage" + ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) + + err = processUtilityImage(path) + if err != nil { + return &os.PathError{Op: title, Path: path, Err: err} + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go new file mode 100644 index 0000000000..fe20702c18 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go @@ -0,0 +1,27 @@ +//go:build windows + +package wclayer + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) + +// UnprepareLayer disables the filesystem filter for the read-write layer with +// the given id. 
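+//
+// A typical pairing, sketched for illustration only (error handling elided):
+//
+//	if err := PrepareLayer(ctx, path, parentLayerPaths); err != nil {
+//		return err
+//	}
+//	defer UnprepareLayer(ctx, path)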
+func UnprepareLayer(ctx context.Context, path string) (err error) { + title := "hcsshim::UnprepareLayer" + ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) + + err = unprepareLayer(&stdDriverInfo, path) + if err != nil { + return hcserror.New(err, title, "") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go new file mode 100644 index 0000000000..39682b8171 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go @@ -0,0 +1,34 @@ +//go:build windows + +package wclayer + +import "github.com/Microsoft/go-winio/pkg/guid" + +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go wclayer.go + +//sys activateLayer(info *driverInfo, id string) (hr error) = vmcompute.ActivateLayer? +//sys copyLayer(info *driverInfo, srcId string, dstId string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.CopyLayer? +//sys createLayer(info *driverInfo, id string, parent string) (hr error) = vmcompute.CreateLayer? +//sys createSandboxLayer(info *driverInfo, id string, parent uintptr, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.CreateSandboxLayer? +//sys expandSandboxSize(info *driverInfo, id string, size uint64) (hr error) = vmcompute.ExpandSandboxSize? +//sys deactivateLayer(info *driverInfo, id string) (hr error) = vmcompute.DeactivateLayer? +//sys destroyLayer(info *driverInfo, id string) (hr error) = vmcompute.DestroyLayer? +//sys exportLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.ExportLayer? +//sys getLayerMountPath(info *driverInfo, id string, length *uintptr, buffer *uint16) (hr error) = vmcompute.GetLayerMountPath? +//sys getBaseImages(buffer **uint16) (hr error) = vmcompute.GetBaseImages? +//sys importLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.ImportLayer? +//sys layerExists(info *driverInfo, id string, exists *uint32) (hr error) = vmcompute.LayerExists? +//sys nameToGuid(name string, guid *_guid) (hr error) = vmcompute.NameToGuid? +//sys prepareLayer(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.PrepareLayer? +//sys unprepareLayer(info *driverInfo, id string) (hr error) = vmcompute.UnprepareLayer? +//sys processBaseImage(path string) (hr error) = vmcompute.ProcessBaseImage? +//sys processUtilityImage(path string) (hr error) = vmcompute.ProcessUtilityImage? + +//sys grantVmAccess(vmid string, filepath string) (hr error) = vmcompute.GrantVmAccess? 
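+
+// The trailing "?" on the vmcompute directives above makes mkwinsyscall emit
+// wrappers that call Find() on the lazy proc before invoking it, so a missing
+// export surfaces as an ordinary error rather than a panic. A caller-side
+// sketch (illustrative only):
+//
+//	if _, err := NameToGuid(ctx, "layer1"); err != nil {
+//		// vmcompute.dll, or this particular export, is unavailable here
+//	}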
+
+//sys openVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) [failretval != 0] = virtdisk.OpenVirtualDisk
+//sys attachVirtualDisk(handle syscall.Handle, sd uintptr, flags uint32, providerFlags uint32, params uintptr, overlapped uintptr) (err error) [failretval != 0] = virtdisk.AttachVirtualDisk
+
+//sys getDiskFreeSpaceEx(directoryName string, freeBytesAvailableToCaller *int64, totalNumberOfBytes *int64, totalNumberOfFreeBytes *int64) (err error) = GetDiskFreeSpaceExW
+
+type _guid = guid.GUID
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go
new file mode 100644
index 0000000000..0cb509c46f
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go
@@ -0,0 +1,578 @@
+//go:build windows
+
+// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
+
+package wclayer
+
+import (
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+var _ unsafe.Pointer
+
+// Do the interface allocations only once for common
+// Errno values.
+const (
+ errnoERROR_IO_PENDING = 997
+)
+
+var (
+ errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+ errERROR_EINVAL error = syscall.EINVAL
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+ switch e {
+ case 0:
+ return errERROR_EINVAL
+ case errnoERROR_IO_PENDING:
+ return errERROR_IO_PENDING
+ }
+ // TODO: add more here, after collecting data on the common
+ // error values seen on Windows. (perhaps when running
+ // all.bat?)
+ return e +} + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + modvirtdisk = windows.NewLazySystemDLL("virtdisk.dll") + modvmcompute = windows.NewLazySystemDLL("vmcompute.dll") + + procGetDiskFreeSpaceExW = modkernel32.NewProc("GetDiskFreeSpaceExW") + procAttachVirtualDisk = modvirtdisk.NewProc("AttachVirtualDisk") + procOpenVirtualDisk = modvirtdisk.NewProc("OpenVirtualDisk") + procActivateLayer = modvmcompute.NewProc("ActivateLayer") + procCopyLayer = modvmcompute.NewProc("CopyLayer") + procCreateLayer = modvmcompute.NewProc("CreateLayer") + procCreateSandboxLayer = modvmcompute.NewProc("CreateSandboxLayer") + procDeactivateLayer = modvmcompute.NewProc("DeactivateLayer") + procDestroyLayer = modvmcompute.NewProc("DestroyLayer") + procExpandSandboxSize = modvmcompute.NewProc("ExpandSandboxSize") + procExportLayer = modvmcompute.NewProc("ExportLayer") + procGetBaseImages = modvmcompute.NewProc("GetBaseImages") + procGetLayerMountPath = modvmcompute.NewProc("GetLayerMountPath") + procGrantVmAccess = modvmcompute.NewProc("GrantVmAccess") + procImportLayer = modvmcompute.NewProc("ImportLayer") + procLayerExists = modvmcompute.NewProc("LayerExists") + procNameToGuid = modvmcompute.NewProc("NameToGuid") + procPrepareLayer = modvmcompute.NewProc("PrepareLayer") + procProcessBaseImage = modvmcompute.NewProc("ProcessBaseImage") + procProcessUtilityImage = modvmcompute.NewProc("ProcessUtilityImage") + procUnprepareLayer = modvmcompute.NewProc("UnprepareLayer") +) + +func getDiskFreeSpaceEx(directoryName string, freeBytesAvailableToCaller *int64, totalNumberOfBytes *int64, totalNumberOfFreeBytes *int64) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(directoryName) + if err != nil { + return + } + return _getDiskFreeSpaceEx(_p0, freeBytesAvailableToCaller, totalNumberOfBytes, totalNumberOfFreeBytes) +} + +func _getDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *int64, totalNumberOfBytes *int64, totalNumberOfFreeBytes *int64) (err error) { + r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func attachVirtualDisk(handle syscall.Handle, sd uintptr, flags uint32, providerFlags uint32, params uintptr, overlapped uintptr) (err error) { + r1, _, e1 := syscall.Syscall6(procAttachVirtualDisk.Addr(), 6, uintptr(handle), uintptr(sd), uintptr(flags), uintptr(providerFlags), uintptr(params), uintptr(overlapped)) + if r1 != 0 { + err = errnoErr(e1) + } + return +} + +func openVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(path) + if err != nil { + return + } + return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, flags, parameters, handle) +} + +func _openVirtualDisk(virtualStorageType *virtualStorageType, path *uint16, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) { + r1, _, e1 := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(flags), uintptr(unsafe.Pointer(parameters)), 
uintptr(unsafe.Pointer(handle))) + if r1 != 0 { + err = errnoErr(e1) + } + return +} + +func activateLayer(info *driverInfo, id string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _activateLayer(info, _p0) +} + +func _activateLayer(info *driverInfo, id *uint16) (hr error) { + hr = procActivateLayer.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procActivateLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func copyLayer(info *driverInfo, srcId string, dstId string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(srcId) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(dstId) + if hr != nil { + return + } + return _copyLayer(info, _p0, _p1, descriptors) +} + +func _copyLayer(info *driverInfo, srcId *uint16, dstId *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + hr = procCopyLayer.Find() + if hr != nil { + return + } + var _p2 *WC_LAYER_DESCRIPTOR + if len(descriptors) > 0 { + _p2 = &descriptors[0] + } + r0, _, _ := syscall.Syscall6(procCopyLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(srcId)), uintptr(unsafe.Pointer(dstId)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func createLayer(info *driverInfo, id string, parent string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(parent) + if hr != nil { + return + } + return _createLayer(info, _p0, _p1) +} + +func _createLayer(info *driverInfo, id *uint16, parent *uint16) (hr error) { + hr = procCreateLayer.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procCreateLayer.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(parent))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func createSandboxLayer(info *driverInfo, id string, parent uintptr, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _createSandboxLayer(info, _p0, parent, descriptors) +} + +func _createSandboxLayer(info *driverInfo, id *uint16, parent uintptr, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + hr = procCreateSandboxLayer.Find() + if hr != nil { + return + } + var _p1 *WC_LAYER_DESCRIPTOR + if len(descriptors) > 0 { + _p1 = &descriptors[0] + } + r0, _, _ := syscall.Syscall6(procCreateSandboxLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(parent), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func deactivateLayer(info *driverInfo, id string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _deactivateLayer(info, _p0) +} + +func _deactivateLayer(info *driverInfo, id *uint16) (hr error) { + hr = procDeactivateLayer.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procDeactivateLayer.Addr(), 2, 
uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func destroyLayer(info *driverInfo, id string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _destroyLayer(info, _p0) +} + +func _destroyLayer(info *driverInfo, id *uint16) (hr error) { + hr = procDestroyLayer.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procDestroyLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func expandSandboxSize(info *driverInfo, id string, size uint64) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _expandSandboxSize(info, _p0, size) +} + +func _expandSandboxSize(info *driverInfo, id *uint16, size uint64) (hr error) { + hr = procExpandSandboxSize.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procExpandSandboxSize.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(size)) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func exportLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + return _exportLayer(info, _p0, _p1, descriptors) +} + +func _exportLayer(info *driverInfo, id *uint16, path *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + hr = procExportLayer.Find() + if hr != nil { + return + } + var _p2 *WC_LAYER_DESCRIPTOR + if len(descriptors) > 0 { + _p2 = &descriptors[0] + } + r0, _, _ := syscall.Syscall6(procExportLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func getBaseImages(buffer **uint16) (hr error) { + hr = procGetBaseImages.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procGetBaseImages.Addr(), 1, uintptr(unsafe.Pointer(buffer)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func getLayerMountPath(info *driverInfo, id string, length *uintptr, buffer *uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _getLayerMountPath(info, _p0, length, buffer) +} + +func _getLayerMountPath(info *driverInfo, id *uint16, length *uintptr, buffer *uint16) (hr error) { + hr = procGetLayerMountPath.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procGetLayerMountPath.Addr(), 4, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(length)), uintptr(unsafe.Pointer(buffer)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func grantVmAccess(vmid string, filepath string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(vmid) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = 
syscall.UTF16PtrFromString(filepath) + if hr != nil { + return + } + return _grantVmAccess(_p0, _p1) +} + +func _grantVmAccess(vmid *uint16, filepath *uint16) (hr error) { + hr = procGrantVmAccess.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procGrantVmAccess.Addr(), 2, uintptr(unsafe.Pointer(vmid)), uintptr(unsafe.Pointer(filepath)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func importLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + return _importLayer(info, _p0, _p1, descriptors) +} + +func _importLayer(info *driverInfo, id *uint16, path *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + hr = procImportLayer.Find() + if hr != nil { + return + } + var _p2 *WC_LAYER_DESCRIPTOR + if len(descriptors) > 0 { + _p2 = &descriptors[0] + } + r0, _, _ := syscall.Syscall6(procImportLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func layerExists(info *driverInfo, id string, exists *uint32) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _layerExists(info, _p0, exists) +} + +func _layerExists(info *driverInfo, id *uint16, exists *uint32) (hr error) { + hr = procLayerExists.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procLayerExists.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(exists))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func nameToGuid(name string, guid *_guid) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(name) + if hr != nil { + return + } + return _nameToGuid(_p0, guid) +} + +func _nameToGuid(name *uint16, guid *_guid) (hr error) { + hr = procNameToGuid.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procNameToGuid.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(guid)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func prepareLayer(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _prepareLayer(info, _p0, descriptors) +} + +func _prepareLayer(info *driverInfo, id *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + hr = procPrepareLayer.Find() + if hr != nil { + return + } + var _p1 *WC_LAYER_DESCRIPTOR + if len(descriptors) > 0 { + _p1 = &descriptors[0] + } + r0, _, _ := syscall.Syscall6(procPrepareLayer.Addr(), 4, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func processBaseImage(path string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + return _processBaseImage(_p0) +} + +func 
_processBaseImage(path *uint16) (hr error) { + hr = procProcessBaseImage.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procProcessBaseImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func processUtilityImage(path string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + return _processUtilityImage(_p0) +} + +func _processUtilityImage(path *uint16) (hr error) { + hr = procProcessUtilityImage.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procProcessUtilityImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func unprepareLayer(info *driverInfo, id string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _unprepareLayer(info, _p0) +} + +func _unprepareLayer(info *driverInfo, id *uint16) (hr error) { + hr = procUnprepareLayer.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procUnprepareLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/bindflt.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/bindflt.go new file mode 100644 index 0000000000..559d443256 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/bindflt.go @@ -0,0 +1,19 @@ +package winapi + +const ( + BINDFLT_FLAG_READ_ONLY_MAPPING uint32 = 0x00000001 + BINDFLT_FLAG_MERGED_BIND_MAPPING uint32 = 0x00000002 + BINDFLT_FLAG_USE_CURRENT_SILO_MAPPING uint32 = 0x00000004 +) + +// HRESULT +// BfSetupFilter( +// _In_opt_ HANDLE JobHandle, +// _In_ ULONG Flags, +// _In_ LPCWSTR VirtualizationRootPath, +// _In_ LPCWSTR VirtualizationTargetPath, +// _In_reads_opt_( VirtualizationExceptionPathCount ) LPCWSTR* VirtualizationExceptionPaths, +// _In_opt_ ULONG VirtualizationExceptionPathCount +// ); +// +//sys BfSetupFilter(jobHandle windows.Handle, flags uint32, virtRootPath *uint16, virtTargetPath *uint16, virtExceptions **uint16, virtExceptionPathCount uint32) (hr error) = bindfltapi.BfSetupFilter? diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/cimfs.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/cimfs.go new file mode 100644 index 0000000000..d04bffc1f9 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/cimfs.go @@ -0,0 +1,45 @@ +package winapi + +import ( + "unsafe" + + "github.com/Microsoft/go-winio/pkg/guid" + "golang.org/x/sys/windows" +) + +type g = guid.GUID +type FsHandle uintptr +type StreamHandle uintptr + +type CimFsFileMetadata struct { + Attributes uint32 + FileSize int64 + + CreationTime windows.Filetime + LastWriteTime windows.Filetime + ChangeTime windows.Filetime + LastAccessTime windows.Filetime + + SecurityDescriptorBuffer unsafe.Pointer + SecurityDescriptorSize uint32 + + ReparseDataBuffer unsafe.Pointer + ReparseDataSize uint32 + + ExtendedAttributes unsafe.Pointer + EACount uint32 +} + +//sys CimMountImage(imagePath string, fsName string, flags uint32, volumeID *g) (hr error) = cimfs.CimMountImage? +//sys CimDismountImage(volumeID *g) (hr error) = cimfs.CimDismountImage? 
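+
+// Taken together, the directives below describe a create/write/commit flow.
+// A minimal sketch, assuming meta, buf and n were prepared by the caller
+// (illustrative only; error handling elided):
+//
+//	var fs FsHandle
+//	_ = CimCreateImage(imagePath, nil, nil, &fs)
+//	var st StreamHandle
+//	_ = CimCreateFile(fs, `Files\hello.txt`, &meta, &st)
+//	_ = CimWriteStream(st, buf, n)
+//	_ = CimCloseStream(st)
+//	_ = CimCommitImage(fs)
+//	_ = CimCloseImage(fs)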
+
+//sys CimCreateImage(imagePath string, oldFSName *uint16, newFSName *uint16, cimFSHandle *FsHandle) (hr error) = cimfs.CimCreateImage?
+//sys CimCloseImage(cimFSHandle FsHandle) (hr error) = cimfs.CimCloseImage?
+//sys CimCommitImage(cimFSHandle FsHandle) (hr error) = cimfs.CimCommitImage?
+
+//sys CimCreateFile(cimFSHandle FsHandle, path string, file *CimFsFileMetadata, cimStreamHandle *StreamHandle) (hr error) = cimfs.CimCreateFile?
+//sys CimCloseStream(cimStreamHandle StreamHandle) (hr error) = cimfs.CimCloseStream?
+//sys CimWriteStream(cimStreamHandle StreamHandle, buffer uintptr, bufferSize uint32) (hr error) = cimfs.CimWriteStream?
+//sys CimDeletePath(cimFSHandle FsHandle, path string) (hr error) = cimfs.CimDeletePath?
+//sys CimCreateHardLink(cimFSHandle FsHandle, newPath string, oldPath string) (hr error) = cimfs.CimCreateHardLink?
+//sys CimCreateAlternateStream(cimFSHandle FsHandle, path string, size uint64, cimStreamHandle *StreamHandle) (hr error) = cimfs.CimCreateAlternateStream?
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go
new file mode 100644
index 0000000000..4547cdd8e8
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go
@@ -0,0 +1,46 @@
+//go:build windows
+
+package winapi
+
+import (
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+const PSEUDOCONSOLE_INHERIT_CURSOR = 0x1
+
+// CreatePseudoConsole creates a Windows pseudo console.
+func CreatePseudoConsole(size windows.Coord, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) error {
+ // We need this wrapper as the function takes a COORD struct and not a pointer to one, so we need to cast to something beforehand.
+ // Pass dwFlags through rather than dropping it, so PSEUDOCONSOLE_INHERIT_CURSOR can take effect.
+ return createPseudoConsole(*((*uint32)(unsafe.Pointer(&size))), hInput, hOutput, dwFlags, hpcon)
+}
+
+// ResizePseudoConsole resizes the internal buffers of the pseudo console to the width and height specified in `size`.
+func ResizePseudoConsole(hpcon windows.Handle, size windows.Coord) error {
+ // We need this wrapper as the function takes a COORD struct and not a pointer to one, so we need to cast to something beforehand.
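+ // COORD is a pair of int16 fields (X, then Y), so on little-endian Windows
+ // reinterpreting it as a uint32 packs X into the low 16 bits and Y into the
+ // high 16 bits, which is the by-value layout the API expects.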
+ return resizePseudoConsole(hpcon, *((*uint32)(unsafe.Pointer(&size)))) +} + +// HRESULT WINAPI CreatePseudoConsole( +// _In_ COORD size, +// _In_ HANDLE hInput, +// _In_ HANDLE hOutput, +// _In_ DWORD dwFlags, +// _Out_ HPCON* phPC +// ); +// +//sys createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) (hr error) = kernel32.CreatePseudoConsole + +// void WINAPI ClosePseudoConsole( +// _In_ HPCON hPC +// ); +// +//sys ClosePseudoConsole(hpc windows.Handle) = kernel32.ClosePseudoConsole + +// HRESULT WINAPI ResizePseudoConsole( +// _In_ HPCON hPC , +// _In_ COORD size +// ); +// +//sys resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go new file mode 100644 index 0000000000..7875466cad --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go @@ -0,0 +1,15 @@ +//go:build windows + +package winapi + +import "github.com/Microsoft/go-winio/pkg/guid" + +//sys CMGetDeviceIDListSize(pulLen *uint32, pszFilter *byte, uFlags uint32) (hr error) = cfgmgr32.CM_Get_Device_ID_List_SizeA +//sys CMGetDeviceIDList(pszFilter *byte, buffer *byte, bufferLen uint32, uFlags uint32) (hr error)= cfgmgr32.CM_Get_Device_ID_ListA +//sys CMLocateDevNode(pdnDevInst *uint32, pDeviceID string, uFlags uint32) (hr error) = cfgmgr32.CM_Locate_DevNodeW +//sys CMGetDevNodeProperty(dnDevInst uint32, propertyKey *DevPropKey, propertyType *uint32, propertyBuffer *uint16, propertyBufferSize *uint32, uFlags uint32) (hr error) = cfgmgr32.CM_Get_DevNode_PropertyW + +type DevPropKey struct { + Fmtid guid.GUID + Pid uint32 +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/doc.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/doc.go new file mode 100644 index 0000000000..9acc0bfc17 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/doc.go @@ -0,0 +1,3 @@ +// Package winapi contains various low-level bindings to Windows APIs. It can +// be thought of as an extension to golang.org/x/sys/windows. 
+package winapi diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/elevation.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/elevation.go new file mode 100644 index 0000000000..40cbf8712f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/elevation.go @@ -0,0 +1,11 @@ +//go:build windows + +package winapi + +import ( + "golang.org/x/sys/windows" +) + +func IsElevated() bool { + return windows.GetCurrentProcessToken().IsElevated() +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go new file mode 100644 index 0000000000..49ce924cbe --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go @@ -0,0 +1,17 @@ +//go:build windows + +package winapi + +import "syscall" + +//sys RtlNtStatusToDosError(status uint32) (winerr error) = ntdll.RtlNtStatusToDosError + +const ( + STATUS_REPARSE_POINT_ENCOUNTERED = 0xC000050B + ERROR_NO_MORE_ITEMS = 0x103 + ERROR_MORE_DATA syscall.Errno = 234 +) + +func NTSuccess(status uint32) bool { + return status == 0 +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go new file mode 100644 index 0000000000..3dcb3faa0b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go @@ -0,0 +1,115 @@ +//go:build windows + +package winapi + +//sys CopyFileW(existingFileName *uint16, newFileName *uint16, failIfExists int32) (err error) = kernel32.CopyFileW +//sys NtCreateFile(handle *uintptr, accessMask uint32, oa *ObjectAttributes, iosb *IOStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) = ntdll.NtCreateFile +//sys NtSetInformationFile(handle uintptr, iosb *IOStatusBlock, information uintptr, length uint32, class uint32) (status uint32) = ntdll.NtSetInformationFile + +//sys NtOpenDirectoryObject(handle *uintptr, accessMask uint32, oa *ObjectAttributes) (status uint32) = ntdll.NtOpenDirectoryObject +//sys NtQueryDirectoryObject(handle uintptr, buffer *byte, length uint32, singleEntry bool, restartScan bool, context *uint32, returnLength *uint32)(status uint32) = ntdll.NtQueryDirectoryObject + +const ( + FileLinkInformationClass = 11 + FileDispositionInformationExClass = 64 + + FILE_READ_ATTRIBUTES = 0x0080 + FILE_WRITE_ATTRIBUTES = 0x0100 + DELETE = 0x10000 + + FILE_OPEN = 1 + FILE_CREATE = 2 + + FILE_LIST_DIRECTORY = 0x00000001 + FILE_DIRECTORY_FILE = 0x00000001 + FILE_SYNCHRONOUS_IO_NONALERT = 0x00000020 + FILE_OPEN_FOR_BACKUP_INTENT = 0x00004000 + FILE_OPEN_REPARSE_POINT = 0x00200000 + + FILE_DISPOSITION_DELETE = 0x00000001 + + OBJ_DONT_REPARSE = 0x1000 + + STATUS_MORE_ENTRIES = 0x105 + STATUS_NO_MORE_ENTRIES = 0x8000001a +) + +// Select entries from FILE_INFO_BY_HANDLE_CLASS. 
+// +// C declaration: +// +// typedef enum _FILE_INFO_BY_HANDLE_CLASS { +// FileBasicInfo, +// FileStandardInfo, +// FileNameInfo, +// FileRenameInfo, +// FileDispositionInfo, +// FileAllocationInfo, +// FileEndOfFileInfo, +// FileStreamInfo, +// FileCompressionInfo, +// FileAttributeTagInfo, +// FileIdBothDirectoryInfo, +// FileIdBothDirectoryRestartInfo, +// FileIoPriorityHintInfo, +// FileRemoteProtocolInfo, +// FileFullDirectoryInfo, +// FileFullDirectoryRestartInfo, +// FileStorageInfo, +// FileAlignmentInfo, +// FileIdInfo, +// FileIdExtdDirectoryInfo, +// FileIdExtdDirectoryRestartInfo, +// FileDispositionInfoEx, +// FileRenameInfoEx, +// FileCaseSensitiveInfo, +// FileNormalizedNameInfo, +// MaximumFileInfoByHandleClass +// } FILE_INFO_BY_HANDLE_CLASS, *PFILE_INFO_BY_HANDLE_CLASS; +// +// Documentation: https://docs.microsoft.com/en-us/windows/win32/api/minwinbase/ne-minwinbase-file_info_by_handle_class +const ( + FileIdInfo = 18 +) + +type FileDispositionInformationEx struct { + Flags uintptr +} + +type IOStatusBlock struct { + Status, Information uintptr +} + +type ObjectAttributes struct { + Length uintptr + RootDirectory uintptr + ObjectName *UnicodeString + Attributes uintptr + SecurityDescriptor uintptr + SecurityQoS uintptr +} + +type ObjectDirectoryInformation struct { + Name UnicodeString + TypeName UnicodeString +} + +type FileLinkInformation struct { + ReplaceIfExists bool + RootDirectory uintptr + FileNameLength uint32 + FileName [1]uint16 +} + +// C declaration: +// +// typedef struct _FILE_ID_INFO { +// ULONGLONG VolumeSerialNumber; +// FILE_ID_128 FileId; +// } FILE_ID_INFO, *PFILE_ID_INFO; +// +// Documentation: https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_id_info +type FILE_ID_INFO struct { + VolumeSerialNumber uint64 + FileID [16]byte +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go new file mode 100644 index 0000000000..b0deb5c72d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go @@ -0,0 +1,222 @@ +//go:build windows + +package winapi + +import ( + "unsafe" + + "golang.org/x/sys/windows" +) + +// Messages that can be received from an assigned io completion port. +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_associate_completion_port +const ( + JOB_OBJECT_MSG_END_OF_JOB_TIME uint32 = 1 + JOB_OBJECT_MSG_END_OF_PROCESS_TIME uint32 = 2 + JOB_OBJECT_MSG_ACTIVE_PROCESS_LIMIT uint32 = 3 + JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO uint32 = 4 + JOB_OBJECT_MSG_NEW_PROCESS uint32 = 6 + JOB_OBJECT_MSG_EXIT_PROCESS uint32 = 7 + JOB_OBJECT_MSG_ABNORMAL_EXIT_PROCESS uint32 = 8 + JOB_OBJECT_MSG_PROCESS_MEMORY_LIMIT uint32 = 9 + JOB_OBJECT_MSG_JOB_MEMORY_LIMIT uint32 = 10 + JOB_OBJECT_MSG_NOTIFICATION_LIMIT uint32 = 11 +) + +// Access rights for creating or opening job objects. 
+// +// https://docs.microsoft.com/en-us/windows/win32/procthread/job-object-security-and-access-rights +const ( + JOB_OBJECT_QUERY = 0x0004 + JOB_OBJECT_ALL_ACCESS = 0x1F001F +) + +// IO limit flags +// +// https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/ns-jobapi2-jobobject_io_rate_control_information +const JOB_OBJECT_IO_RATE_CONTROL_ENABLE = 0x1 + +const JOBOBJECT_IO_ATTRIBUTION_CONTROL_ENABLE uint32 = 0x1 + +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_cpu_rate_control_information +const ( + JOB_OBJECT_CPU_RATE_CONTROL_ENABLE uint32 = 1 << iota + JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED + JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP + JOB_OBJECT_CPU_RATE_CONTROL_NOTIFY + JOB_OBJECT_CPU_RATE_CONTROL_MIN_MAX_RATE +) + +// JobObjectInformationClass values. Used for a call to QueryInformationJobObject +// +// https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/nf-jobapi2-queryinformationjobobject +const ( + JobObjectBasicAccountingInformation uint32 = 1 + JobObjectBasicProcessIdList uint32 = 3 + JobObjectBasicAndIoAccountingInformation uint32 = 8 + JobObjectLimitViolationInformation uint32 = 13 + JobObjectMemoryUsageInformation uint32 = 28 + JobObjectNotificationLimitInformation2 uint32 = 33 + JobObjectCreateSilo uint32 = 35 + JobObjectSiloBasicInformation uint32 = 36 + JobObjectIoAttribution uint32 = 42 +) + +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_limit_information +type JOBOBJECT_BASIC_LIMIT_INFORMATION struct { + PerProcessUserTimeLimit int64 + PerJobUserTimeLimit int64 + LimitFlags uint32 + MinimumWorkingSetSize uintptr + MaximumWorkingSetSize uintptr + ActiveProcessLimit uint32 + Affinity uintptr + PriorityClass uint32 + SchedulingClass uint32 +} + +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_cpu_rate_control_information +type JOBOBJECT_CPU_RATE_CONTROL_INFORMATION struct { + ControlFlags uint32 + Value uint32 +} + +// https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/ns-jobapi2-jobobject_io_rate_control_information +type JOBOBJECT_IO_RATE_CONTROL_INFORMATION struct { + MaxIops int64 + MaxBandwidth int64 + ReservationIops int64 + BaseIOSize uint32 + VolumeName string + ControlFlags uint32 +} + +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_process_id_list +type JOBOBJECT_BASIC_PROCESS_ID_LIST struct { + NumberOfAssignedProcesses uint32 + NumberOfProcessIdsInList uint32 + ProcessIdList [1]uintptr +} + +// AllPids returns all the process Ids in the job object. 
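+// A sketch of the intended query pattern (illustrative only; real callers
+// size the buffer for NumberOfAssignedProcesses and check errors):
+//
+//	var list JOBOBJECT_BASIC_PROCESS_ID_LIST
+//	_ = QueryInformationJobObject(job, JobObjectBasicProcessIdList,
+//		unsafe.Pointer(&list), uint32(unsafe.Sizeof(list)), nil)
+//	pids := list.AllPids()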
+func (p *JOBOBJECT_BASIC_PROCESS_ID_LIST) AllPids() []uintptr { + return (*[(1 << 27) - 1]uintptr)(unsafe.Pointer(&p.ProcessIdList[0]))[:p.NumberOfProcessIdsInList:p.NumberOfProcessIdsInList] +} + +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_accounting_information +type JOBOBJECT_BASIC_ACCOUNTING_INFORMATION struct { + TotalUserTime int64 + TotalKernelTime int64 + ThisPeriodTotalUserTime int64 + ThisPeriodTotalKernelTime int64 + TotalPageFaultCount uint32 + TotalProcesses uint32 + ActiveProcesses uint32 + TotalTerminateProcesses uint32 +} + +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_and_io_accounting_information +type JOBOBJECT_BASIC_AND_IO_ACCOUNTING_INFORMATION struct { + BasicInfo JOBOBJECT_BASIC_ACCOUNTING_INFORMATION + IoInfo windows.IO_COUNTERS +} + +// typedef struct _JOBOBJECT_MEMORY_USAGE_INFORMATION { +// ULONG64 JobMemory; +// ULONG64 PeakJobMemoryUsed; +// } JOBOBJECT_MEMORY_USAGE_INFORMATION, *PJOBOBJECT_MEMORY_USAGE_INFORMATION; +type JOBOBJECT_MEMORY_USAGE_INFORMATION struct { + JobMemory uint64 + PeakJobMemoryUsed uint64 +} + +// typedef struct _JOBOBJECT_IO_ATTRIBUTION_STATS { +// ULONG_PTR IoCount; +// ULONGLONG TotalNonOverlappedQueueTime; +// ULONGLONG TotalNonOverlappedServiceTime; +// ULONGLONG TotalSize; +// } JOBOBJECT_IO_ATTRIBUTION_STATS, *PJOBOBJECT_IO_ATTRIBUTION_STATS; +type JOBOBJECT_IO_ATTRIBUTION_STATS struct { + IoCount uintptr + TotalNonOverlappedQueueTime uint64 + TotalNonOverlappedServiceTime uint64 + TotalSize uint64 +} + +// typedef struct _JOBOBJECT_IO_ATTRIBUTION_INFORMATION { +// ULONG ControlFlags; +// JOBOBJECT_IO_ATTRIBUTION_STATS ReadStats; +// JOBOBJECT_IO_ATTRIBUTION_STATS WriteStats; +// } JOBOBJECT_IO_ATTRIBUTION_INFORMATION, *PJOBOBJECT_IO_ATTRIBUTION_INFORMATION; +type JOBOBJECT_IO_ATTRIBUTION_INFORMATION struct { + ControlFlags uint32 + ReadStats JOBOBJECT_IO_ATTRIBUTION_STATS + WriteStats JOBOBJECT_IO_ATTRIBUTION_STATS +} + +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_associate_completion_port +type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct { + CompletionKey windows.Handle + CompletionPort windows.Handle +} + +// BOOL IsProcessInJob( +// HANDLE ProcessHandle, +// HANDLE JobHandle, +// PBOOL Result +// ); +// +//sys IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *int32) (err error) = kernel32.IsProcessInJob + +// BOOL QueryInformationJobObject( +// HANDLE hJob, +// JOBOBJECTINFOCLASS JobObjectInformationClass, +// LPVOID lpJobObjectInformation, +// DWORD cbJobObjectInformationLength, +// LPDWORD lpReturnLength +// ); +// +//sys QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo unsafe.Pointer, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) = kernel32.QueryInformationJobObject + +// HANDLE OpenJobObjectW( +// DWORD dwDesiredAccess, +// BOOL bInheritHandle, +// LPCWSTR lpName +// ); +// +//sys OpenJobObject(desiredAccess uint32, inheritHandle int32, lpName *uint16) (handle windows.Handle, err error) = kernel32.OpenJobObjectW + +// DWORD SetIoRateControlInformationJobObject( +// HANDLE hJob, +// JOBOBJECT_IO_RATE_CONTROL_INFORMATION *IoRateControlInfo +// ); +// +//sys SetIoRateControlInformationJobObject(jobHandle windows.Handle, ioRateControlInfo *JOBOBJECT_IO_RATE_CONTROL_INFORMATION) (ret uint32, err error) = kernel32.SetIoRateControlInformationJobObject + +// DWORD QueryIoRateControlInformationJobObject( +// HANDLE hJob, +// PCWSTR 
VolumeName,
+// JOBOBJECT_IO_RATE_CONTROL_INFORMATION **InfoBlocks,
+// ULONG *InfoBlockCount
+// );
+//
+//sys QueryIoRateControlInformationJobObject(jobHandle windows.Handle, volumeName *uint16, ioRateControlInfo **JOBOBJECT_IO_RATE_CONTROL_INFORMATION, infoBlockCount *uint32) (ret uint32, err error) = kernel32.QueryIoRateControlInformationJobObject
+
+// NTSTATUS
+// NtOpenJobObject (
+// _Out_ PHANDLE JobHandle,
+// _In_ ACCESS_MASK DesiredAccess,
+// _In_ POBJECT_ATTRIBUTES ObjectAttributes
+// );
+//
+//sys NtOpenJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) = ntdll.NtOpenJobObject
+
+// NTSTATUS
+// NTAPI
+// NtCreateJobObject (
+// _Out_ PHANDLE JobHandle,
+// _In_ ACCESS_MASK DesiredAccess,
+// _In_opt_ POBJECT_ATTRIBUTES ObjectAttributes
+// );
+//
+//sys NtCreateJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) = ntdll.NtCreateJobObject
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/logon.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/logon.go
new file mode 100644
index 0000000000..b6e7cfd460
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/logon.go
@@ -0,0 +1,30 @@
+package winapi

+// BOOL LogonUserW(
+// LPCWSTR lpszUsername,
+// LPCWSTR lpszDomain,
+// LPCWSTR lpszPassword,
+// DWORD dwLogonType,
+// DWORD dwLogonProvider,
+// PHANDLE phToken
+// );
+//
+//sys LogonUser(username *uint16, domain *uint16, password *uint16, logonType uint32, logonProvider uint32, token *windows.Token) (err error) = advapi32.LogonUserW
+
+// Logon types
+const (
+ LOGON32_LOGON_INTERACTIVE uint32 = 2
+ LOGON32_LOGON_NETWORK uint32 = 3
+ LOGON32_LOGON_BATCH uint32 = 4
+ LOGON32_LOGON_SERVICE uint32 = 5
+ LOGON32_LOGON_UNLOCK uint32 = 7
+ LOGON32_LOGON_NETWORK_CLEARTEXT uint32 = 8
+ LOGON32_LOGON_NEW_CREDENTIALS uint32 = 9
+)
+
+// Logon providers
+const (
+ LOGON32_PROVIDER_DEFAULT uint32 = 0
+ LOGON32_PROVIDER_WINNT40 uint32 = 2
+ LOGON32_PROVIDER_WINNT50 uint32 = 3
+)
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go
new file mode 100644
index 0000000000..53f62948c9
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go
@@ -0,0 +1,4 @@
+package winapi
+
+//sys LocalAlloc(flags uint32, size int) (ptr uintptr) = kernel32.LocalAlloc
+//sys LocalFree(ptr uintptr) = kernel32.LocalFree
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/net.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/net.go
new file mode 100644
index 0000000000..f37910024f
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/net.go
@@ -0,0 +1,3 @@
+package winapi
+
+//sys SetJobCompartmentId(handle windows.Handle, compartmentId uint32) (win32Err error) = iphlpapi.SetJobCompartmentId
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/offlinereg.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/offlinereg.go
new file mode 100644
index 0000000000..c578b3d315
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/offlinereg.go
@@ -0,0 +1,37 @@
+package winapi
+
+// Offline registry management API
+
+type ORHKey uintptr
+
+type RegType uint32
+
+const (
+ // Registry value types: https://docs.microsoft.com/en-us/windows/win32/sysinfo/registry-value-types
+ REG_TYPE_NONE RegType =
0 + REG_TYPE_SZ RegType = 1 + REG_TYPE_EXPAND_SZ RegType = 2 + REG_TYPE_BINARY RegType = 3 + REG_TYPE_DWORD RegType = 4 + REG_TYPE_DWORD_LITTLE_ENDIAN RegType = 4 + REG_TYPE_DWORD_BIG_ENDIAN RegType = 5 + REG_TYPE_LINK RegType = 6 + REG_TYPE_MULTI_SZ RegType = 7 + REG_TYPE_RESOURCE_LIST RegType = 8 + REG_TYPE_FULL_RESOURCE_DESCRIPTOR RegType = 9 + REG_TYPE_RESOURCE_REQUIREMENTS_LIST RegType = 10 + REG_TYPE_QWORD RegType = 11 + REG_TYPE_QWORD_LITTLE_ENDIAN RegType = 11 +) + +//sys ORCreateHive(key *ORHKey) (win32err error) = offreg.ORCreateHive +//sys ORMergeHives(hiveHandles []ORHKey, result *ORHKey) (win32err error) = offreg.ORMergeHives +//sys OROpenHive(hivePath string, result *ORHKey) (win32err error) = offreg.OROpenHive +//sys ORCloseHive(handle ORHKey) (win32err error) = offreg.ORCloseHive +//sys ORSaveHive(handle ORHKey, hivePath string, osMajorVersion uint32, osMinorVersion uint32) (win32err error) = offreg.ORSaveHive +//sys OROpenKey(handle ORHKey, subKey string, result *ORHKey) (win32err error) = offreg.OROpenKey +//sys ORCloseKey(handle ORHKey) (win32err error) = offreg.ORCloseKey +//sys ORCreateKey(handle ORHKey, subKey string, class uintptr, options uint32, securityDescriptor uintptr, result *ORHKey, disposition *uint32) (win32err error) = offreg.ORCreateKey +//sys ORDeleteKey(handle ORHKey, subKey string) (win32err error) = offreg.ORDeleteKey +//sys ORGetValue(handle ORHKey, subKey string, value string, valueType *uint32, data *byte, dataLen *uint32) (win32err error) = offreg.ORGetValue +//sys ORSetValue(handle ORHKey, valueName string, valueType uint32, data *byte, dataLen uint32) (win32err error) = offreg.ORSetValue diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go new file mode 100644 index 0000000000..c6a149b552 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go @@ -0,0 +1,12 @@ +package winapi + +// DWORD SearchPathW( +// LPCWSTR lpPath, +// LPCWSTR lpFileName, +// LPCWSTR lpExtension, +// DWORD nBufferLength, +// LPWSTR lpBuffer, +// LPWSTR *lpFilePart +// ); +// +//sys SearchPath(lpPath *uint16, lpFileName *uint16, lpExtension *uint16, nBufferLength uint32, lpBuffer *uint16, lpFilePath *uint16) (size uint32, err error) = kernel32.SearchPathW diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go new file mode 100644 index 0000000000..f4ae94cfa5 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go @@ -0,0 +1,61 @@ +package winapi + +const PROCESS_ALL_ACCESS uint32 = 2097151 + +const ( + PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE = 0x20016 + PROC_THREAD_ATTRIBUTE_JOB_LIST = 0x2000D +) + +// ProcessVmCounters corresponds to the _VM_COUNTERS_EX and _VM_COUNTERS_EX2 structures. 
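+//
+// A sketch of the intended query (illustrative only; the returned NTSTATUS
+// should be checked in real code):
+//
+//	var vm VM_COUNTERS_EX2
+//	_ = NtQueryInformationProcess(proc, ProcessVmCounters,
+//		unsafe.Pointer(&vm), uint32(unsafe.Sizeof(vm)), nil)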
+const ProcessVmCounters = 3 + +// __kernel_entry NTSTATUS NtQueryInformationProcess( +// [in] HANDLE ProcessHandle, +// [in] PROCESSINFOCLASS ProcessInformationClass, +// [out] PVOID ProcessInformation, +// [in] ULONG ProcessInformationLength, +// [out, optional] PULONG ReturnLength +// ); +// +//sys NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo unsafe.Pointer, processInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQueryInformationProcess + +// typedef struct _VM_COUNTERS_EX { +// SIZE_T PeakVirtualSize; +// SIZE_T VirtualSize; +// ULONG PageFaultCount; +// SIZE_T PeakWorkingSetSize; +// SIZE_T WorkingSetSize; +// SIZE_T QuotaPeakPagedPoolUsage; +// SIZE_T QuotaPagedPoolUsage; +// SIZE_T QuotaPeakNonPagedPoolUsage; +// SIZE_T QuotaNonPagedPoolUsage; +// SIZE_T PagefileUsage; +// SIZE_T PeakPagefileUsage; +// SIZE_T PrivateUsage; +// } VM_COUNTERS_EX, *PVM_COUNTERS_EX; +type VM_COUNTERS_EX struct { + PeakVirtualSize uintptr + VirtualSize uintptr + PageFaultCount uint32 + PeakWorkingSetSize uintptr + WorkingSetSize uintptr + QuotaPeakPagedPoolUsage uintptr + QuotaPagedPoolUsage uintptr + QuotaPeakNonPagedPoolUsage uintptr + QuotaNonPagedPoolUsage uintptr + PagefileUsage uintptr + PeakPagefileUsage uintptr + PrivateUsage uintptr +} + +// typedef struct _VM_COUNTERS_EX2 { +// VM_COUNTERS_EX CountersEx; +// SIZE_T PrivateWorkingSetSize; +// SIZE_T SharedCommitUsage; +// } VM_COUNTERS_EX2, *PVM_COUNTERS_EX2; +type VM_COUNTERS_EX2 struct { + CountersEx VM_COUNTERS_EX + PrivateWorkingSetSize uintptr + SharedCommitUsage uintptr +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/processor.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/processor.go new file mode 100644 index 0000000000..ce79ac2cdb --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/processor.go @@ -0,0 +1,7 @@ +package winapi + +// Get count from all processor groups. 
+// https://docs.microsoft.com/en-us/windows/win32/procthread/processor-groups +const ALL_PROCESSOR_GROUPS = 0xFFFF + +//sys GetActiveProcessorCount(groupNumber uint16) (amount uint32) = kernel32.GetActiveProcessorCount diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go new file mode 100644 index 0000000000..cb494aaa65 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go @@ -0,0 +1,55 @@ +//go:build windows + +package winapi + +import "golang.org/x/sys/windows" + +const SystemProcessInformation = 5 + +const STATUS_INFO_LENGTH_MISMATCH = 0xC0000004 + +// __kernel_entry NTSTATUS NtQuerySystemInformation( +// SYSTEM_INFORMATION_CLASS SystemInformationClass, +// PVOID SystemInformation, +// ULONG SystemInformationLength, +// PULONG ReturnLength +// ); +// +//sys NtQuerySystemInformation(systemInfoClass int, systemInformation unsafe.Pointer, systemInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQuerySystemInformation + +type SYSTEM_PROCESS_INFORMATION struct { + NextEntryOffset uint32 // ULONG + NumberOfThreads uint32 // ULONG + WorkingSetPrivateSize int64 // LARGE_INTEGER + HardFaultCount uint32 // ULONG + NumberOfThreadsHighWatermark uint32 // ULONG + CycleTime uint64 // ULONGLONG + CreateTime int64 // LARGE_INTEGER + UserTime int64 // LARGE_INTEGER + KernelTime int64 // LARGE_INTEGER + ImageName UnicodeString // UNICODE_STRING + BasePriority int32 // KPRIORITY + UniqueProcessID windows.Handle // HANDLE + InheritedFromUniqueProcessID windows.Handle // HANDLE + HandleCount uint32 // ULONG + SessionID uint32 // ULONG + UniqueProcessKey *uint32 // ULONG_PTR + PeakVirtualSize uintptr // SIZE_T + VirtualSize uintptr // SIZE_T + PageFaultCount uint32 // ULONG + PeakWorkingSetSize uintptr // SIZE_T + WorkingSetSize uintptr // SIZE_T + QuotaPeakPagedPoolUsage uintptr // SIZE_T + QuotaPagedPoolUsage uintptr // SIZE_T + QuotaPeakNonPagedPoolUsage uintptr // SIZE_T + QuotaNonPagedPoolUsage uintptr // SIZE_T + PagefileUsage uintptr // SIZE_T + PeakPagefileUsage uintptr // SIZE_T + PrivatePageCount uintptr // SIZE_T + ReadOperationCount int64 // LARGE_INTEGER + WriteOperationCount int64 // LARGE_INTEGER + OtherOperationCount int64 // LARGE_INTEGER + ReadTransferCount int64 // LARGE_INTEGER + WriteTransferCount int64 // LARGE_INTEGER + OtherTransferCount int64 // LARGE_INTEGER +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go new file mode 100644 index 0000000000..f23141a836 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go @@ -0,0 +1,13 @@ +package winapi + +// HANDLE CreateRemoteThread( +// HANDLE hProcess, +// LPSECURITY_ATTRIBUTES lpThreadAttributes, +// SIZE_T dwStackSize, +// LPTHREAD_START_ROUTINE lpStartAddress, +// LPVOID lpParameter, +// DWORD dwCreationFlags, +// LPDWORD lpThreadId +// ); +// +//sys CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes, stackSize uint32, startAddr uintptr, parameter uintptr, creationFlags uint32, threadID *uint32) (handle windows.Handle, err error) = kernel32.CreateRemoteThread diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/user.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/user.go new file mode 100644 index 0000000000..84d4cc294d --- /dev/null +++ 
b/vendor/github.com/Microsoft/hcsshim/internal/winapi/user.go @@ -0,0 +1,194 @@ +//go:build windows + +package winapi + +import ( + "syscall" + + "golang.org/x/sys/windows" +) + +const UserNameCharLimit = 20 + +const ( + USER_PRIV_GUEST uint32 = iota + USER_PRIV_USER + USER_PRIV_ADMIN +) + +const ( + UF_NORMAL_ACCOUNT = 0x00200 + UF_DONT_EXPIRE_PASSWD = 0x10000 +) + +const NERR_UserNotFound = syscall.Errno(0x8AD) + +// typedef struct _LOCALGROUP_MEMBERS_INFO_0 { +// PSID lgrmi0_sid; +// } LOCALGROUP_MEMBERS_INFO_0, *PLOCALGROUP_MEMBERS_INFO_0, *LPLOCALGROUP_MEMBERS_INFO_0; +type LocalGroupMembersInfo0 struct { + Sid *windows.SID +} + +// typedef struct _LOCALGROUP_INFO_1 { +// LPWSTR lgrpi1_name; +// LPWSTR lgrpi1_comment; +// } LOCALGROUP_INFO_1, *PLOCALGROUP_INFO_1, *LPLOCALGROUP_INFO_1; +type LocalGroupInfo1 struct { + Name *uint16 + Comment *uint16 +} + +// typedef struct _USER_INFO_1 { +// LPWSTR usri1_name; +// LPWSTR usri1_password; +// DWORD usri1_password_age; +// DWORD usri1_priv; +// LPWSTR usri1_home_dir; +// LPWSTR usri1_comment; +// DWORD usri1_flags; +// LPWSTR usri1_script_path; +// } USER_INFO_1, *PUSER_INFO_1, *LPUSER_INFO_1; +type UserInfo1 struct { + Name *uint16 + Password *uint16 + PasswordAge uint32 + Priv uint32 + HomeDir *uint16 + Comment *uint16 + Flags uint32 + ScriptPath *uint16 +} + +// NET_API_STATUS NET_API_FUNCTION NetLocalGroupGetInfo( +// [in] LPCWSTR servername, +// [in] LPCWSTR groupname, +// [in] DWORD level, +// [out] LPBYTE *bufptr +// ); +// +//sys netLocalGroupGetInfo(serverName *uint16, groupName *uint16, level uint32, bufptr **byte) (status error) = netapi32.NetLocalGroupGetInfo + +// NetLocalGroupGetInfo is a slightly go friendlier wrapper around the NetLocalGroupGetInfo function. Instead of taking in *uint16's, it takes in +// go strings and does the conversion internally. +func NetLocalGroupGetInfo(serverName, groupName string, level uint32, bufPtr **byte) (err error) { + var ( + serverNameUTF16 *uint16 + groupNameUTF16 *uint16 + ) + if serverName != "" { + serverNameUTF16, err = windows.UTF16PtrFromString(serverName) + if err != nil { + return err + } + } + if groupName != "" { + groupNameUTF16, err = windows.UTF16PtrFromString(groupName) + if err != nil { + return err + } + } + return netLocalGroupGetInfo( + serverNameUTF16, + groupNameUTF16, + level, + bufPtr, + ) +} + +// NET_API_STATUS NET_API_FUNCTION NetUserAdd( +// [in] LPCWSTR servername, +// [in] DWORD level, +// [in] LPBYTE buf, +// [out] LPDWORD parm_err +// ); +// +//sys netUserAdd(serverName *uint16, level uint32, buf *byte, parm_err *uint32) (status error) = netapi32.NetUserAdd + +// NetUserAdd is a slightly go friendlier wrapper around the NetUserAdd function. Instead of taking in *uint16's, it takes in +// go strings and does the conversion internally. +func NetUserAdd(serverName string, level uint32, buf *byte, parm_err *uint32) (err error) { + var serverNameUTF16 *uint16 + if serverName != "" { + serverNameUTF16, err = windows.UTF16PtrFromString(serverName) + if err != nil { + return err + } + } + return netUserAdd( + serverNameUTF16, + level, + buf, + parm_err, + ) +} + +// NET_API_STATUS NET_API_FUNCTION NetUserDel( +// [in] LPCWSTR servername, +// [in] LPCWSTR username +// ); +// +//sys netUserDel(serverName *uint16, username *uint16) (status error) = netapi32.NetUserDel + +// NetUserDel is a slightly go friendlier wrapper around the NetUserDel function. Instead of taking in *uint16's, it takes in +// go strings and does the conversion internally. 
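A hedged sketch of how these netapi32 wrappers are typically driven together: the account name and password are placeholders, and the level argument 1 selects the USER_INFO_1 layout defined above (a Win32 convention, not something this file enforces):

package main

import (
	"fmt"
	"unsafe"

	"github.com/Microsoft/hcsshim/internal/winapi"
	"golang.org/x/sys/windows"
)

func addThenRemoveUser() error {
	name, err := windows.UTF16PtrFromString("imgbuild")
	if err != nil {
		return err
	}
	pwd, err := windows.UTF16PtrFromString("placeholder-password")
	if err != nil {
		return err
	}
	ui := winapi.UserInfo1{
		Name:     name,
		Password: pwd,
		Priv:     winapi.USER_PRIV_USER,
		Flags:    winapi.UF_NORMAL_ACCOUNT | winapi.UF_DONT_EXPIRE_PASSWD,
	}
	var parmErr uint32
	// Level 1 tells netapi32 that buf points at a USER_INFO_1.
	if err := winapi.NetUserAdd("", 1, (*byte)(unsafe.Pointer(&ui)), &parmErr); err != nil {
		return fmt.Errorf("NetUserAdd (parm_err=%d): %w", parmErr, err)
	}
	// Clean up the account again via the wrapper defined below.
	return winapi.NetUserDel("", "imgbuild")
}

func main() {
	if err := addThenRemoveUser(); err != nil {
		panic(err)
	}
}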
+func NetUserDel(serverName, userName string) (err error) { + var ( + serverNameUTF16 *uint16 + userNameUTF16 *uint16 + ) + if serverName != "" { + serverNameUTF16, err = windows.UTF16PtrFromString(serverName) + if err != nil { + return err + } + } + if userName != "" { + userNameUTF16, err = windows.UTF16PtrFromString(userName) + if err != nil { + return err + } + } + return netUserDel( + serverNameUTF16, + userNameUTF16, + ) +} + +// NET_API_STATUS NET_API_FUNCTION NetLocalGroupAddMembers( +// [in] LPCWSTR servername, +// [in] LPCWSTR groupname, +// [in] DWORD level, +// [in] LPBYTE buf, +// [in] DWORD totalentries +// ); +// +//sys netLocalGroupAddMembers(serverName *uint16, groupName *uint16, level uint32, buf *byte, totalEntries uint32) (status error) = netapi32.NetLocalGroupAddMembers + +// NetLocalGroupAddMembers is a slightly go friendlier wrapper around the NetLocalGroupAddMembers function. Instead of taking in *uint16's, it takes in +// go strings and does the conversion internally. +func NetLocalGroupAddMembers(serverName, groupName string, level uint32, buf *byte, totalEntries uint32) (err error) { + var ( + serverNameUTF16 *uint16 + groupNameUTF16 *uint16 + ) + if serverName != "" { + serverNameUTF16, err = windows.UTF16PtrFromString(serverName) + if err != nil { + return err + } + } + if groupName != "" { + groupNameUTF16, err = windows.UTF16PtrFromString(groupName) + if err != nil { + return err + } + } + return netLocalGroupAddMembers( + serverNameUTF16, + groupNameUTF16, + level, + buf, + totalEntries, + ) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go new file mode 100644 index 0000000000..93d633d490 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go @@ -0,0 +1,88 @@ +//go:build windows + +package winapi + +import ( + "errors" + "reflect" + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +// Uint16BufferToSlice wraps a uint16 pointer-and-length into a slice +// for easier interop with Go APIs +func Uint16BufferToSlice(buffer *uint16, bufferLength int) (result []uint16) { + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&result)) + hdr.Data = uintptr(unsafe.Pointer(buffer)) + hdr.Cap = bufferLength + hdr.Len = bufferLength + + return +} + +// UnicodeString corresponds to UNICODE_STRING win32 struct defined here +// https://docs.microsoft.com/en-us/windows/win32/api/ntdef/ns-ntdef-_unicode_string +type UnicodeString struct { + Length uint16 + MaximumLength uint16 + Buffer *uint16 +} + +// NTSTRSAFE_UNICODE_STRING_MAX_CCH is a constant defined in ntstrsafe.h. This value +// denotes the maximum number of wide chars a path can have. +const NTSTRSAFE_UNICODE_STRING_MAX_CCH = 32767 + +// String converts a UnicodeString to a golang string +func (uni UnicodeString) String() string { + // UnicodeString is not guaranteed to be null terminated, therefore + // use the UnicodeString's Length field + return windows.UTF16ToString(Uint16BufferToSlice(uni.Buffer, int(uni.Length/2))) +} + +// NewUnicodeString allocates a new UnicodeString and copies `s` into +// the buffer of the new UnicodeString. +func NewUnicodeString(s string) (*UnicodeString, error) { + buf, err := windows.UTF16FromString(s) + if err != nil { + return nil, err + } + + if len(buf) > NTSTRSAFE_UNICODE_STRING_MAX_CCH { + return nil, syscall.ENAMETOOLONG + } + + uni := &UnicodeString{ + // The length is in bytes and should not include the trailing null character. 
+ Length: uint16((len(buf) - 1) * 2), + MaximumLength: uint16((len(buf) - 1) * 2), + Buffer: &buf[0], + } + return uni, nil +} + +// ConvertStringSetToSlice is a helper function used to convert the contents of +// `buf` into a string slice. `buf` contains a set of null terminated strings +// with an additional null at the end to indicate the end of the set. +func ConvertStringSetToSlice(buf []byte) ([]string, error) { + var results []string + prev := 0 + for i := range buf { + if buf[i] == 0 { + if prev == i { + // found two null characters in a row, return result + return results, nil + } + results = append(results, string(buf[prev:i])) + prev = i + 1 + } + } + return nil, errors.New("string set malformed: missing null terminator at end of buffer") +} + +// ParseUtf16LE parses a UTF-16LE byte array into a string (without passing +// through a uint16 or rune array). +func ParseUtf16LE(b []byte) string { + return windows.UTF16PtrToString((*uint16)(unsafe.Pointer(&b[0]))) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go new file mode 100644 index 0000000000..6a90e3a69a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go @@ -0,0 +1,3 @@ +package winapi + +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go ./*.go diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go new file mode 100644 index 0000000000..b5b9fcccc7 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go @@ -0,0 +1,791 @@ +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. + +package winapi + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
+ return e +} + +var ( + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + modbindfltapi = windows.NewLazySystemDLL("bindfltapi.dll") + modcfgmgr32 = windows.NewLazySystemDLL("cfgmgr32.dll") + modcimfs = windows.NewLazySystemDLL("cimfs.dll") + modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + modnetapi32 = windows.NewLazySystemDLL("netapi32.dll") + modntdll = windows.NewLazySystemDLL("ntdll.dll") + modoffreg = windows.NewLazySystemDLL("offreg.dll") + + procLogonUserW = modadvapi32.NewProc("LogonUserW") + procBfSetupFilter = modbindfltapi.NewProc("BfSetupFilter") + procCM_Get_DevNode_PropertyW = modcfgmgr32.NewProc("CM_Get_DevNode_PropertyW") + procCM_Get_Device_ID_ListA = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA") + procCM_Get_Device_ID_List_SizeA = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA") + procCM_Locate_DevNodeW = modcfgmgr32.NewProc("CM_Locate_DevNodeW") + procCimCloseImage = modcimfs.NewProc("CimCloseImage") + procCimCloseStream = modcimfs.NewProc("CimCloseStream") + procCimCommitImage = modcimfs.NewProc("CimCommitImage") + procCimCreateAlternateStream = modcimfs.NewProc("CimCreateAlternateStream") + procCimCreateFile = modcimfs.NewProc("CimCreateFile") + procCimCreateHardLink = modcimfs.NewProc("CimCreateHardLink") + procCimCreateImage = modcimfs.NewProc("CimCreateImage") + procCimDeletePath = modcimfs.NewProc("CimDeletePath") + procCimDismountImage = modcimfs.NewProc("CimDismountImage") + procCimMountImage = modcimfs.NewProc("CimMountImage") + procCimWriteStream = modcimfs.NewProc("CimWriteStream") + procSetJobCompartmentId = modiphlpapi.NewProc("SetJobCompartmentId") + procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole") + procCopyFileW = modkernel32.NewProc("CopyFileW") + procCreatePseudoConsole = modkernel32.NewProc("CreatePseudoConsole") + procCreateRemoteThread = modkernel32.NewProc("CreateRemoteThread") + procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount") + procIsProcessInJob = modkernel32.NewProc("IsProcessInJob") + procLocalAlloc = modkernel32.NewProc("LocalAlloc") + procLocalFree = modkernel32.NewProc("LocalFree") + procOpenJobObjectW = modkernel32.NewProc("OpenJobObjectW") + procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject") + procQueryIoRateControlInformationJobObject = modkernel32.NewProc("QueryIoRateControlInformationJobObject") + procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole") + procSearchPathW = modkernel32.NewProc("SearchPathW") + procSetIoRateControlInformationJobObject = modkernel32.NewProc("SetIoRateControlInformationJobObject") + procNetLocalGroupAddMembers = modnetapi32.NewProc("NetLocalGroupAddMembers") + procNetLocalGroupGetInfo = modnetapi32.NewProc("NetLocalGroupGetInfo") + procNetUserAdd = modnetapi32.NewProc("NetUserAdd") + procNetUserDel = modnetapi32.NewProc("NetUserDel") + procNtCreateFile = modntdll.NewProc("NtCreateFile") + procNtCreateJobObject = modntdll.NewProc("NtCreateJobObject") + procNtOpenDirectoryObject = modntdll.NewProc("NtOpenDirectoryObject") + procNtOpenJobObject = modntdll.NewProc("NtOpenJobObject") + procNtQueryDirectoryObject = modntdll.NewProc("NtQueryDirectoryObject") + procNtQueryInformationProcess = modntdll.NewProc("NtQueryInformationProcess") + procNtQuerySystemInformation = modntdll.NewProc("NtQuerySystemInformation") + procNtSetInformationFile = modntdll.NewProc("NtSetInformationFile") + procRtlNtStatusToDosError = modntdll.NewProc("RtlNtStatusToDosError") 
+ procORCloseHive = modoffreg.NewProc("ORCloseHive") + procORCloseKey = modoffreg.NewProc("ORCloseKey") + procORCreateHive = modoffreg.NewProc("ORCreateHive") + procORCreateKey = modoffreg.NewProc("ORCreateKey") + procORDeleteKey = modoffreg.NewProc("ORDeleteKey") + procORGetValue = modoffreg.NewProc("ORGetValue") + procORMergeHives = modoffreg.NewProc("ORMergeHives") + procOROpenHive = modoffreg.NewProc("OROpenHive") + procOROpenKey = modoffreg.NewProc("OROpenKey") + procORSaveHive = modoffreg.NewProc("ORSaveHive") + procORSetValue = modoffreg.NewProc("ORSetValue") +) + +func LogonUser(username *uint16, domain *uint16, password *uint16, logonType uint32, logonProvider uint32, token *windows.Token) (err error) { + r1, _, e1 := syscall.Syscall6(procLogonUserW.Addr(), 6, uintptr(unsafe.Pointer(username)), uintptr(unsafe.Pointer(domain)), uintptr(unsafe.Pointer(password)), uintptr(logonType), uintptr(logonProvider), uintptr(unsafe.Pointer(token))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func BfSetupFilter(jobHandle windows.Handle, flags uint32, virtRootPath *uint16, virtTargetPath *uint16, virtExceptions **uint16, virtExceptionPathCount uint32) (hr error) { + hr = procBfSetupFilter.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procBfSetupFilter.Addr(), 6, uintptr(jobHandle), uintptr(flags), uintptr(unsafe.Pointer(virtRootPath)), uintptr(unsafe.Pointer(virtTargetPath)), uintptr(unsafe.Pointer(virtExceptions)), uintptr(virtExceptionPathCount)) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CMGetDevNodeProperty(dnDevInst uint32, propertyKey *DevPropKey, propertyType *uint32, propertyBuffer *uint16, propertyBufferSize *uint32, uFlags uint32) (hr error) { + r0, _, _ := syscall.Syscall6(procCM_Get_DevNode_PropertyW.Addr(), 6, uintptr(dnDevInst), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(unsafe.Pointer(propertyBufferSize)), uintptr(uFlags)) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CMGetDeviceIDList(pszFilter *byte, buffer *byte, bufferLen uint32, uFlags uint32) (hr error) { + r0, _, _ := syscall.Syscall6(procCM_Get_Device_ID_ListA.Addr(), 4, uintptr(unsafe.Pointer(pszFilter)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(uFlags), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CMGetDeviceIDListSize(pulLen *uint32, pszFilter *byte, uFlags uint32) (hr error) { + r0, _, _ := syscall.Syscall(procCM_Get_Device_ID_List_SizeA.Addr(), 3, uintptr(unsafe.Pointer(pulLen)), uintptr(unsafe.Pointer(pszFilter)), uintptr(uFlags)) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CMLocateDevNode(pdnDevInst *uint32, pDeviceID string, uFlags uint32) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(pDeviceID) + if hr != nil { + return + } + return _CMLocateDevNode(pdnDevInst, _p0, uFlags) +} + +func _CMLocateDevNode(pdnDevInst *uint32, pDeviceID *uint16, uFlags uint32) (hr error) { + r0, _, _ := syscall.Syscall(procCM_Locate_DevNodeW.Addr(), 3, uintptr(unsafe.Pointer(pdnDevInst)), uintptr(unsafe.Pointer(pDeviceID)), uintptr(uFlags)) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func 
CimCloseImage(cimFSHandle FsHandle) (hr error) { + hr = procCimCloseImage.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procCimCloseImage.Addr(), 1, uintptr(cimFSHandle), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CimCloseStream(cimStreamHandle StreamHandle) (hr error) { + hr = procCimCloseStream.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procCimCloseStream.Addr(), 1, uintptr(cimStreamHandle), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CimCommitImage(cimFSHandle FsHandle) (hr error) { + hr = procCimCommitImage.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procCimCommitImage.Addr(), 1, uintptr(cimFSHandle), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CimCreateAlternateStream(cimFSHandle FsHandle, path string, size uint64, cimStreamHandle *StreamHandle) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + return _CimCreateAlternateStream(cimFSHandle, _p0, size, cimStreamHandle) +} + +func _CimCreateAlternateStream(cimFSHandle FsHandle, path *uint16, size uint64, cimStreamHandle *StreamHandle) (hr error) { + hr = procCimCreateAlternateStream.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procCimCreateAlternateStream.Addr(), 4, uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)), uintptr(size), uintptr(unsafe.Pointer(cimStreamHandle)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CimCreateFile(cimFSHandle FsHandle, path string, file *CimFsFileMetadata, cimStreamHandle *StreamHandle) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + return _CimCreateFile(cimFSHandle, _p0, file, cimStreamHandle) +} + +func _CimCreateFile(cimFSHandle FsHandle, path *uint16, file *CimFsFileMetadata, cimStreamHandle *StreamHandle) (hr error) { + hr = procCimCreateFile.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procCimCreateFile.Addr(), 4, uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(cimStreamHandle)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CimCreateHardLink(cimFSHandle FsHandle, newPath string, oldPath string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(newPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(oldPath) + if hr != nil { + return + } + return _CimCreateHardLink(cimFSHandle, _p0, _p1) +} + +func _CimCreateHardLink(cimFSHandle FsHandle, newPath *uint16, oldPath *uint16) (hr error) { + hr = procCimCreateHardLink.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procCimCreateHardLink.Addr(), 3, uintptr(cimFSHandle), uintptr(unsafe.Pointer(newPath)), uintptr(unsafe.Pointer(oldPath))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CimCreateImage(imagePath string, oldFSName *uint16, newFSName *uint16, cimFSHandle *FsHandle) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(imagePath) + if hr != nil { + return + } + return 
_CimCreateImage(_p0, oldFSName, newFSName, cimFSHandle) +} + +func _CimCreateImage(imagePath *uint16, oldFSName *uint16, newFSName *uint16, cimFSHandle *FsHandle) (hr error) { + hr = procCimCreateImage.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procCimCreateImage.Addr(), 4, uintptr(unsafe.Pointer(imagePath)), uintptr(unsafe.Pointer(oldFSName)), uintptr(unsafe.Pointer(newFSName)), uintptr(unsafe.Pointer(cimFSHandle)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CimDeletePath(cimFSHandle FsHandle, path string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + return _CimDeletePath(cimFSHandle, _p0) +} + +func _CimDeletePath(cimFSHandle FsHandle, path *uint16) (hr error) { + hr = procCimDeletePath.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procCimDeletePath.Addr(), 2, uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CimDismountImage(volumeID *g) (hr error) { + hr = procCimDismountImage.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procCimDismountImage.Addr(), 1, uintptr(unsafe.Pointer(volumeID)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CimMountImage(imagePath string, fsName string, flags uint32, volumeID *g) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(imagePath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(fsName) + if hr != nil { + return + } + return _CimMountImage(_p0, _p1, flags, volumeID) +} + +func _CimMountImage(imagePath *uint16, fsName *uint16, flags uint32, volumeID *g) (hr error) { + hr = procCimMountImage.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procCimMountImage.Addr(), 4, uintptr(unsafe.Pointer(imagePath)), uintptr(unsafe.Pointer(fsName)), uintptr(flags), uintptr(unsafe.Pointer(volumeID)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CimWriteStream(cimStreamHandle StreamHandle, buffer uintptr, bufferSize uint32) (hr error) { + hr = procCimWriteStream.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procCimWriteStream.Addr(), 3, uintptr(cimStreamHandle), uintptr(buffer), uintptr(bufferSize)) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func SetJobCompartmentId(handle windows.Handle, compartmentId uint32) (win32Err error) { + r0, _, _ := syscall.Syscall(procSetJobCompartmentId.Addr(), 2, uintptr(handle), uintptr(compartmentId), 0) + if r0 != 0 { + win32Err = syscall.Errno(r0) + } + return +} + +func ClosePseudoConsole(hpc windows.Handle) { + syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(hpc), 0, 0) + return +} + +func CopyFileW(existingFileName *uint16, newFileName *uint16, failIfExists int32) (err error) { + r1, _, e1 := syscall.Syscall(procCopyFileW.Addr(), 3, uintptr(unsafe.Pointer(existingFileName)), uintptr(unsafe.Pointer(newFileName)), uintptr(failIfExists)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) (hr error) { + r0, _, _ := 
syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(hInput), uintptr(hOutput), uintptr(dwFlags), uintptr(unsafe.Pointer(hpcon)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes, stackSize uint32, startAddr uintptr, parameter uintptr, creationFlags uint32, threadID *uint32) (handle windows.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateRemoteThread.Addr(), 7, uintptr(process), uintptr(unsafe.Pointer(sa)), uintptr(stackSize), uintptr(startAddr), uintptr(parameter), uintptr(creationFlags), uintptr(unsafe.Pointer(threadID)), 0, 0) + handle = windows.Handle(r0) + if handle == 0 { + err = errnoErr(e1) + } + return +} + +func GetActiveProcessorCount(groupNumber uint16) (amount uint32) { + r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0) + amount = uint32(r0) + return +} + +func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *int32) (err error) { + r1, _, e1 := syscall.Syscall(procIsProcessInJob.Addr(), 3, uintptr(procHandle), uintptr(jobHandle), uintptr(unsafe.Pointer(result))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func LocalAlloc(flags uint32, size int) (ptr uintptr) { + r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(size), 0) + ptr = uintptr(r0) + return +} + +func LocalFree(ptr uintptr) { + syscall.Syscall(procLocalFree.Addr(), 1, uintptr(ptr), 0, 0) + return +} + +func OpenJobObject(desiredAccess uint32, inheritHandle int32, lpName *uint16) (handle windows.Handle, err error) { + r0, _, e1 := syscall.Syscall(procOpenJobObjectW.Addr(), 3, uintptr(desiredAccess), uintptr(inheritHandle), uintptr(unsafe.Pointer(lpName))) + handle = windows.Handle(r0) + if handle == 0 { + err = errnoErr(e1) + } + return +} + +func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo unsafe.Pointer, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(jobHandle), uintptr(infoClass), uintptr(jobObjectInfo), uintptr(jobObjectInformationLength), uintptr(unsafe.Pointer(lpReturnLength)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func QueryIoRateControlInformationJobObject(jobHandle windows.Handle, volumeName *uint16, ioRateControlInfo **JOBOBJECT_IO_RATE_CONTROL_INFORMATION, infoBlockCount *uint32) (ret uint32, err error) { + r0, _, e1 := syscall.Syscall6(procQueryIoRateControlInformationJobObject.Addr(), 4, uintptr(jobHandle), uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(ioRateControlInfo)), uintptr(unsafe.Pointer(infoBlockCount)), 0, 0) + ret = uint32(r0) + if ret == 0 { + err = errnoErr(e1) + } + return +} + +func resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) { + r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(hPc), uintptr(size), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func SearchPath(lpPath *uint16, lpFileName *uint16, lpExtension *uint16, nBufferLength uint32, lpBuffer *uint16, lpFilePath *uint16) (size uint32, err error) { + r0, _, e1 := syscall.Syscall6(procSearchPathW.Addr(), 6, uintptr(unsafe.Pointer(lpPath)), uintptr(unsafe.Pointer(lpFileName)), uintptr(unsafe.Pointer(lpExtension)), uintptr(nBufferLength), 
uintptr(unsafe.Pointer(lpBuffer)), uintptr(unsafe.Pointer(lpFilePath))) + size = uint32(r0) + if size == 0 { + err = errnoErr(e1) + } + return +} + +func SetIoRateControlInformationJobObject(jobHandle windows.Handle, ioRateControlInfo *JOBOBJECT_IO_RATE_CONTROL_INFORMATION) (ret uint32, err error) { + r0, _, e1 := syscall.Syscall(procSetIoRateControlInformationJobObject.Addr(), 2, uintptr(jobHandle), uintptr(unsafe.Pointer(ioRateControlInfo)), 0) + ret = uint32(r0) + if ret == 0 { + err = errnoErr(e1) + } + return +} + +func netLocalGroupAddMembers(serverName *uint16, groupName *uint16, level uint32, buf *byte, totalEntries uint32) (status error) { + r0, _, _ := syscall.Syscall6(procNetLocalGroupAddMembers.Addr(), 5, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(groupName)), uintptr(level), uintptr(unsafe.Pointer(buf)), uintptr(totalEntries), 0) + if r0 != 0 { + status = syscall.Errno(r0) + } + return +} + +func netLocalGroupGetInfo(serverName *uint16, groupName *uint16, level uint32, bufptr **byte) (status error) { + r0, _, _ := syscall.Syscall6(procNetLocalGroupGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(groupName)), uintptr(level), uintptr(unsafe.Pointer(bufptr)), 0, 0) + if r0 != 0 { + status = syscall.Errno(r0) + } + return +} + +func netUserAdd(serverName *uint16, level uint32, buf *byte, parm_err *uint32) (status error) { + r0, _, _ := syscall.Syscall6(procNetUserAdd.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(parm_err)), 0, 0) + if r0 != 0 { + status = syscall.Errno(r0) + } + return +} + +func netUserDel(serverName *uint16, username *uint16) (status error) { + r0, _, _ := syscall.Syscall(procNetUserDel.Addr(), 2, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(username)), 0) + if r0 != 0 { + status = syscall.Errno(r0) + } + return +} + +func NtCreateFile(handle *uintptr, accessMask uint32, oa *ObjectAttributes, iosb *IOStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) { + r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(fileAttributes), uintptr(shareAccess), uintptr(createDisposition), uintptr(createOptions), uintptr(unsafe.Pointer(eaBuffer)), uintptr(eaLength), 0) + status = uint32(r0) + return +} + +func NtCreateJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) { + r0, _, _ := syscall.Syscall(procNtCreateJobObject.Addr(), 3, uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes))) + status = uint32(r0) + return +} + +func NtOpenDirectoryObject(handle *uintptr, accessMask uint32, oa *ObjectAttributes) (status uint32) { + r0, _, _ := syscall.Syscall(procNtOpenDirectoryObject.Addr(), 3, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa))) + status = uint32(r0) + return +} + +func NtOpenJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) { + r0, _, _ := syscall.Syscall(procNtOpenJobObject.Addr(), 3, uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes))) + status = uint32(r0) + return +} + +func NtQueryDirectoryObject(handle 
uintptr, buffer *byte, length uint32, singleEntry bool, restartScan bool, context *uint32, returnLength *uint32) (status uint32) { + var _p0 uint32 + if singleEntry { + _p0 = 1 + } + var _p1 uint32 + if restartScan { + _p1 = 1 + } + r0, _, _ := syscall.Syscall9(procNtQueryDirectoryObject.Addr(), 7, uintptr(handle), uintptr(unsafe.Pointer(buffer)), uintptr(length), uintptr(_p0), uintptr(_p1), uintptr(unsafe.Pointer(context)), uintptr(unsafe.Pointer(returnLength)), 0, 0) + status = uint32(r0) + return +} + +func NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo unsafe.Pointer, processInfoLength uint32, returnLength *uint32) (status uint32) { + r0, _, _ := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(processHandle), uintptr(processInfoClass), uintptr(processInfo), uintptr(processInfoLength), uintptr(unsafe.Pointer(returnLength)), 0) + status = uint32(r0) + return +} + +func NtQuerySystemInformation(systemInfoClass int, systemInformation unsafe.Pointer, systemInfoLength uint32, returnLength *uint32) (status uint32) { + r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(systemInfoClass), uintptr(systemInformation), uintptr(systemInfoLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) + status = uint32(r0) + return +} + +func NtSetInformationFile(handle uintptr, iosb *IOStatusBlock, information uintptr, length uint32, class uint32) (status uint32) { + r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(information), uintptr(length), uintptr(class), 0) + status = uint32(r0) + return +} + +func RtlNtStatusToDosError(status uint32) (winerr error) { + r0, _, _ := syscall.Syscall(procRtlNtStatusToDosError.Addr(), 1, uintptr(status), 0, 0) + if r0 != 0 { + winerr = syscall.Errno(r0) + } + return +} + +func ORCloseHive(handle ORHKey) (win32err error) { + r0, _, _ := syscall.Syscall(procORCloseHive.Addr(), 1, uintptr(handle), 0, 0) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func ORCloseKey(handle ORHKey) (win32err error) { + r0, _, _ := syscall.Syscall(procORCloseKey.Addr(), 1, uintptr(handle), 0, 0) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func ORCreateHive(key *ORHKey) (win32err error) { + r0, _, _ := syscall.Syscall(procORCreateHive.Addr(), 1, uintptr(unsafe.Pointer(key)), 0, 0) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func ORCreateKey(handle ORHKey, subKey string, class uintptr, options uint32, securityDescriptor uintptr, result *ORHKey, disposition *uint32) (win32err error) { + var _p0 *uint16 + _p0, win32err = syscall.UTF16PtrFromString(subKey) + if win32err != nil { + return + } + return _ORCreateKey(handle, _p0, class, options, securityDescriptor, result, disposition) +} + +func _ORCreateKey(handle ORHKey, subKey *uint16, class uintptr, options uint32, securityDescriptor uintptr, result *ORHKey, disposition *uint32) (win32err error) { + r0, _, _ := syscall.Syscall9(procORCreateKey.Addr(), 7, uintptr(handle), uintptr(unsafe.Pointer(subKey)), uintptr(class), uintptr(options), uintptr(securityDescriptor), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition)), 0, 0) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func ORDeleteKey(handle ORHKey, subKey string) (win32err error) { + var _p0 *uint16 + _p0, win32err = syscall.UTF16PtrFromString(subKey) + if win32err != nil { + return + } + return _ORDeleteKey(handle, _p0) +} + +func 
_ORDeleteKey(handle ORHKey, subKey *uint16) (win32err error) { + r0, _, _ := syscall.Syscall(procORDeleteKey.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(subKey)), 0) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func ORGetValue(handle ORHKey, subKey string, value string, valueType *uint32, data *byte, dataLen *uint32) (win32err error) { + var _p0 *uint16 + _p0, win32err = syscall.UTF16PtrFromString(subKey) + if win32err != nil { + return + } + var _p1 *uint16 + _p1, win32err = syscall.UTF16PtrFromString(value) + if win32err != nil { + return + } + return _ORGetValue(handle, _p0, _p1, valueType, data, dataLen) +} + +func _ORGetValue(handle ORHKey, subKey *uint16, value *uint16, valueType *uint32, data *byte, dataLen *uint32) (win32err error) { + r0, _, _ := syscall.Syscall6(procORGetValue.Addr(), 6, uintptr(handle), uintptr(unsafe.Pointer(subKey)), uintptr(unsafe.Pointer(value)), uintptr(unsafe.Pointer(valueType)), uintptr(unsafe.Pointer(data)), uintptr(unsafe.Pointer(dataLen))) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func ORMergeHives(hiveHandles []ORHKey, result *ORHKey) (win32err error) { + var _p0 *ORHKey + if len(hiveHandles) > 0 { + _p0 = &hiveHandles[0] + } + r0, _, _ := syscall.Syscall(procORMergeHives.Addr(), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(hiveHandles)), uintptr(unsafe.Pointer(result))) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func OROpenHive(hivePath string, result *ORHKey) (win32err error) { + var _p0 *uint16 + _p0, win32err = syscall.UTF16PtrFromString(hivePath) + if win32err != nil { + return + } + return _OROpenHive(_p0, result) +} + +func _OROpenHive(hivePath *uint16, result *ORHKey) (win32err error) { + r0, _, _ := syscall.Syscall(procOROpenHive.Addr(), 2, uintptr(unsafe.Pointer(hivePath)), uintptr(unsafe.Pointer(result)), 0) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func OROpenKey(handle ORHKey, subKey string, result *ORHKey) (win32err error) { + var _p0 *uint16 + _p0, win32err = syscall.UTF16PtrFromString(subKey) + if win32err != nil { + return + } + return _OROpenKey(handle, _p0, result) +} + +func _OROpenKey(handle ORHKey, subKey *uint16, result *ORHKey) (win32err error) { + r0, _, _ := syscall.Syscall(procOROpenKey.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(subKey)), uintptr(unsafe.Pointer(result))) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func ORSaveHive(handle ORHKey, hivePath string, osMajorVersion uint32, osMinorVersion uint32) (win32err error) { + var _p0 *uint16 + _p0, win32err = syscall.UTF16PtrFromString(hivePath) + if win32err != nil { + return + } + return _ORSaveHive(handle, _p0, osMajorVersion, osMinorVersion) +} + +func _ORSaveHive(handle ORHKey, hivePath *uint16, osMajorVersion uint32, osMinorVersion uint32) (win32err error) { + r0, _, _ := syscall.Syscall6(procORSaveHive.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(hivePath)), uintptr(osMajorVersion), uintptr(osMinorVersion), 0, 0) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func ORSetValue(handle ORHKey, valueName string, valueType uint32, data *byte, dataLen uint32) (win32err error) { + var _p0 *uint16 + _p0, win32err = syscall.UTF16PtrFromString(valueName) + if win32err != nil { + return + } + return _ORSetValue(handle, _p0, valueType, data, dataLen) +} + +func _ORSetValue(handle ORHKey, valueName *uint16, valueType uint32, data *byte, dataLen uint32) (win32err error) { + r0, _, _ := syscall.Syscall6(procORSetValue.Addr(), 5, 
uintptr(handle), uintptr(unsafe.Pointer(valueName)), uintptr(valueType), uintptr(unsafe.Pointer(data)), uintptr(dataLen), 0) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/layer.go b/vendor/github.com/Microsoft/hcsshim/layer.go new file mode 100644 index 0000000000..7e9c9fbbe8 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/layer.go @@ -0,0 +1,113 @@ +//go:build windows + +package hcsshim + +import ( + "context" + "crypto/sha1" + "path/filepath" + + "github.com/Microsoft/go-winio/pkg/guid" + "github.com/Microsoft/hcsshim/internal/wclayer" +) + +func layerPath(info *DriverInfo, id string) string { + return filepath.Join(info.HomeDir, id) +} + +func ActivateLayer(info DriverInfo, id string) error { + return wclayer.ActivateLayer(context.Background(), layerPath(&info, id)) +} +func CreateLayer(info DriverInfo, id, parent string) error { + return wclayer.CreateLayer(context.Background(), layerPath(&info, id), parent) +} + +// New clients should use CreateScratchLayer instead. Kept in to preserve API compatibility. +func CreateSandboxLayer(info DriverInfo, layerId, parentId string, parentLayerPaths []string) error { + return wclayer.CreateScratchLayer(context.Background(), layerPath(&info, layerId), parentLayerPaths) +} +func CreateScratchLayer(info DriverInfo, layerId, parentId string, parentLayerPaths []string) error { + return wclayer.CreateScratchLayer(context.Background(), layerPath(&info, layerId), parentLayerPaths) +} +func DeactivateLayer(info DriverInfo, id string) error { + return wclayer.DeactivateLayer(context.Background(), layerPath(&info, id)) +} + +func DestroyLayer(info DriverInfo, id string) error { + return wclayer.DestroyLayer(context.Background(), layerPath(&info, id)) +} + +// New clients should use ExpandScratchSize instead. Kept in to preserve API compatibility. 
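These shims forward the legacy layer API onto internal/wclayer with a background context. A minimal sketch of the usual lifecycle they expose, assuming placeholder IDs and HomeDir; the ordering (create scratch, activate, prepare, resolve the mount path) follows the standard wclayer sequence, with UnprepareLayer/DeactivateLayer/DestroyLayer undoing it:

package main

import "github.com/Microsoft/hcsshim"

// mountScratch creates and mounts a scratch layer on top of the given
// parent layer paths, returning the volume path of the mounted layer.
func mountScratch(parents []string) (string, error) {
	info := hcsshim.DriverInfo{HomeDir: `C:\layers`}
	const id = "scratch-0"
	if err := hcsshim.CreateScratchLayer(info, id, "", parents); err != nil {
		return "", err
	}
	if err := hcsshim.ActivateLayer(info, id); err != nil {
		return "", err
	}
	if err := hcsshim.PrepareLayer(info, id, parents); err != nil {
		return "", err
	}
	// The returned volume path is what the container's scratch space
	// gets mounted from.
	return hcsshim.GetLayerMountPath(info, id)
}

func main() {}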
+func ExpandSandboxSize(info DriverInfo, layerId string, size uint64) error { + return wclayer.ExpandScratchSize(context.Background(), layerPath(&info, layerId), size) +} +func ExpandScratchSize(info DriverInfo, layerId string, size uint64) error { + return wclayer.ExpandScratchSize(context.Background(), layerPath(&info, layerId), size) +} +func ExportLayer(info DriverInfo, layerId string, exportFolderPath string, parentLayerPaths []string) error { + return wclayer.ExportLayer(context.Background(), layerPath(&info, layerId), exportFolderPath, parentLayerPaths) +} +func GetLayerMountPath(info DriverInfo, id string) (string, error) { + return wclayer.GetLayerMountPath(context.Background(), layerPath(&info, id)) +} +func GetSharedBaseImages() (imageData string, err error) { + return wclayer.GetSharedBaseImages(context.Background()) +} +func ImportLayer(info DriverInfo, layerID string, importFolderPath string, parentLayerPaths []string) error { + return wclayer.ImportLayer(context.Background(), layerPath(&info, layerID), importFolderPath, parentLayerPaths) +} +func LayerExists(info DriverInfo, id string) (bool, error) { + return wclayer.LayerExists(context.Background(), layerPath(&info, id)) +} +func PrepareLayer(info DriverInfo, layerId string, parentLayerPaths []string) error { + return wclayer.PrepareLayer(context.Background(), layerPath(&info, layerId), parentLayerPaths) +} +func ProcessBaseLayer(path string) error { + return wclayer.ProcessBaseLayer(context.Background(), path) +} +func ProcessUtilityVMImage(path string) error { + return wclayer.ProcessUtilityVMImage(context.Background(), path) +} +func UnprepareLayer(info DriverInfo, layerId string) error { + return wclayer.UnprepareLayer(context.Background(), layerPath(&info, layerId)) +} +func ConvertToBaseLayer(path string) error { + return wclayer.ConvertToBaseLayer(context.Background(), path) +} + +type DriverInfo struct { + Flavour int + HomeDir string +} + +type GUID [16]byte + +func NameToGuid(name string) (id GUID, err error) { + g, err := wclayer.NameToGuid(context.Background(), name) + return g.ToWindowsArray(), err +} + +func NewGUID(source string) *GUID { + h := sha1.Sum([]byte(source)) + var g GUID + copy(g[0:], h[0:16]) + return &g +} + +func (g *GUID) ToString() string { + return guid.FromWindowsArray(*g).String() +} + +type LayerReader = wclayer.LayerReader + +func NewLayerReader(info DriverInfo, layerID string, parentLayerPaths []string) (LayerReader, error) { + return wclayer.NewLayerReader(context.Background(), layerPath(&info, layerID), parentLayerPaths) +} + +type LayerWriter = wclayer.LayerWriter + +func NewLayerWriter(info DriverInfo, layerID string, parentLayerPaths []string) (LayerWriter, error) { + return wclayer.NewLayerWriter(context.Background(), layerPath(&info, layerID), parentLayerPaths) +} + +type WC_LAYER_DESCRIPTOR = wclayer.WC_LAYER_DESCRIPTOR diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go b/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go new file mode 100644 index 0000000000..3227ebe89c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go @@ -0,0 +1,74 @@ +package osversion + +import ( + "fmt" + "sync" + + "golang.org/x/sys/windows" + "golang.org/x/sys/windows/registry" +) + +// OSVersion is a wrapper for Windows version information +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx +type OSVersion struct { + Version uint32 + MajorVersion uint8 + MinorVersion 
uint8 + Build uint16 +} + +var ( + osv OSVersion + once sync.Once +) + +// Get gets the operating system version on Windows. +// The calling application must be manifested to get the correct version information. +func Get() OSVersion { + once.Do(func() { + v := *windows.RtlGetVersion() + osv = OSVersion{} + osv.MajorVersion = uint8(v.MajorVersion) + osv.MinorVersion = uint8(v.MinorVersion) + osv.Build = uint16(v.BuildNumber) + // Fill version value so that existing clients don't break + osv.Version = v.BuildNumber << 16 + osv.Version = osv.Version | (uint32(v.MinorVersion) << 8) + osv.Version = osv.Version | v.MajorVersion + }) + return osv +} + +// Build gets the build-number on Windows +// The calling application must be manifested to get the correct version information. +func Build() uint16 { + return Get().Build +} + +// String returns the OSVersion formatted as a string. It implements the +// [fmt.Stringer] interface. +func (osv OSVersion) String() string { + return fmt.Sprintf("%d.%d.%d", osv.MajorVersion, osv.MinorVersion, osv.Build) +} + +// ToString returns the OSVersion formatted as a string. +// +// Deprecated: use [OSVersion.String]. +func (osv OSVersion) ToString() string { + return osv.String() +} + +// Running `cmd /c ver` shows something like "10.0.20348.1000". The last component ("1000") is the revision +// number +func BuildRevision() (uint32, error) { + k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) + if err != nil { + return 0, fmt.Errorf("open `CurrentVersion` registry key: %w", err) + } + defer k.Close() + s, _, err := k.GetIntegerValue("UBR") + if err != nil { + return 0, fmt.Errorf("read `UBR` from registry: %w", err) + } + return uint32(s), nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/platform_compat_windows.go b/vendor/github.com/Microsoft/hcsshim/osversion/platform_compat_windows.go new file mode 100644 index 0000000000..f8d411ad7e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/osversion/platform_compat_windows.go @@ -0,0 +1,35 @@ +package osversion + +// List of stable ABI compliant ltsc releases +// Note: List must be sorted in ascending order +var compatLTSCReleases = []uint16{ + V21H2Server, +} + +// CheckHostAndContainerCompat checks if given host and container +// OS versions are compatible. +// It includes support for stable ABI compliant versions as well. +// Every release after WS 2022 will support the previous ltsc +// container image. Stable ABI is in preview mode for windows 11 client. 
+// Refer: https://learn.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/version-compatibility?tabs=windows-server-2022%2Cwindows-10#windows-server-host-os-compatibility +func CheckHostAndContainerCompat(host, ctr OSVersion) bool { + // check major minor versions of host and guest + if host.MajorVersion != ctr.MajorVersion || + host.MinorVersion != ctr.MinorVersion { + return false + } + + // If host is < WS 2022, exact version match is required + if host.Build < V21H2Server { + return host.Build == ctr.Build + } + + var supportedLtscRelease uint16 + for i := len(compatLTSCReleases) - 1; i >= 0; i-- { + if host.Build >= compatLTSCReleases[i] { + supportedLtscRelease = compatLTSCReleases[i] + break + } + } + return ctr.Build >= supportedLtscRelease && ctr.Build <= host.Build +} diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go new file mode 100644 index 0000000000..446369591a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go @@ -0,0 +1,84 @@ +package osversion + +// Windows Client and Server build numbers. +// +// See: +// https://learn.microsoft.com/en-us/windows/release-health/release-information +// https://learn.microsoft.com/en-us/windows/release-health/windows-server-release-info +// https://learn.microsoft.com/en-us/windows/release-health/windows11-release-information +const ( + // RS1 (version 1607, codename "Redstone 1") corresponds to Windows Server + // 2016 (ltsc2016) and Windows 10 (Anniversary Update). + RS1 = 14393 + // V1607 (version 1607, codename "Redstone 1") is an alias for [RS1]. + V1607 = RS1 + // LTSC2016 (Windows Server 2016) is an alias for [RS1]. + LTSC2016 = RS1 + + // RS2 (version 1703, codename "Redstone 2") was a client-only update, and + // corresponds to Windows 10 (Creators Update). + RS2 = 15063 + // V1703 (version 1703, codename "Redstone 2") is an alias for [RS2]. + V1703 = RS2 + + // RS3 (version 1709, codename "Redstone 3") corresponds to Windows Server + // 1709 (Semi-Annual Channel (SAC)), and Windows 10 (Fall Creators Update). + RS3 = 16299 + // V1709 (version 1709, codename "Redstone 3") is an alias for [RS3]. + V1709 = RS3 + + // RS4 (version 1803, codename "Redstone 4") corresponds to Windows Server + // 1803 (Semi-Annual Channel (SAC)), and Windows 10 (April 2018 Update). + RS4 = 17134 + // V1803 (version 1803, codename "Redstone 4") is an alias for [RS4]. + V1803 = RS4 + + // RS5 (version 1809, codename "Redstone 5") corresponds to Windows Server + // 2019 (ltsc2019), and Windows 10 (October 2018 Update). + RS5 = 17763 + // V1809 (version 1809, codename "Redstone 5") is an alias for [RS5]. + V1809 = RS5 + // LTSC2019 (Windows Server 2019) is an alias for [RS5]. + LTSC2019 = RS5 + + // V19H1 (version 1903, codename 19H1) corresponds to Windows Server 1903 (semi-annual + // channel). + V19H1 = 18362 + // V1903 (version 1903) is an alias for [V19H1]. + V1903 = V19H1 + + // V19H2 (version 1909, codename 19H2) corresponds to Windows Server 1909 (semi-annual + // channel). + V19H2 = 18363 + // V1909 (version 1909) is an alias for [V19H2]. + V1909 = V19H2 + + // V20H1 (version 2004, codename 20H1) corresponds to Windows Server 2004 (semi-annual + // channel). + V20H1 = 19041 + // V2004 (version 2004) is an alias for [V20H1]. + V2004 = V20H1 + + // V20H2 corresponds to Windows Server 20H2 (semi-annual channel). 
+ V20H2 = 19042 + + // V21H1 corresponds to Windows Server 21H1 (semi-annual channel). + V21H1 = 19043 + + // V21H2Win10 corresponds to Windows 10 (November 2021 Update). + V21H2Win10 = 19044 + + // V21H2Server corresponds to Windows Server 2022 (ltsc2022). + V21H2Server = 20348 + // LTSC2022 (Windows Server 2022) is an alias for [V21H2Server] + LTSC2022 = V21H2Server + + // V21H2Win11 corresponds to Windows 11 (original release). + V21H2Win11 = 22000 + + // V22H2Win10 corresponds to Windows 10 (2022 Update). + V22H2Win10 = 19045 + + // V22H2Win11 corresponds to Windows 11 (2022 Update). + V22H2Win11 = 22621 +) diff --git a/vendor/github.com/Microsoft/hcsshim/process.go b/vendor/github.com/Microsoft/hcsshim/process.go new file mode 100644 index 0000000000..44df91cde2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/process.go @@ -0,0 +1,100 @@ +//go:build windows + +package hcsshim + +import ( + "context" + "io" + "sync" + "time" + + "github.com/Microsoft/hcsshim/internal/hcs" +) + +// ContainerError is an error encountered in HCS +type process struct { + p *hcs.Process + waitOnce sync.Once + waitCh chan struct{} + waitErr error +} + +// Pid returns the process ID of the process within the container. +func (process *process) Pid() int { + return process.p.Pid() +} + +// Kill signals the process to terminate but does not wait for it to finish terminating. +func (process *process) Kill() error { + found, err := process.p.Kill(context.Background()) + if err != nil { + return convertProcessError(err, process) + } + if !found { + return &ProcessError{Process: process, Err: ErrElementNotFound, Operation: "hcsshim::Process::Kill"} + } + return nil +} + +// Wait waits for the process to exit. +func (process *process) Wait() error { + return convertProcessError(process.p.Wait(), process) +} + +// WaitTimeout waits for the process to exit or the duration to elapse. It returns +// false if timeout occurs. +func (process *process) WaitTimeout(timeout time.Duration) error { + process.waitOnce.Do(func() { + process.waitCh = make(chan struct{}) + go func() { + process.waitErr = process.Wait() + close(process.waitCh) + }() + }) + t := time.NewTimer(timeout) + defer t.Stop() + select { + case <-t.C: + return &ProcessError{Process: process, Err: ErrTimeout, Operation: "hcsshim::Process::Wait"} + case <-process.waitCh: + return process.waitErr + } +} + +// ExitCode returns the exit code of the process. The process must have +// already terminated. +func (process *process) ExitCode() (int, error) { + code, err := process.p.ExitCode() + if err != nil { + err = convertProcessError(err, process) + } + return code, err +} + +// ResizeConsole resizes the console of the process. +func (process *process) ResizeConsole(width, height uint16) error { + return convertProcessError(process.p.ResizeConsole(context.Background(), width, height), process) +} + +// Stdio returns the stdin, stdout, and stderr pipes, respectively. Closing +// these pipes does not close the underlying pipes; it should be possible to +// call this multiple times to get multiple interfaces. +func (process *process) Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, error) { + stdin, stdout, stderr, err := process.p.StdioLegacy() + if err != nil { + err = convertProcessError(err, process) + } + return stdin, stdout, stderr, err +} + +// CloseStdin closes the write side of the stdin pipe so that the process is +// notified on the read side that there is no more data in stdin. 
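The WaitTimeout implementation above relies on a small concurrency idiom worth noting: a sync.Once starts exactly one waiter goroutine, and every caller races its own timer against the shared completion channel, so repeated timeouts never spawn duplicate waits. A generic, self-contained rendering of the same pattern (all names invented):

package main

import (
	"errors"
	"fmt"
	"sync"
	"time"
)

// onceWaiter starts a single waiter goroutine on first use and lets
// each caller race a timer against the shared completion channel.
type onceWaiter struct {
	once sync.Once
	done chan struct{}
	err  error
}

func (w *onceWaiter) waitTimeout(wait func() error, d time.Duration) error {
	w.once.Do(func() {
		w.done = make(chan struct{})
		go func() {
			w.err = wait()
			close(w.done)
		}()
	})
	t := time.NewTimer(d)
	defer t.Stop()
	select {
	case <-t.C:
		// Timed out; the waiter goroutine keeps running, so a later
		// call can still observe the result.
		return errors.New("timeout")
	case <-w.done:
		return w.err
	}
}

func main() {
	var w onceWaiter
	err := w.waitTimeout(func() error {
		time.Sleep(10 * time.Millisecond)
		return nil
	}, time.Second)
	fmt.Println("wait result:", err)
}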
+func (process *process) CloseStdin() error { + return convertProcessError(process.p.CloseStdin(context.Background()), process) +} + +// Close cleans up any state associated with the process but does not kill +// or wait on it. +func (process *process) Close() error { + return convertProcessError(process.p.Close(), process) +} diff --git a/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go new file mode 100644 index 0000000000..9b619b6e62 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go @@ -0,0 +1,57 @@ +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. + +package hcsshim + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll") + + procSetCurrentThreadCompartmentId = modiphlpapi.NewProc("SetCurrentThreadCompartmentId") +) + +func SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) { + r0, _, _ := syscall.Syscall(procSetCurrentThreadCompartmentId.Addr(), 1, uintptr(compartmentId), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} diff --git a/vendor/github.com/containerd/cgroups/v3/LICENSE b/vendor/github.com/containerd/cgroups/v3/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/github.com/containerd/cgroups/v3/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/cgroups/v3/cgroup1/stats/doc.go b/vendor/github.com/containerd/cgroups/v3/cgroup1/stats/doc.go new file mode 100644 index 0000000000..e51e12f800 --- /dev/null +++ b/vendor/github.com/containerd/cgroups/v3/cgroup1/stats/doc.go @@ -0,0 +1,17 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package stats diff --git a/vendor/github.com/containerd/cgroups/v3/cgroup1/stats/metrics.pb.go b/vendor/github.com/containerd/cgroups/v3/cgroup1/stats/metrics.pb.go new file mode 100644 index 0000000000..75206889ba --- /dev/null +++ b/vendor/github.com/containerd/cgroups/v3/cgroup1/stats/metrics.pb.go @@ -0,0 +1,1959 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.5 +// source: github.com/containerd/cgroups/cgroup1/stats/metrics.proto + +package stats + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Metrics struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Hugetlb []*HugetlbStat `protobuf:"bytes,1,rep,name=hugetlb,proto3" json:"hugetlb,omitempty"` + Pids *PidsStat `protobuf:"bytes,2,opt,name=pids,proto3" json:"pids,omitempty"` + CPU *CPUStat `protobuf:"bytes,3,opt,name=cpu,proto3" json:"cpu,omitempty"` + Memory *MemoryStat `protobuf:"bytes,4,opt,name=memory,proto3" json:"memory,omitempty"` + Blkio *BlkIOStat `protobuf:"bytes,5,opt,name=blkio,proto3" json:"blkio,omitempty"` + Rdma *RdmaStat `protobuf:"bytes,6,opt,name=rdma,proto3" json:"rdma,omitempty"` + Network []*NetworkStat `protobuf:"bytes,7,rep,name=network,proto3" json:"network,omitempty"` + CgroupStats *CgroupStats `protobuf:"bytes,8,opt,name=cgroup_stats,json=cgroupStats,proto3" json:"cgroup_stats,omitempty"` + MemoryOomControl *MemoryOomControl `protobuf:"bytes,9,opt,name=memory_oom_control,json=memoryOomControl,proto3" json:"memory_oom_control,omitempty"` +} + +func (x *Metrics) Reset() { + *x = Metrics{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Metrics) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Metrics) ProtoMessage() {} + +func (x *Metrics) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Metrics.ProtoReflect.Descriptor instead. 
+func (*Metrics) Descriptor() ([]byte, []int) { + return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{0} +} + +func (x *Metrics) GetHugetlb() []*HugetlbStat { + if x != nil { + return x.Hugetlb + } + return nil +} + +func (x *Metrics) GetPids() *PidsStat { + if x != nil { + return x.Pids + } + return nil +} + +func (x *Metrics) GetCPU() *CPUStat { + if x != nil { + return x.CPU + } + return nil +} + +func (x *Metrics) GetMemory() *MemoryStat { + if x != nil { + return x.Memory + } + return nil +} + +func (x *Metrics) GetBlkio() *BlkIOStat { + if x != nil { + return x.Blkio + } + return nil +} + +func (x *Metrics) GetRdma() *RdmaStat { + if x != nil { + return x.Rdma + } + return nil +} + +func (x *Metrics) GetNetwork() []*NetworkStat { + if x != nil { + return x.Network + } + return nil +} + +func (x *Metrics) GetCgroupStats() *CgroupStats { + if x != nil { + return x.CgroupStats + } + return nil +} + +func (x *Metrics) GetMemoryOomControl() *MemoryOomControl { + if x != nil { + return x.MemoryOomControl + } + return nil +} + +type HugetlbStat struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Usage uint64 `protobuf:"varint,1,opt,name=usage,proto3" json:"usage,omitempty"` + Max uint64 `protobuf:"varint,2,opt,name=max,proto3" json:"max,omitempty"` + Failcnt uint64 `protobuf:"varint,3,opt,name=failcnt,proto3" json:"failcnt,omitempty"` + Pagesize string `protobuf:"bytes,4,opt,name=pagesize,proto3" json:"pagesize,omitempty"` +} + +func (x *HugetlbStat) Reset() { + *x = HugetlbStat{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HugetlbStat) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HugetlbStat) ProtoMessage() {} + +func (x *HugetlbStat) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HugetlbStat.ProtoReflect.Descriptor instead. 
+func (*HugetlbStat) Descriptor() ([]byte, []int) { + return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{1} +} + +func (x *HugetlbStat) GetUsage() uint64 { + if x != nil { + return x.Usage + } + return 0 +} + +func (x *HugetlbStat) GetMax() uint64 { + if x != nil { + return x.Max + } + return 0 +} + +func (x *HugetlbStat) GetFailcnt() uint64 { + if x != nil { + return x.Failcnt + } + return 0 +} + +func (x *HugetlbStat) GetPagesize() string { + if x != nil { + return x.Pagesize + } + return "" +} + +type PidsStat struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Current uint64 `protobuf:"varint,1,opt,name=current,proto3" json:"current,omitempty"` + Limit uint64 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` +} + +func (x *PidsStat) Reset() { + *x = PidsStat{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PidsStat) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PidsStat) ProtoMessage() {} + +func (x *PidsStat) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PidsStat.ProtoReflect.Descriptor instead. +func (*PidsStat) Descriptor() ([]byte, []int) { + return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{2} +} + +func (x *PidsStat) GetCurrent() uint64 { + if x != nil { + return x.Current + } + return 0 +} + +func (x *PidsStat) GetLimit() uint64 { + if x != nil { + return x.Limit + } + return 0 +} + +type CPUStat struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Usage *CPUUsage `protobuf:"bytes,1,opt,name=usage,proto3" json:"usage,omitempty"` + Throttling *Throttle `protobuf:"bytes,2,opt,name=throttling,proto3" json:"throttling,omitempty"` +} + +func (x *CPUStat) Reset() { + *x = CPUStat{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CPUStat) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CPUStat) ProtoMessage() {} + +func (x *CPUStat) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CPUStat.ProtoReflect.Descriptor instead. 
+func (*CPUStat) Descriptor() ([]byte, []int) { + return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{3} +} + +func (x *CPUStat) GetUsage() *CPUUsage { + if x != nil { + return x.Usage + } + return nil +} + +func (x *CPUStat) GetThrottling() *Throttle { + if x != nil { + return x.Throttling + } + return nil +} + +type CPUUsage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // values in nanoseconds + Total uint64 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"` + Kernel uint64 `protobuf:"varint,2,opt,name=kernel,proto3" json:"kernel,omitempty"` + User uint64 `protobuf:"varint,3,opt,name=user,proto3" json:"user,omitempty"` + PerCPU []uint64 `protobuf:"varint,4,rep,packed,name=per_cpu,json=perCpu,proto3" json:"per_cpu,omitempty"` +} + +func (x *CPUUsage) Reset() { + *x = CPUUsage{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CPUUsage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CPUUsage) ProtoMessage() {} + +func (x *CPUUsage) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CPUUsage.ProtoReflect.Descriptor instead. +func (*CPUUsage) Descriptor() ([]byte, []int) { + return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{4} +} + +func (x *CPUUsage) GetTotal() uint64 { + if x != nil { + return x.Total + } + return 0 +} + +func (x *CPUUsage) GetKernel() uint64 { + if x != nil { + return x.Kernel + } + return 0 +} + +func (x *CPUUsage) GetUser() uint64 { + if x != nil { + return x.User + } + return 0 +} + +func (x *CPUUsage) GetPerCPU() []uint64 { + if x != nil { + return x.PerCPU + } + return nil +} + +type Throttle struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Periods uint64 `protobuf:"varint,1,opt,name=periods,proto3" json:"periods,omitempty"` + ThrottledPeriods uint64 `protobuf:"varint,2,opt,name=throttled_periods,json=throttledPeriods,proto3" json:"throttled_periods,omitempty"` + ThrottledTime uint64 `protobuf:"varint,3,opt,name=throttled_time,json=throttledTime,proto3" json:"throttled_time,omitempty"` +} + +func (x *Throttle) Reset() { + *x = Throttle{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Throttle) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Throttle) ProtoMessage() {} + +func (x *Throttle) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Throttle.ProtoReflect.Descriptor instead. 
+func (*Throttle) Descriptor() ([]byte, []int) { + return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{5} +} + +func (x *Throttle) GetPeriods() uint64 { + if x != nil { + return x.Periods + } + return 0 +} + +func (x *Throttle) GetThrottledPeriods() uint64 { + if x != nil { + return x.ThrottledPeriods + } + return 0 +} + +func (x *Throttle) GetThrottledTime() uint64 { + if x != nil { + return x.ThrottledTime + } + return 0 +} + +type MemoryStat struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Cache uint64 `protobuf:"varint,1,opt,name=cache,proto3" json:"cache,omitempty"` + RSS uint64 `protobuf:"varint,2,opt,name=rss,proto3" json:"rss,omitempty"` + RSSHuge uint64 `protobuf:"varint,3,opt,name=rss_huge,json=rssHuge,proto3" json:"rss_huge,omitempty"` + MappedFile uint64 `protobuf:"varint,4,opt,name=mapped_file,json=mappedFile,proto3" json:"mapped_file,omitempty"` + Dirty uint64 `protobuf:"varint,5,opt,name=dirty,proto3" json:"dirty,omitempty"` + Writeback uint64 `protobuf:"varint,6,opt,name=writeback,proto3" json:"writeback,omitempty"` + PgPgIn uint64 `protobuf:"varint,7,opt,name=pg_pg_in,json=pgPgIn,proto3" json:"pg_pg_in,omitempty"` + PgPgOut uint64 `protobuf:"varint,8,opt,name=pg_pg_out,json=pgPgOut,proto3" json:"pg_pg_out,omitempty"` + PgFault uint64 `protobuf:"varint,9,opt,name=pg_fault,json=pgFault,proto3" json:"pg_fault,omitempty"` + PgMajFault uint64 `protobuf:"varint,10,opt,name=pg_maj_fault,json=pgMajFault,proto3" json:"pg_maj_fault,omitempty"` + InactiveAnon uint64 `protobuf:"varint,11,opt,name=inactive_anon,json=inactiveAnon,proto3" json:"inactive_anon,omitempty"` + ActiveAnon uint64 `protobuf:"varint,12,opt,name=active_anon,json=activeAnon,proto3" json:"active_anon,omitempty"` + InactiveFile uint64 `protobuf:"varint,13,opt,name=inactive_file,json=inactiveFile,proto3" json:"inactive_file,omitempty"` + ActiveFile uint64 `protobuf:"varint,14,opt,name=active_file,json=activeFile,proto3" json:"active_file,omitempty"` + Unevictable uint64 `protobuf:"varint,15,opt,name=unevictable,proto3" json:"unevictable,omitempty"` + HierarchicalMemoryLimit uint64 `protobuf:"varint,16,opt,name=hierarchical_memory_limit,json=hierarchicalMemoryLimit,proto3" json:"hierarchical_memory_limit,omitempty"` + HierarchicalSwapLimit uint64 `protobuf:"varint,17,opt,name=hierarchical_swap_limit,json=hierarchicalSwapLimit,proto3" json:"hierarchical_swap_limit,omitempty"` + TotalCache uint64 `protobuf:"varint,18,opt,name=total_cache,json=totalCache,proto3" json:"total_cache,omitempty"` + TotalRSS uint64 `protobuf:"varint,19,opt,name=total_rss,json=totalRss,proto3" json:"total_rss,omitempty"` + TotalRSSHuge uint64 `protobuf:"varint,20,opt,name=total_rss_huge,json=totalRssHuge,proto3" json:"total_rss_huge,omitempty"` + TotalMappedFile uint64 `protobuf:"varint,21,opt,name=total_mapped_file,json=totalMappedFile,proto3" json:"total_mapped_file,omitempty"` + TotalDirty uint64 `protobuf:"varint,22,opt,name=total_dirty,json=totalDirty,proto3" json:"total_dirty,omitempty"` + TotalWriteback uint64 `protobuf:"varint,23,opt,name=total_writeback,json=totalWriteback,proto3" json:"total_writeback,omitempty"` + TotalPgPgIn uint64 `protobuf:"varint,24,opt,name=total_pg_pg_in,json=totalPgPgIn,proto3" json:"total_pg_pg_in,omitempty"` + TotalPgPgOut uint64 `protobuf:"varint,25,opt,name=total_pg_pg_out,json=totalPgPgOut,proto3" json:"total_pg_pg_out,omitempty"` + TotalPgFault uint64 
`protobuf:"varint,26,opt,name=total_pg_fault,json=totalPgFault,proto3" json:"total_pg_fault,omitempty"` + TotalPgMajFault uint64 `protobuf:"varint,27,opt,name=total_pg_maj_fault,json=totalPgMajFault,proto3" json:"total_pg_maj_fault,omitempty"` + TotalInactiveAnon uint64 `protobuf:"varint,28,opt,name=total_inactive_anon,json=totalInactiveAnon,proto3" json:"total_inactive_anon,omitempty"` + TotalActiveAnon uint64 `protobuf:"varint,29,opt,name=total_active_anon,json=totalActiveAnon,proto3" json:"total_active_anon,omitempty"` + TotalInactiveFile uint64 `protobuf:"varint,30,opt,name=total_inactive_file,json=totalInactiveFile,proto3" json:"total_inactive_file,omitempty"` + TotalActiveFile uint64 `protobuf:"varint,31,opt,name=total_active_file,json=totalActiveFile,proto3" json:"total_active_file,omitempty"` + TotalUnevictable uint64 `protobuf:"varint,32,opt,name=total_unevictable,json=totalUnevictable,proto3" json:"total_unevictable,omitempty"` + Usage *MemoryEntry `protobuf:"bytes,33,opt,name=usage,proto3" json:"usage,omitempty"` + Swap *MemoryEntry `protobuf:"bytes,34,opt,name=swap,proto3" json:"swap,omitempty"` + Kernel *MemoryEntry `protobuf:"bytes,35,opt,name=kernel,proto3" json:"kernel,omitempty"` + KernelTCP *MemoryEntry `protobuf:"bytes,36,opt,name=kernel_tcp,json=kernelTcp,proto3" json:"kernel_tcp,omitempty"` +} + +func (x *MemoryStat) Reset() { + *x = MemoryStat{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MemoryStat) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MemoryStat) ProtoMessage() {} + +func (x *MemoryStat) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MemoryStat.ProtoReflect.Descriptor instead. 
+func (*MemoryStat) Descriptor() ([]byte, []int) { + return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{6} +} + +func (x *MemoryStat) GetCache() uint64 { + if x != nil { + return x.Cache + } + return 0 +} + +func (x *MemoryStat) GetRSS() uint64 { + if x != nil { + return x.RSS + } + return 0 +} + +func (x *MemoryStat) GetRSSHuge() uint64 { + if x != nil { + return x.RSSHuge + } + return 0 +} + +func (x *MemoryStat) GetMappedFile() uint64 { + if x != nil { + return x.MappedFile + } + return 0 +} + +func (x *MemoryStat) GetDirty() uint64 { + if x != nil { + return x.Dirty + } + return 0 +} + +func (x *MemoryStat) GetWriteback() uint64 { + if x != nil { + return x.Writeback + } + return 0 +} + +func (x *MemoryStat) GetPgPgIn() uint64 { + if x != nil { + return x.PgPgIn + } + return 0 +} + +func (x *MemoryStat) GetPgPgOut() uint64 { + if x != nil { + return x.PgPgOut + } + return 0 +} + +func (x *MemoryStat) GetPgFault() uint64 { + if x != nil { + return x.PgFault + } + return 0 +} + +func (x *MemoryStat) GetPgMajFault() uint64 { + if x != nil { + return x.PgMajFault + } + return 0 +} + +func (x *MemoryStat) GetInactiveAnon() uint64 { + if x != nil { + return x.InactiveAnon + } + return 0 +} + +func (x *MemoryStat) GetActiveAnon() uint64 { + if x != nil { + return x.ActiveAnon + } + return 0 +} + +func (x *MemoryStat) GetInactiveFile() uint64 { + if x != nil { + return x.InactiveFile + } + return 0 +} + +func (x *MemoryStat) GetActiveFile() uint64 { + if x != nil { + return x.ActiveFile + } + return 0 +} + +func (x *MemoryStat) GetUnevictable() uint64 { + if x != nil { + return x.Unevictable + } + return 0 +} + +func (x *MemoryStat) GetHierarchicalMemoryLimit() uint64 { + if x != nil { + return x.HierarchicalMemoryLimit + } + return 0 +} + +func (x *MemoryStat) GetHierarchicalSwapLimit() uint64 { + if x != nil { + return x.HierarchicalSwapLimit + } + return 0 +} + +func (x *MemoryStat) GetTotalCache() uint64 { + if x != nil { + return x.TotalCache + } + return 0 +} + +func (x *MemoryStat) GetTotalRSS() uint64 { + if x != nil { + return x.TotalRSS + } + return 0 +} + +func (x *MemoryStat) GetTotalRSSHuge() uint64 { + if x != nil { + return x.TotalRSSHuge + } + return 0 +} + +func (x *MemoryStat) GetTotalMappedFile() uint64 { + if x != nil { + return x.TotalMappedFile + } + return 0 +} + +func (x *MemoryStat) GetTotalDirty() uint64 { + if x != nil { + return x.TotalDirty + } + return 0 +} + +func (x *MemoryStat) GetTotalWriteback() uint64 { + if x != nil { + return x.TotalWriteback + } + return 0 +} + +func (x *MemoryStat) GetTotalPgPgIn() uint64 { + if x != nil { + return x.TotalPgPgIn + } + return 0 +} + +func (x *MemoryStat) GetTotalPgPgOut() uint64 { + if x != nil { + return x.TotalPgPgOut + } + return 0 +} + +func (x *MemoryStat) GetTotalPgFault() uint64 { + if x != nil { + return x.TotalPgFault + } + return 0 +} + +func (x *MemoryStat) GetTotalPgMajFault() uint64 { + if x != nil { + return x.TotalPgMajFault + } + return 0 +} + +func (x *MemoryStat) GetTotalInactiveAnon() uint64 { + if x != nil { + return x.TotalInactiveAnon + } + return 0 +} + +func (x *MemoryStat) GetTotalActiveAnon() uint64 { + if x != nil { + return x.TotalActiveAnon + } + return 0 +} + +func (x *MemoryStat) GetTotalInactiveFile() uint64 { + if x != nil { + return x.TotalInactiveFile + } + return 0 +} + +func (x *MemoryStat) GetTotalActiveFile() uint64 { + if x != nil { + return x.TotalActiveFile + } + return 0 +} + +func (x *MemoryStat) GetTotalUnevictable() uint64 { + if x != 
nil { + return x.TotalUnevictable + } + return 0 +} + +func (x *MemoryStat) GetUsage() *MemoryEntry { + if x != nil { + return x.Usage + } + return nil +} + +func (x *MemoryStat) GetSwap() *MemoryEntry { + if x != nil { + return x.Swap + } + return nil +} + +func (x *MemoryStat) GetKernel() *MemoryEntry { + if x != nil { + return x.Kernel + } + return nil +} + +func (x *MemoryStat) GetKernelTCP() *MemoryEntry { + if x != nil { + return x.KernelTCP + } + return nil +} + +type MemoryEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Limit uint64 `protobuf:"varint,1,opt,name=limit,proto3" json:"limit,omitempty"` + Usage uint64 `protobuf:"varint,2,opt,name=usage,proto3" json:"usage,omitempty"` + Max uint64 `protobuf:"varint,3,opt,name=max,proto3" json:"max,omitempty"` + Failcnt uint64 `protobuf:"varint,4,opt,name=failcnt,proto3" json:"failcnt,omitempty"` +} + +func (x *MemoryEntry) Reset() { + *x = MemoryEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MemoryEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MemoryEntry) ProtoMessage() {} + +func (x *MemoryEntry) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MemoryEntry.ProtoReflect.Descriptor instead. +func (*MemoryEntry) Descriptor() ([]byte, []int) { + return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{7} +} + +func (x *MemoryEntry) GetLimit() uint64 { + if x != nil { + return x.Limit + } + return 0 +} + +func (x *MemoryEntry) GetUsage() uint64 { + if x != nil { + return x.Usage + } + return 0 +} + +func (x *MemoryEntry) GetMax() uint64 { + if x != nil { + return x.Max + } + return 0 +} + +func (x *MemoryEntry) GetFailcnt() uint64 { + if x != nil { + return x.Failcnt + } + return 0 +} + +type MemoryOomControl struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OomKillDisable uint64 `protobuf:"varint,1,opt,name=oom_kill_disable,json=oomKillDisable,proto3" json:"oom_kill_disable,omitempty"` + UnderOom uint64 `protobuf:"varint,2,opt,name=under_oom,json=underOom,proto3" json:"under_oom,omitempty"` + OomKill uint64 `protobuf:"varint,3,opt,name=oom_kill,json=oomKill,proto3" json:"oom_kill,omitempty"` +} + +func (x *MemoryOomControl) Reset() { + *x = MemoryOomControl{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MemoryOomControl) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MemoryOomControl) ProtoMessage() {} + +func (x *MemoryOomControl) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: 
Use MemoryOomControl.ProtoReflect.Descriptor instead. +func (*MemoryOomControl) Descriptor() ([]byte, []int) { + return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{8} +} + +func (x *MemoryOomControl) GetOomKillDisable() uint64 { + if x != nil { + return x.OomKillDisable + } + return 0 +} + +func (x *MemoryOomControl) GetUnderOom() uint64 { + if x != nil { + return x.UnderOom + } + return 0 +} + +func (x *MemoryOomControl) GetOomKill() uint64 { + if x != nil { + return x.OomKill + } + return 0 +} + +type BlkIOStat struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + IoServiceBytesRecursive []*BlkIOEntry `protobuf:"bytes,1,rep,name=io_service_bytes_recursive,json=ioServiceBytesRecursive,proto3" json:"io_service_bytes_recursive,omitempty"` + IoServicedRecursive []*BlkIOEntry `protobuf:"bytes,2,rep,name=io_serviced_recursive,json=ioServicedRecursive,proto3" json:"io_serviced_recursive,omitempty"` + IoQueuedRecursive []*BlkIOEntry `protobuf:"bytes,3,rep,name=io_queued_recursive,json=ioQueuedRecursive,proto3" json:"io_queued_recursive,omitempty"` + IoServiceTimeRecursive []*BlkIOEntry `protobuf:"bytes,4,rep,name=io_service_time_recursive,json=ioServiceTimeRecursive,proto3" json:"io_service_time_recursive,omitempty"` + IoWaitTimeRecursive []*BlkIOEntry `protobuf:"bytes,5,rep,name=io_wait_time_recursive,json=ioWaitTimeRecursive,proto3" json:"io_wait_time_recursive,omitempty"` + IoMergedRecursive []*BlkIOEntry `protobuf:"bytes,6,rep,name=io_merged_recursive,json=ioMergedRecursive,proto3" json:"io_merged_recursive,omitempty"` + IoTimeRecursive []*BlkIOEntry `protobuf:"bytes,7,rep,name=io_time_recursive,json=ioTimeRecursive,proto3" json:"io_time_recursive,omitempty"` + SectorsRecursive []*BlkIOEntry `protobuf:"bytes,8,rep,name=sectors_recursive,json=sectorsRecursive,proto3" json:"sectors_recursive,omitempty"` +} + +func (x *BlkIOStat) Reset() { + *x = BlkIOStat{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlkIOStat) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlkIOStat) ProtoMessage() {} + +func (x *BlkIOStat) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlkIOStat.ProtoReflect.Descriptor instead. 
+func (*BlkIOStat) Descriptor() ([]byte, []int) { + return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{9} +} + +func (x *BlkIOStat) GetIoServiceBytesRecursive() []*BlkIOEntry { + if x != nil { + return x.IoServiceBytesRecursive + } + return nil +} + +func (x *BlkIOStat) GetIoServicedRecursive() []*BlkIOEntry { + if x != nil { + return x.IoServicedRecursive + } + return nil +} + +func (x *BlkIOStat) GetIoQueuedRecursive() []*BlkIOEntry { + if x != nil { + return x.IoQueuedRecursive + } + return nil +} + +func (x *BlkIOStat) GetIoServiceTimeRecursive() []*BlkIOEntry { + if x != nil { + return x.IoServiceTimeRecursive + } + return nil +} + +func (x *BlkIOStat) GetIoWaitTimeRecursive() []*BlkIOEntry { + if x != nil { + return x.IoWaitTimeRecursive + } + return nil +} + +func (x *BlkIOStat) GetIoMergedRecursive() []*BlkIOEntry { + if x != nil { + return x.IoMergedRecursive + } + return nil +} + +func (x *BlkIOStat) GetIoTimeRecursive() []*BlkIOEntry { + if x != nil { + return x.IoTimeRecursive + } + return nil +} + +func (x *BlkIOStat) GetSectorsRecursive() []*BlkIOEntry { + if x != nil { + return x.SectorsRecursive + } + return nil +} + +type BlkIOEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Op string `protobuf:"bytes,1,opt,name=op,proto3" json:"op,omitempty"` + Device string `protobuf:"bytes,2,opt,name=device,proto3" json:"device,omitempty"` + Major uint64 `protobuf:"varint,3,opt,name=major,proto3" json:"major,omitempty"` + Minor uint64 `protobuf:"varint,4,opt,name=minor,proto3" json:"minor,omitempty"` + Value uint64 `protobuf:"varint,5,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *BlkIOEntry) Reset() { + *x = BlkIOEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlkIOEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlkIOEntry) ProtoMessage() {} + +func (x *BlkIOEntry) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlkIOEntry.ProtoReflect.Descriptor instead. 
+func (*BlkIOEntry) Descriptor() ([]byte, []int) { + return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{10} +} + +func (x *BlkIOEntry) GetOp() string { + if x != nil { + return x.Op + } + return "" +} + +func (x *BlkIOEntry) GetDevice() string { + if x != nil { + return x.Device + } + return "" +} + +func (x *BlkIOEntry) GetMajor() uint64 { + if x != nil { + return x.Major + } + return 0 +} + +func (x *BlkIOEntry) GetMinor() uint64 { + if x != nil { + return x.Minor + } + return 0 +} + +func (x *BlkIOEntry) GetValue() uint64 { + if x != nil { + return x.Value + } + return 0 +} + +type RdmaStat struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Current []*RdmaEntry `protobuf:"bytes,1,rep,name=current,proto3" json:"current,omitempty"` + Limit []*RdmaEntry `protobuf:"bytes,2,rep,name=limit,proto3" json:"limit,omitempty"` +} + +func (x *RdmaStat) Reset() { + *x = RdmaStat{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RdmaStat) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RdmaStat) ProtoMessage() {} + +func (x *RdmaStat) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RdmaStat.ProtoReflect.Descriptor instead. +func (*RdmaStat) Descriptor() ([]byte, []int) { + return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{11} +} + +func (x *RdmaStat) GetCurrent() []*RdmaEntry { + if x != nil { + return x.Current + } + return nil +} + +func (x *RdmaStat) GetLimit() []*RdmaEntry { + if x != nil { + return x.Limit + } + return nil +} + +type RdmaEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Device string `protobuf:"bytes,1,opt,name=device,proto3" json:"device,omitempty"` + HcaHandles uint32 `protobuf:"varint,2,opt,name=hca_handles,json=hcaHandles,proto3" json:"hca_handles,omitempty"` + HcaObjects uint32 `protobuf:"varint,3,opt,name=hca_objects,json=hcaObjects,proto3" json:"hca_objects,omitempty"` +} + +func (x *RdmaEntry) Reset() { + *x = RdmaEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RdmaEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RdmaEntry) ProtoMessage() {} + +func (x *RdmaEntry) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RdmaEntry.ProtoReflect.Descriptor instead. 
+func (*RdmaEntry) Descriptor() ([]byte, []int) { + return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{12} +} + +func (x *RdmaEntry) GetDevice() string { + if x != nil { + return x.Device + } + return "" +} + +func (x *RdmaEntry) GetHcaHandles() uint32 { + if x != nil { + return x.HcaHandles + } + return 0 +} + +func (x *RdmaEntry) GetHcaObjects() uint32 { + if x != nil { + return x.HcaObjects + } + return 0 +} + +type NetworkStat struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + RxBytes uint64 `protobuf:"varint,2,opt,name=rx_bytes,json=rxBytes,proto3" json:"rx_bytes,omitempty"` + RxPackets uint64 `protobuf:"varint,3,opt,name=rx_packets,json=rxPackets,proto3" json:"rx_packets,omitempty"` + RxErrors uint64 `protobuf:"varint,4,opt,name=rx_errors,json=rxErrors,proto3" json:"rx_errors,omitempty"` + RxDropped uint64 `protobuf:"varint,5,opt,name=rx_dropped,json=rxDropped,proto3" json:"rx_dropped,omitempty"` + TxBytes uint64 `protobuf:"varint,6,opt,name=tx_bytes,json=txBytes,proto3" json:"tx_bytes,omitempty"` + TxPackets uint64 `protobuf:"varint,7,opt,name=tx_packets,json=txPackets,proto3" json:"tx_packets,omitempty"` + TxErrors uint64 `protobuf:"varint,8,opt,name=tx_errors,json=txErrors,proto3" json:"tx_errors,omitempty"` + TxDropped uint64 `protobuf:"varint,9,opt,name=tx_dropped,json=txDropped,proto3" json:"tx_dropped,omitempty"` +} + +func (x *NetworkStat) Reset() { + *x = NetworkStat{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NetworkStat) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NetworkStat) ProtoMessage() {} + +func (x *NetworkStat) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NetworkStat.ProtoReflect.Descriptor instead. +func (*NetworkStat) Descriptor() ([]byte, []int) { + return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{13} +} + +func (x *NetworkStat) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NetworkStat) GetRxBytes() uint64 { + if x != nil { + return x.RxBytes + } + return 0 +} + +func (x *NetworkStat) GetRxPackets() uint64 { + if x != nil { + return x.RxPackets + } + return 0 +} + +func (x *NetworkStat) GetRxErrors() uint64 { + if x != nil { + return x.RxErrors + } + return 0 +} + +func (x *NetworkStat) GetRxDropped() uint64 { + if x != nil { + return x.RxDropped + } + return 0 +} + +func (x *NetworkStat) GetTxBytes() uint64 { + if x != nil { + return x.TxBytes + } + return 0 +} + +func (x *NetworkStat) GetTxPackets() uint64 { + if x != nil { + return x.TxPackets + } + return 0 +} + +func (x *NetworkStat) GetTxErrors() uint64 { + if x != nil { + return x.TxErrors + } + return 0 +} + +func (x *NetworkStat) GetTxDropped() uint64 { + if x != nil { + return x.TxDropped + } + return 0 +} + +// CgroupStats exports per-cgroup statistics. 
+type CgroupStats struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // number of tasks sleeping + NrSleeping uint64 `protobuf:"varint,1,opt,name=nr_sleeping,json=nrSleeping,proto3" json:"nr_sleeping,omitempty"` + // number of tasks running + NrRunning uint64 `protobuf:"varint,2,opt,name=nr_running,json=nrRunning,proto3" json:"nr_running,omitempty"` + // number of tasks in stopped state + NrStopped uint64 `protobuf:"varint,3,opt,name=nr_stopped,json=nrStopped,proto3" json:"nr_stopped,omitempty"` + // number of tasks in uninterruptible state + NrUninterruptible uint64 `protobuf:"varint,4,opt,name=nr_uninterruptible,json=nrUninterruptible,proto3" json:"nr_uninterruptible,omitempty"` + // number of tasks waiting on IO + NrIoWait uint64 `protobuf:"varint,5,opt,name=nr_io_wait,json=nrIoWait,proto3" json:"nr_io_wait,omitempty"` +} + +func (x *CgroupStats) Reset() { + *x = CgroupStats{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CgroupStats) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CgroupStats) ProtoMessage() {} + +func (x *CgroupStats) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CgroupStats.ProtoReflect.Descriptor instead. +func (*CgroupStats) Descriptor() ([]byte, []int) { + return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{14} +} + +func (x *CgroupStats) GetNrSleeping() uint64 { + if x != nil { + return x.NrSleeping + } + return 0 +} + +func (x *CgroupStats) GetNrRunning() uint64 { + if x != nil { + return x.NrRunning + } + return 0 +} + +func (x *CgroupStats) GetNrStopped() uint64 { + if x != nil { + return x.NrStopped + } + return 0 +} + +func (x *CgroupStats) GetNrUninterruptible() uint64 { + if x != nil { + return x.NrUninterruptible + } + return 0 +} + +func (x *CgroupStats) GetNrIoWait() uint64 { + if x != nil { + return x.NrIoWait + } + return 0 +} + +var File_github_com_containerd_cgroups_cgroup1_stats_metrics_proto protoreflect.FileDescriptor + +var file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDesc = []byte{ + 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, + 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x31, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x2f, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x69, 0x6f, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, + 0x70, 0x73, 0x2e, 0x76, 0x31, 0x22, 0xcd, 0x04, 0x0a, 0x07, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x12, 0x3f, 0x0a, 0x07, 0x68, 0x75, 0x67, 0x65, 0x74, 0x6c, 0x62, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x75, + 0x67, 0x65, 0x74, 0x6c, 0x62, 0x53, 0x74, 0x61, 0x74, 0x52, 0x07, 0x68, 0x75, 
0x67, 0x65, 0x74, + 0x6c, 0x62, 0x12, 0x36, 0x0a, 0x04, 0x70, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x69, 0x64, 0x73, + 0x53, 0x74, 0x61, 0x74, 0x52, 0x04, 0x70, 0x69, 0x64, 0x73, 0x12, 0x33, 0x0a, 0x03, 0x63, 0x70, + 0x75, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, + 0x76, 0x31, 0x2e, 0x43, 0x50, 0x55, 0x53, 0x74, 0x61, 0x74, 0x52, 0x03, 0x63, 0x70, 0x75, 0x12, + 0x3c, 0x0a, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, + 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, + 0x79, 0x53, 0x74, 0x61, 0x74, 0x52, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x39, 0x0a, + 0x05, 0x62, 0x6c, 0x6b, 0x69, 0x6f, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x69, + 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, + 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6b, 0x49, 0x4f, 0x53, 0x74, 0x61, + 0x74, 0x52, 0x05, 0x62, 0x6c, 0x6b, 0x69, 0x6f, 0x12, 0x36, 0x0a, 0x04, 0x72, 0x64, 0x6d, 0x61, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, + 0x31, 0x2e, 0x52, 0x64, 0x6d, 0x61, 0x53, 0x74, 0x61, 0x74, 0x52, 0x04, 0x72, 0x64, 0x6d, 0x61, + 0x12, 0x3f, 0x0a, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x07, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x25, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x65, 0x74, + 0x77, 0x6f, 0x72, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x52, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x12, 0x48, 0x0a, 0x0c, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, + 0x76, 0x31, 0x2e, 0x43, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x0b, + 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x12, 0x6d, + 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x6f, 0x6f, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, + 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x4f, 0x6f, 0x6d, 0x43, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x52, 0x10, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x4f, 0x6f, 0x6d, 0x43, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x22, 0x6b, 0x0a, 0x0b, 0x48, 0x75, 0x67, 0x65, 0x74, 0x6c, 0x62, + 0x53, 0x74, 0x61, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x61, + 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x6d, 0x61, 0x78, 0x12, 0x18, 0x0a, 0x07, + 0x66, 
0x61, 0x69, 0x6c, 0x63, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x66, + 0x61, 0x69, 0x6c, 0x63, 0x6e, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x67, 0x65, 0x73, 0x69, + 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x73, 0x69, + 0x7a, 0x65, 0x22, 0x3a, 0x0a, 0x08, 0x50, 0x69, 0x64, 0x73, 0x53, 0x74, 0x61, 0x74, 0x12, 0x18, + 0x0a, 0x07, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x07, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x87, + 0x01, 0x0a, 0x07, 0x43, 0x50, 0x55, 0x53, 0x74, 0x61, 0x74, 0x12, 0x38, 0x0a, 0x05, 0x75, 0x73, + 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x69, 0x6f, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x50, 0x55, 0x55, 0x73, 0x61, 0x67, 0x65, 0x52, 0x05, 0x75, + 0x73, 0x61, 0x67, 0x65, 0x12, 0x42, 0x0a, 0x0a, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, + 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x52, 0x0a, 0x74, 0x68, + 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x22, 0x65, 0x0a, 0x08, 0x43, 0x50, 0x55, 0x55, + 0x73, 0x61, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x6b, 0x65, + 0x72, 0x6e, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6b, 0x65, 0x72, 0x6e, + 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x12, 0x17, 0x0a, 0x07, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x70, + 0x75, 0x18, 0x04, 0x20, 0x03, 0x28, 0x04, 0x52, 0x06, 0x70, 0x65, 0x72, 0x43, 0x70, 0x75, 0x22, + 0x78, 0x0a, 0x08, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, + 0x65, 0x72, 0x69, 0x6f, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x70, 0x65, + 0x72, 0x69, 0x6f, 0x64, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, + 0x65, 0x64, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x10, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x50, 0x65, 0x72, 0x69, 0x6f, + 0x64, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x74, 0x68, 0x72, 0x6f, + 0x74, 0x74, 0x6c, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x94, 0x0b, 0x0a, 0x0a, 0x4d, 0x65, + 0x6d, 0x6f, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x61, 0x63, 0x68, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, 0x61, 0x63, 0x68, 0x65, 0x12, 0x10, + 0x0a, 0x03, 0x72, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x72, 0x73, 0x73, + 0x12, 0x19, 0x0a, 0x08, 0x72, 0x73, 0x73, 0x5f, 0x68, 0x75, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x07, 0x72, 0x73, 0x73, 0x48, 0x75, 0x67, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, + 0x61, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x0a, 0x6d, 0x61, 0x70, 
0x70, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x64, 0x69, 0x72, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x64, 0x69, 0x72, + 0x74, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x77, 0x72, 0x69, 0x74, 0x65, 0x62, 0x61, 0x63, 0x6b, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x77, 0x72, 0x69, 0x74, 0x65, 0x62, 0x61, 0x63, 0x6b, + 0x12, 0x18, 0x0a, 0x08, 0x70, 0x67, 0x5f, 0x70, 0x67, 0x5f, 0x69, 0x6e, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x06, 0x70, 0x67, 0x50, 0x67, 0x49, 0x6e, 0x12, 0x1a, 0x0a, 0x09, 0x70, 0x67, + 0x5f, 0x70, 0x67, 0x5f, 0x6f, 0x75, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x70, + 0x67, 0x50, 0x67, 0x4f, 0x75, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x67, 0x5f, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x70, 0x67, 0x46, 0x61, 0x75, 0x6c, + 0x74, 0x12, 0x20, 0x0a, 0x0c, 0x70, 0x67, 0x5f, 0x6d, 0x61, 0x6a, 0x5f, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x70, 0x67, 0x4d, 0x61, 0x6a, 0x46, 0x61, + 0x75, 0x6c, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x61, 0x6e, 0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x69, 0x6e, 0x61, 0x63, + 0x74, 0x69, 0x76, 0x65, 0x41, 0x6e, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x5f, 0x61, 0x6e, 0x6f, 0x6e, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x61, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x41, 0x6e, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x61, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x0c, 0x69, 0x6e, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x1f, + 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, + 0x20, 0x0a, 0x0b, 0x75, 0x6e, 0x65, 0x76, 0x69, 0x63, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x0f, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x75, 0x6e, 0x65, 0x76, 0x69, 0x63, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x12, 0x3a, 0x0a, 0x19, 0x68, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, + 0x6c, 0x5f, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x10, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x17, 0x68, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, + 0x61, 0x6c, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x36, 0x0a, + 0x17, 0x68, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x77, + 0x61, 0x70, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x11, 0x20, 0x01, 0x28, 0x04, 0x52, 0x15, + 0x68, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x77, 0x61, 0x70, + 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x63, + 0x61, 0x63, 0x68, 0x65, 0x18, 0x12, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x74, 0x6f, 0x74, 0x61, + 0x6c, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, + 0x72, 0x73, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x74, 0x6f, 0x74, 0x61, 0x6c, + 0x52, 0x73, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x72, 0x73, 0x73, + 0x5f, 0x68, 0x75, 0x67, 0x65, 0x18, 0x14, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x74, 0x6f, 0x74, + 0x61, 0x6c, 0x52, 0x73, 0x73, 0x48, 0x75, 0x67, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x74, 0x6f, 0x74, + 0x61, 0x6c, 0x5f, 0x6d, 0x61, 0x70, 0x70, 0x65, 0x64, 
0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x15, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x4d, 0x61, 0x70, 0x70, 0x65, + 0x64, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x64, + 0x69, 0x72, 0x74, 0x79, 0x18, 0x16, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x74, 0x6f, 0x74, 0x61, + 0x6c, 0x44, 0x69, 0x72, 0x74, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x62, 0x61, 0x63, 0x6b, 0x18, 0x17, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x0e, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x62, 0x61, 0x63, 0x6b, 0x12, + 0x23, 0x0a, 0x0e, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x67, 0x5f, 0x70, 0x67, 0x5f, 0x69, + 0x6e, 0x18, 0x18, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x67, + 0x50, 0x67, 0x49, 0x6e, 0x12, 0x25, 0x0a, 0x0f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x67, + 0x5f, 0x70, 0x67, 0x5f, 0x6f, 0x75, 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x74, + 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x67, 0x50, 0x67, 0x4f, 0x75, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x74, + 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x67, 0x5f, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x1a, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x0c, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x67, 0x46, 0x61, 0x75, 0x6c, + 0x74, 0x12, 0x2b, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x67, 0x5f, 0x6d, 0x61, + 0x6a, 0x5f, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x74, + 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x67, 0x4d, 0x61, 0x6a, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x2e, + 0x0a, 0x13, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x69, 0x6e, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, + 0x5f, 0x61, 0x6e, 0x6f, 0x6e, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x74, 0x6f, 0x74, + 0x61, 0x6c, 0x49, 0x6e, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x41, 0x6e, 0x6f, 0x6e, 0x12, 0x2a, + 0x0a, 0x11, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x61, + 0x6e, 0x6f, 0x6e, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x74, 0x6f, 0x74, 0x61, 0x6c, + 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x41, 0x6e, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x13, 0x74, 0x6f, + 0x74, 0x61, 0x6c, 0x5f, 0x69, 0x6e, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x66, 0x69, 0x6c, + 0x65, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x49, 0x6e, + 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x74, 0x6f, + 0x74, 0x61, 0x6c, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, + 0x1f, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x41, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, + 0x75, 0x6e, 0x65, 0x76, 0x69, 0x63, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x20, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x55, 0x6e, 0x65, 0x76, 0x69, 0x63, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x21, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, + 0x6d, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, + 0x12, 0x39, 0x0a, 0x04, 0x73, 0x77, 0x61, 0x70, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, + 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 
0x64, 0x2e, 0x63, + 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x73, 0x77, 0x61, 0x70, 0x12, 0x3d, 0x0a, 0x06, 0x6b, + 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x69, 0x6f, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, + 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x06, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x12, 0x44, 0x0a, 0x0a, 0x6b, 0x65, + 0x72, 0x6e, 0x65, 0x6c, 0x5f, 0x74, 0x63, 0x70, 0x18, 0x24, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, + 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, + 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x54, 0x63, 0x70, + 0x22, 0x65, 0x0a, 0x0b, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, + 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6d, + 0x61, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x6d, 0x61, 0x78, 0x12, 0x18, 0x0a, + 0x07, 0x66, 0x61, 0x69, 0x6c, 0x63, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, + 0x66, 0x61, 0x69, 0x6c, 0x63, 0x6e, 0x74, 0x22, 0x74, 0x0a, 0x10, 0x4d, 0x65, 0x6d, 0x6f, 0x72, + 0x79, 0x4f, 0x6f, 0x6d, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x28, 0x0a, 0x10, 0x6f, + 0x6f, 0x6d, 0x5f, 0x6b, 0x69, 0x6c, 0x6c, 0x5f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x6f, 0x6f, 0x6d, 0x4b, 0x69, 0x6c, 0x6c, 0x44, 0x69, + 0x73, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x6e, 0x64, 0x65, 0x72, 0x5f, 0x6f, + 0x6f, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x75, 0x6e, 0x64, 0x65, 0x72, 0x4f, + 0x6f, 0x6d, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x6f, 0x6d, 0x5f, 0x6b, 0x69, 0x6c, 0x6c, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x6f, 0x6f, 0x6d, 0x4b, 0x69, 0x6c, 0x6c, 0x22, 0xd5, 0x05, + 0x0a, 0x09, 0x42, 0x6c, 0x6b, 0x49, 0x4f, 0x53, 0x74, 0x61, 0x74, 0x12, 0x61, 0x0a, 0x1a, 0x69, + 0x6f, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, + 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, + 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6b, 0x49, 0x4f, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x17, 0x69, 0x6f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x12, 0x58, + 0x0a, 0x15, 0x69, 0x6f, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x64, 0x5f, 0x72, 0x65, + 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, + 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6b, 0x49, 0x4f, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x13, 0x69, 0x6f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x64, 0x52, + 0x65, 
0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x12, 0x54, 0x0a, 0x13, 0x69, 0x6f, 0x5f, 0x71, + 0x75, 0x65, 0x75, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x42, 0x6c, 0x6b, 0x49, 0x4f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x69, 0x6f, 0x51, + 0x75, 0x65, 0x75, 0x65, 0x64, 0x52, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x12, 0x5f, + 0x0a, 0x19, 0x69, 0x6f, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x5f, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6b, + 0x49, 0x4f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x16, 0x69, 0x6f, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x12, + 0x59, 0x0a, 0x16, 0x69, 0x6f, 0x5f, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, + 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, + 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6b, 0x49, 0x4f, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x69, 0x6f, 0x57, 0x61, 0x69, 0x74, 0x54, 0x69, 0x6d, + 0x65, 0x52, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x12, 0x54, 0x0a, 0x13, 0x69, 0x6f, + 0x5f, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, + 0x65, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, + 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6b, 0x49, 0x4f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x69, + 0x6f, 0x4d, 0x65, 0x72, 0x67, 0x65, 0x64, 0x52, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, + 0x12, 0x50, 0x0a, 0x11, 0x69, 0x6f, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x72, 0x65, 0x63, 0x75, + 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x69, 0x6f, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, + 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6b, 0x49, 0x4f, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0f, 0x69, 0x6f, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, + 0x76, 0x65, 0x12, 0x51, 0x0a, 0x11, 0x73, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x5f, 0x72, 0x65, + 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, + 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6b, 0x49, 0x4f, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x10, 0x73, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x63, 0x75, + 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, 0x76, 0x0a, 0x0a, 0x42, 0x6c, 0x6b, 0x49, 0x4f, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x02, 0x6f, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 
0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6d, + 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, + 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x84, 0x01, + 0x0a, 0x08, 0x52, 0x64, 0x6d, 0x61, 0x53, 0x74, 0x61, 0x74, 0x12, 0x3d, 0x0a, 0x07, 0x63, 0x75, + 0x72, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x69, 0x6f, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, + 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x64, 0x6d, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x07, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x05, 0x6c, 0x69, 0x6d, + 0x69, 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x64, 0x6d, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x6c, + 0x69, 0x6d, 0x69, 0x74, 0x22, 0x65, 0x0a, 0x09, 0x52, 0x64, 0x6d, 0x61, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x68, 0x63, 0x61, + 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, + 0x68, 0x63, 0x61, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x68, 0x63, + 0x61, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x0a, 0x68, 0x63, 0x61, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x22, 0x8d, 0x02, 0x0a, 0x0b, + 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x19, 0x0a, 0x08, 0x72, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x07, 0x72, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x78, + 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, + 0x72, 0x78, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x78, 0x5f, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x72, 0x78, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x78, 0x5f, 0x64, 0x72, 0x6f, + 0x70, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x72, 0x78, 0x44, 0x72, + 0x6f, 0x70, 0x70, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x74, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, + 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x78, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x78, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, + 0x1b, 0x0a, 0x09, 0x74, 0x78, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x08, 0x74, 0x78, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12, 0x1d, 0x0a, 0x0a, + 0x74, 0x78, 0x5f, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x09, 0x74, 0x78, 0x44, 0x72, 0x6f, 0x70, 0x70, 
0x65, 0x64, 0x22, 0xb9, 0x01, 0x0a, 0x0b, + 0x43, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, + 0x72, 0x5f, 0x73, 0x6c, 0x65, 0x65, 0x70, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x0a, 0x6e, 0x72, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, + 0x6e, 0x72, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x09, 0x6e, 0x72, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, + 0x72, 0x5f, 0x73, 0x74, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x09, 0x6e, 0x72, 0x53, 0x74, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x12, 0x2d, 0x0a, 0x12, 0x6e, 0x72, + 0x5f, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x72, 0x75, 0x70, 0x74, 0x69, 0x62, 0x6c, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x6e, 0x72, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x72, 0x75, 0x70, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x12, 0x1c, 0x0a, 0x0a, 0x6e, 0x72, 0x5f, + 0x69, 0x6f, 0x5f, 0x77, 0x61, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6e, + 0x72, 0x49, 0x6f, 0x57, 0x61, 0x69, 0x74, 0x42, 0x2d, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2f, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x31, + 0x2f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescOnce sync.Once + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescData = file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDesc +) + +func file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP() []byte { + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescOnce.Do(func() { + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescData) + }) + return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescData +} + +var file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 15) +var file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_goTypes = []interface{}{ + (*Metrics)(nil), // 0: io.containerd.cgroups.v1.Metrics + (*HugetlbStat)(nil), // 1: io.containerd.cgroups.v1.HugetlbStat + (*PidsStat)(nil), // 2: io.containerd.cgroups.v1.PidsStat + (*CPUStat)(nil), // 3: io.containerd.cgroups.v1.CPUStat + (*CPUUsage)(nil), // 4: io.containerd.cgroups.v1.CPUUsage + (*Throttle)(nil), // 5: io.containerd.cgroups.v1.Throttle + (*MemoryStat)(nil), // 6: io.containerd.cgroups.v1.MemoryStat + (*MemoryEntry)(nil), // 7: io.containerd.cgroups.v1.MemoryEntry + (*MemoryOomControl)(nil), // 8: io.containerd.cgroups.v1.MemoryOomControl + (*BlkIOStat)(nil), // 9: io.containerd.cgroups.v1.BlkIOStat + (*BlkIOEntry)(nil), // 10: io.containerd.cgroups.v1.BlkIOEntry + (*RdmaStat)(nil), // 11: io.containerd.cgroups.v1.RdmaStat + (*RdmaEntry)(nil), // 12: io.containerd.cgroups.v1.RdmaEntry + (*NetworkStat)(nil), // 13: io.containerd.cgroups.v1.NetworkStat + (*CgroupStats)(nil), // 14: io.containerd.cgroups.v1.CgroupStats +} +var file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_depIdxs = []int32{ + 1, // 0: io.containerd.cgroups.v1.Metrics.hugetlb:type_name -> 
io.containerd.cgroups.v1.HugetlbStat + 2, // 1: io.containerd.cgroups.v1.Metrics.pids:type_name -> io.containerd.cgroups.v1.PidsStat + 3, // 2: io.containerd.cgroups.v1.Metrics.cpu:type_name -> io.containerd.cgroups.v1.CPUStat + 6, // 3: io.containerd.cgroups.v1.Metrics.memory:type_name -> io.containerd.cgroups.v1.MemoryStat + 9, // 4: io.containerd.cgroups.v1.Metrics.blkio:type_name -> io.containerd.cgroups.v1.BlkIOStat + 11, // 5: io.containerd.cgroups.v1.Metrics.rdma:type_name -> io.containerd.cgroups.v1.RdmaStat + 13, // 6: io.containerd.cgroups.v1.Metrics.network:type_name -> io.containerd.cgroups.v1.NetworkStat + 14, // 7: io.containerd.cgroups.v1.Metrics.cgroup_stats:type_name -> io.containerd.cgroups.v1.CgroupStats + 8, // 8: io.containerd.cgroups.v1.Metrics.memory_oom_control:type_name -> io.containerd.cgroups.v1.MemoryOomControl + 4, // 9: io.containerd.cgroups.v1.CPUStat.usage:type_name -> io.containerd.cgroups.v1.CPUUsage + 5, // 10: io.containerd.cgroups.v1.CPUStat.throttling:type_name -> io.containerd.cgroups.v1.Throttle + 7, // 11: io.containerd.cgroups.v1.MemoryStat.usage:type_name -> io.containerd.cgroups.v1.MemoryEntry + 7, // 12: io.containerd.cgroups.v1.MemoryStat.swap:type_name -> io.containerd.cgroups.v1.MemoryEntry + 7, // 13: io.containerd.cgroups.v1.MemoryStat.kernel:type_name -> io.containerd.cgroups.v1.MemoryEntry + 7, // 14: io.containerd.cgroups.v1.MemoryStat.kernel_tcp:type_name -> io.containerd.cgroups.v1.MemoryEntry + 10, // 15: io.containerd.cgroups.v1.BlkIOStat.io_service_bytes_recursive:type_name -> io.containerd.cgroups.v1.BlkIOEntry + 10, // 16: io.containerd.cgroups.v1.BlkIOStat.io_serviced_recursive:type_name -> io.containerd.cgroups.v1.BlkIOEntry + 10, // 17: io.containerd.cgroups.v1.BlkIOStat.io_queued_recursive:type_name -> io.containerd.cgroups.v1.BlkIOEntry + 10, // 18: io.containerd.cgroups.v1.BlkIOStat.io_service_time_recursive:type_name -> io.containerd.cgroups.v1.BlkIOEntry + 10, // 19: io.containerd.cgroups.v1.BlkIOStat.io_wait_time_recursive:type_name -> io.containerd.cgroups.v1.BlkIOEntry + 10, // 20: io.containerd.cgroups.v1.BlkIOStat.io_merged_recursive:type_name -> io.containerd.cgroups.v1.BlkIOEntry + 10, // 21: io.containerd.cgroups.v1.BlkIOStat.io_time_recursive:type_name -> io.containerd.cgroups.v1.BlkIOEntry + 10, // 22: io.containerd.cgroups.v1.BlkIOStat.sectors_recursive:type_name -> io.containerd.cgroups.v1.BlkIOEntry + 12, // 23: io.containerd.cgroups.v1.RdmaStat.current:type_name -> io.containerd.cgroups.v1.RdmaEntry + 12, // 24: io.containerd.cgroups.v1.RdmaStat.limit:type_name -> io.containerd.cgroups.v1.RdmaEntry + 25, // [25:25] is the sub-list for method output_type + 25, // [25:25] is the sub-list for method input_type + 25, // [25:25] is the sub-list for extension type_name + 25, // [25:25] is the sub-list for extension extendee + 0, // [0:25] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_init() } +func file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_init() { + if File_github_com_containerd_cgroups_cgroup1_stats_metrics_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Metrics); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HugetlbStat); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PidsStat); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CPUStat); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CPUUsage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Throttle); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MemoryStat); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MemoryEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MemoryOomControl); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlkIOStat); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlkIOEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RdmaStat); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RdmaEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NetworkStat); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CgroupStats); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDesc, + NumEnums: 0, + NumMessages: 15, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_goTypes, + DependencyIndexes: file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_depIdxs, + MessageInfos: file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes, + }.Build() + File_github_com_containerd_cgroups_cgroup1_stats_metrics_proto = out.File + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDesc = nil + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_goTypes = nil + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/cgroups/v3/cgroup1/stats/metrics.pb.txt b/vendor/github.com/containerd/cgroups/v3/cgroup1/stats/metrics.pb.txt new file mode 100644 index 0000000000..7e4313ea58 --- /dev/null +++ b/vendor/github.com/containerd/cgroups/v3/cgroup1/stats/metrics.pb.txt @@ -0,0 +1,771 @@ +file { + name: "github.com/containerd/cgroups/cgroup1/stats/metrics.proto" + package: "io.containerd.cgroups.v1" + message_type { + name: "Metrics" + field { + name: "hugetlb" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.HugetlbStat" + json_name: "hugetlb" + } + field { + name: "pids" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.PidsStat" + json_name: "pids" + } + field { + name: "cpu" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.CPUStat" + json_name: "cpu" + } + field { + name: "memory" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.MemoryStat" + json_name: "memory" + } + field { + name: "blkio" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOStat" + json_name: "blkio" + } + field { + name: "rdma" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.RdmaStat" + json_name: "rdma" + } + field { + name: "network" + number: 7 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.NetworkStat" + json_name: "network" + } + field { + name: "cgroup_stats" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.CgroupStats" + json_name: "cgroupStats" + } + field { + name: "memory_oom_control" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.MemoryOomControl" + json_name: "memoryOomControl" + } + } + message_type { + name: "HugetlbStat" + field { + name: "usage" + number: 1 + label: LABEL_OPTIONAL 
+ type: TYPE_UINT64 + json_name: "usage" + } + field { + name: "max" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "max" + } + field { + name: "failcnt" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "failcnt" + } + field { + name: "pagesize" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "pagesize" + } + } + message_type { + name: "PidsStat" + field { + name: "current" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "current" + } + field { + name: "limit" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "limit" + } + } + message_type { + name: "CPUStat" + field { + name: "usage" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.CPUUsage" + json_name: "usage" + } + field { + name: "throttling" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.Throttle" + json_name: "throttling" + } + } + message_type { + name: "CPUUsage" + field { + name: "total" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "total" + } + field { + name: "kernel" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "kernel" + } + field { + name: "user" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "user" + } + field { + name: "per_cpu" + number: 4 + label: LABEL_REPEATED + type: TYPE_UINT64 + json_name: "perCpu" + } + } + message_type { + name: "Throttle" + field { + name: "periods" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "periods" + } + field { + name: "throttled_periods" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "throttledPeriods" + } + field { + name: "throttled_time" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "throttledTime" + } + } + message_type { + name: "MemoryStat" + field { + name: "cache" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "cache" + } + field { + name: "rss" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "rss" + } + field { + name: "rss_huge" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "rssHuge" + } + field { + name: "mapped_file" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "mappedFile" + } + field { + name: "dirty" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "dirty" + } + field { + name: "writeback" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "writeback" + } + field { + name: "pg_pg_in" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "pgPgIn" + } + field { + name: "pg_pg_out" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "pgPgOut" + } + field { + name: "pg_fault" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "pgFault" + } + field { + name: "pg_maj_fault" + number: 10 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "pgMajFault" + } + field { + name: "inactive_anon" + number: 11 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "inactiveAnon" + } + field { + name: "active_anon" + number: 12 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "activeAnon" + } + field { + name: "inactive_file" + number: 13 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "inactiveFile" + } + field { + name: "active_file" + number: 14 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "activeFile" + } + field { + name: "unevictable" + 
number: 15 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "unevictable" + } + field { + name: "hierarchical_memory_limit" + number: 16 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "hierarchicalMemoryLimit" + } + field { + name: "hierarchical_swap_limit" + number: 17 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "hierarchicalSwapLimit" + } + field { + name: "total_cache" + number: 18 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalCache" + } + field { + name: "total_rss" + number: 19 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalRss" + } + field { + name: "total_rss_huge" + number: 20 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalRssHuge" + } + field { + name: "total_mapped_file" + number: 21 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalMappedFile" + } + field { + name: "total_dirty" + number: 22 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalDirty" + } + field { + name: "total_writeback" + number: 23 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalWriteback" + } + field { + name: "total_pg_pg_in" + number: 24 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalPgPgIn" + } + field { + name: "total_pg_pg_out" + number: 25 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalPgPgOut" + } + field { + name: "total_pg_fault" + number: 26 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalPgFault" + } + field { + name: "total_pg_maj_fault" + number: 27 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalPgMajFault" + } + field { + name: "total_inactive_anon" + number: 28 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalInactiveAnon" + } + field { + name: "total_active_anon" + number: 29 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalActiveAnon" + } + field { + name: "total_inactive_file" + number: 30 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalInactiveFile" + } + field { + name: "total_active_file" + number: 31 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalActiveFile" + } + field { + name: "total_unevictable" + number: 32 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalUnevictable" + } + field { + name: "usage" + number: 33 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.MemoryEntry" + json_name: "usage" + } + field { + name: "swap" + number: 34 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.MemoryEntry" + json_name: "swap" + } + field { + name: "kernel" + number: 35 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.MemoryEntry" + json_name: "kernel" + } + field { + name: "kernel_tcp" + number: 36 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.MemoryEntry" + json_name: "kernelTcp" + } + } + message_type { + name: "MemoryEntry" + field { + name: "limit" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "limit" + } + field { + name: "usage" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "usage" + } + field { + name: "max" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "max" + } + field { + name: "failcnt" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "failcnt" + } + } + message_type { + name: "MemoryOomControl" + field { + name: "oom_kill_disable" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: 
"oomKillDisable" + } + field { + name: "under_oom" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "underOom" + } + field { + name: "oom_kill" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "oomKill" + } + } + message_type { + name: "BlkIOStat" + field { + name: "io_service_bytes_recursive" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "ioServiceBytesRecursive" + } + field { + name: "io_serviced_recursive" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "ioServicedRecursive" + } + field { + name: "io_queued_recursive" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "ioQueuedRecursive" + } + field { + name: "io_service_time_recursive" + number: 4 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "ioServiceTimeRecursive" + } + field { + name: "io_wait_time_recursive" + number: 5 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "ioWaitTimeRecursive" + } + field { + name: "io_merged_recursive" + number: 6 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "ioMergedRecursive" + } + field { + name: "io_time_recursive" + number: 7 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "ioTimeRecursive" + } + field { + name: "sectors_recursive" + number: 8 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "sectorsRecursive" + } + } + message_type { + name: "BlkIOEntry" + field { + name: "op" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "op" + } + field { + name: "device" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "device" + } + field { + name: "major" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "major" + } + field { + name: "minor" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "minor" + } + field { + name: "value" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "value" + } + } + message_type { + name: "RdmaStat" + field { + name: "current" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.RdmaEntry" + json_name: "current" + } + field { + name: "limit" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.RdmaEntry" + json_name: "limit" + } + } + message_type { + name: "RdmaEntry" + field { + name: "device" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "device" + } + field { + name: "hca_handles" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT32 + json_name: "hcaHandles" + } + field { + name: "hca_objects" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT32 + json_name: "hcaObjects" + } + } + message_type { + name: "NetworkStat" + field { + name: "name" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "name" + } + field { + name: "rx_bytes" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "rxBytes" + } + field { + name: "rx_packets" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "rxPackets" + } + field { + name: "rx_errors" + number: 4 + label: 
LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "rxErrors" + } + field { + name: "rx_dropped" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "rxDropped" + } + field { + name: "tx_bytes" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "txBytes" + } + field { + name: "tx_packets" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "txPackets" + } + field { + name: "tx_errors" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "txErrors" + } + field { + name: "tx_dropped" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "txDropped" + } + } + message_type { + name: "CgroupStats" + field { + name: "nr_sleeping" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "nrSleeping" + } + field { + name: "nr_running" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "nrRunning" + } + field { + name: "nr_stopped" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "nrStopped" + } + field { + name: "nr_uninterruptible" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "nrUninterruptible" + } + field { + name: "nr_io_wait" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "nrIoWait" + } + } + options { + go_package: "github.com/containerd/cgroups/cgroup1/stats" + } + syntax: "proto3" +} diff --git a/vendor/github.com/containerd/cgroups/v3/cgroup1/stats/metrics.proto b/vendor/github.com/containerd/cgroups/v3/cgroup1/stats/metrics.proto new file mode 100644 index 0000000000..e6e4444b1b --- /dev/null +++ b/vendor/github.com/containerd/cgroups/v3/cgroup1/stats/metrics.proto @@ -0,0 +1,158 @@ +syntax = "proto3"; + +package io.containerd.cgroups.v1; + +option go_package = "github.com/containerd/cgroups/cgroup1/stats"; + +message Metrics { + repeated HugetlbStat hugetlb = 1; + PidsStat pids = 2; + CPUStat cpu = 3; + MemoryStat memory = 4; + BlkIOStat blkio = 5; + RdmaStat rdma = 6; + repeated NetworkStat network = 7; + CgroupStats cgroup_stats = 8; + MemoryOomControl memory_oom_control = 9; +} + +message HugetlbStat { + uint64 usage = 1; + uint64 max = 2; + uint64 failcnt = 3; + string pagesize = 4; +} + +message PidsStat { + uint64 current = 1; + uint64 limit = 2; +} + +message CPUStat { + CPUUsage usage = 1; + Throttle throttling = 2; +} + +message CPUUsage { + // values in nanoseconds + uint64 total = 1; + uint64 kernel = 2; + uint64 user = 3; + repeated uint64 per_cpu = 4; + +} + +message Throttle { + uint64 periods = 1; + uint64 throttled_periods = 2; + uint64 throttled_time = 3; +} + +message MemoryStat { + uint64 cache = 1; + uint64 rss = 2; + uint64 rss_huge = 3; + uint64 mapped_file = 4; + uint64 dirty = 5; + uint64 writeback = 6; + uint64 pg_pg_in = 7; + uint64 pg_pg_out = 8; + uint64 pg_fault = 9; + uint64 pg_maj_fault = 10; + uint64 inactive_anon = 11; + uint64 active_anon = 12; + uint64 inactive_file = 13; + uint64 active_file = 14; + uint64 unevictable = 15; + uint64 hierarchical_memory_limit = 16; + uint64 hierarchical_swap_limit = 17; + uint64 total_cache = 18; + uint64 total_rss = 19; + uint64 total_rss_huge = 20; + uint64 total_mapped_file = 21; + uint64 total_dirty = 22; + uint64 total_writeback = 23; + uint64 total_pg_pg_in = 24; + uint64 total_pg_pg_out = 25; + uint64 total_pg_fault = 26; + uint64 total_pg_maj_fault = 27; + uint64 total_inactive_anon = 28; + uint64 total_active_anon = 29; + uint64 total_inactive_file = 30; + uint64 total_active_file = 31; + uint64 
total_unevictable = 32; + MemoryEntry usage = 33; + MemoryEntry swap = 34; + MemoryEntry kernel = 35; + MemoryEntry kernel_tcp = 36; + +} + +message MemoryEntry { + uint64 limit = 1; + uint64 usage = 2; + uint64 max = 3; + uint64 failcnt = 4; +} + +message MemoryOomControl { + uint64 oom_kill_disable = 1; + uint64 under_oom = 2; + uint64 oom_kill = 3; +} + +message BlkIOStat { + repeated BlkIOEntry io_service_bytes_recursive = 1; + repeated BlkIOEntry io_serviced_recursive = 2; + repeated BlkIOEntry io_queued_recursive = 3; + repeated BlkIOEntry io_service_time_recursive = 4; + repeated BlkIOEntry io_wait_time_recursive = 5; + repeated BlkIOEntry io_merged_recursive = 6; + repeated BlkIOEntry io_time_recursive = 7; + repeated BlkIOEntry sectors_recursive = 8; +} + +message BlkIOEntry { + string op = 1; + string device = 2; + uint64 major = 3; + uint64 minor = 4; + uint64 value = 5; +} + +message RdmaStat { + repeated RdmaEntry current = 1; + repeated RdmaEntry limit = 2; +} + +message RdmaEntry { + string device = 1; + uint32 hca_handles = 2; + uint32 hca_objects = 3; +} + +message NetworkStat { + string name = 1; + uint64 rx_bytes = 2; + uint64 rx_packets = 3; + uint64 rx_errors = 4; + uint64 rx_dropped = 5; + uint64 tx_bytes = 6; + uint64 tx_packets = 7; + uint64 tx_errors = 8; + uint64 tx_dropped = 9; +} + +// CgroupStats exports per-cgroup statistics. +message CgroupStats { + // number of tasks sleeping + uint64 nr_sleeping = 1; + // number of tasks running + uint64 nr_running = 2; + // number of tasks in stopped state + uint64 nr_stopped = 3; + // number of tasks in uninterruptible state + uint64 nr_uninterruptible = 4; + // number of tasks waiting on IO + uint64 nr_io_wait = 5; +} diff --git a/vendor/github.com/containerd/containerd/LICENSE b/vendor/github.com/containerd/containerd/LICENSE new file mode 100644 index 0000000000..584149b6ee --- /dev/null +++ b/vendor/github.com/containerd/containerd/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright The containerd Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/containerd/NOTICE b/vendor/github.com/containerd/containerd/NOTICE new file mode 100644 index 0000000000..8915f02773 --- /dev/null +++ b/vendor/github.com/containerd/containerd/NOTICE @@ -0,0 +1,16 @@ +Docker +Copyright 2012-2015 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/containerd/containerd/errdefs/errors.go b/vendor/github.com/containerd/containerd/errdefs/errors.go new file mode 100644 index 0000000000..8762255970 --- /dev/null +++ b/vendor/github.com/containerd/containerd/errdefs/errors.go @@ -0,0 +1,92 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+// Package errdefs defines the common errors used throughout containerd
+// packages.
+//
+// Use with fmt.Errorf to add context to an error.
+//
+// To detect an error class, use the IsXXX functions to tell whether an error
+// is of a certain type.
+//
+// The functions ToGRPC and FromGRPC can be used to map server-side and
+// client-side errors to the correct types.
+package errdefs
+
+import (
+ "context"
+ "errors"
+)
+
+// Definitions of common error types used throughout containerd. All containerd
+// errors returned by most packages will map into one of these error classes.
+// Packages should return errors of these types when they want to instruct a
+// client to take a particular action.
+//
+// For the most part, we just try to provide local grpc errors. Most conditions
+// map very well to those defined by grpc.
+var (
+ ErrUnknown = errors.New("unknown") // used internally to represent a missed mapping.
+ ErrInvalidArgument = errors.New("invalid argument")
+ ErrNotFound = errors.New("not found")
+ ErrAlreadyExists = errors.New("already exists")
+ ErrFailedPrecondition = errors.New("failed precondition")
+ ErrUnavailable = errors.New("unavailable")
+ ErrNotImplemented = errors.New("not implemented") // represents not supported and unimplemented
+)
+
+// IsInvalidArgument returns true if the error is due to an invalid argument
+func IsInvalidArgument(err error) bool {
+ return errors.Is(err, ErrInvalidArgument)
+}
+
+// IsNotFound returns true if the error is due to a missing object
+func IsNotFound(err error) bool {
+ return errors.Is(err, ErrNotFound)
+}
+
+// IsAlreadyExists returns true if the error is due to an already existing
+// metadata item
+func IsAlreadyExists(err error) bool {
+ return errors.Is(err, ErrAlreadyExists)
+}
+
+// IsFailedPrecondition returns true if an operation could not proceed due to
+// the lack of a particular condition
+func IsFailedPrecondition(err error) bool {
+ return errors.Is(err, ErrFailedPrecondition)
+}
+
+// IsUnavailable returns true if the error is due to a resource being unavailable
+func IsUnavailable(err error) bool {
+ return errors.Is(err, ErrUnavailable)
+}
+
+// IsNotImplemented returns true if the error is due to not being implemented
+func IsNotImplemented(err error) bool {
+ return errors.Is(err, ErrNotImplemented)
+}
+
+// IsCanceled returns true if the error is due to `context.Canceled`.
+func IsCanceled(err error) bool {
+ return errors.Is(err, context.Canceled)
+}
+
+// IsDeadlineExceeded returns true if the error is due to
+// `context.DeadlineExceeded`.
+func IsDeadlineExceeded(err error) bool {
+ return errors.Is(err, context.DeadlineExceeded)
+}
diff --git a/vendor/github.com/containerd/containerd/errdefs/grpc.go b/vendor/github.com/containerd/containerd/errdefs/grpc.go
new file mode 100644
index 0000000000..7a9b33e05a
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/errdefs/grpc.go
@@ -0,0 +1,147 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package errdefs + +import ( + "context" + "fmt" + "strings" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// ToGRPC will attempt to map the backend containerd error into a grpc error, +// using the original error message as a description. +// +// Further information may be extracted from certain errors depending on their +// type. +// +// If the error is unmapped, the original error will be returned to be handled +// by the regular grpc error handling stack. +func ToGRPC(err error) error { + if err == nil { + return nil + } + + if isGRPCError(err) { + // error has already been mapped to grpc + return err + } + + switch { + case IsInvalidArgument(err): + return status.Errorf(codes.InvalidArgument, err.Error()) + case IsNotFound(err): + return status.Errorf(codes.NotFound, err.Error()) + case IsAlreadyExists(err): + return status.Errorf(codes.AlreadyExists, err.Error()) + case IsFailedPrecondition(err): + return status.Errorf(codes.FailedPrecondition, err.Error()) + case IsUnavailable(err): + return status.Errorf(codes.Unavailable, err.Error()) + case IsNotImplemented(err): + return status.Errorf(codes.Unimplemented, err.Error()) + case IsCanceled(err): + return status.Errorf(codes.Canceled, err.Error()) + case IsDeadlineExceeded(err): + return status.Errorf(codes.DeadlineExceeded, err.Error()) + } + + return err +} + +// ToGRPCf maps the error to grpc error codes, assembling the formatting string +// and combining it with the target error string. +// +// This is equivalent to errdefs.ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err)) +func ToGRPCf(err error, format string, args ...interface{}) error { + return ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err)) +} + +// FromGRPC returns the underlying error from a grpc service based on the grpc error code +func FromGRPC(err error) error { + if err == nil { + return nil + } + + var cls error // divide these into error classes, becomes the cause + + switch code(err) { + case codes.InvalidArgument: + cls = ErrInvalidArgument + case codes.AlreadyExists: + cls = ErrAlreadyExists + case codes.NotFound: + cls = ErrNotFound + case codes.Unavailable: + cls = ErrUnavailable + case codes.FailedPrecondition: + cls = ErrFailedPrecondition + case codes.Unimplemented: + cls = ErrNotImplemented + case codes.Canceled: + cls = context.Canceled + case codes.DeadlineExceeded: + cls = context.DeadlineExceeded + default: + cls = ErrUnknown + } + + msg := rebaseMessage(cls, err) + if msg != "" { + err = fmt.Errorf("%s: %w", msg, cls) + } else { + err = cls + } + + return err +} + +// rebaseMessage removes the repeats for an error at the end of an error +// string. This will happen when taking an error over grpc then remapping it. +// +// Effectively, we just remove the string of cls from the end of err if it +// appears there. 
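To make the round trip concrete, here is a minimal usage sketch (an assumed example, not part of this patch; it uses the canonical import path github.com/containerd/containerd/errdefs, which the github.com vendor paths above mirror):

    package main

    import (
    	"errors"
    	"fmt"

    	"github.com/containerd/containerd/errdefs"
    )

    func main() {
    	// Wrap a sentinel with context, as the package comment recommends.
    	err := fmt.Errorf("image %q: %w", "alpine", errdefs.ErrNotFound)

    	grpcErr := errdefs.ToGRPC(err)    // carries codes.NotFound plus the full message
    	back := errdefs.FromGRPC(grpcErr) // re-wraps ErrNotFound without duplicating the suffix

    	fmt.Println(errors.Is(back, errdefs.ErrNotFound)) // true
    }

The rebaseMessage helper below performs the suffix trimming that keeps the message from growing on each hop.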
+func rebaseMessage(cls error, err error) string { + desc := errDesc(err) + clss := cls.Error() + if desc == clss { + return "" + } + + return strings.TrimSuffix(desc, ": "+clss) +} + +func isGRPCError(err error) bool { + _, ok := status.FromError(err) + return ok +} + +func code(err error) codes.Code { + if s, ok := status.FromError(err); ok { + return s.Code() + } + return codes.Unknown +} + +func errDesc(err error) string { + if s, ok := status.FromError(err); ok { + return s.Message() + } + return err.Error() +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE b/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go new file mode 100644 index 0000000000..6aba0ef1f6 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go @@ -0,0 +1,689 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. + Use of this source code is governed by a BSD-style + license that can be found in the LICENSE file. +*/ + +package estargz + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "context" + "errors" + "fmt" + "io" + "os" + "path" + "runtime" + "strings" + "sync" + + "github.com/containerd/stargz-snapshotter/estargz/errorutil" + "github.com/klauspost/compress/zstd" + digest "github.com/opencontainers/go-digest" + "golang.org/x/sync/errgroup" +) + +type options struct { + chunkSize int + compressionLevel int + prioritizedFiles []string + missedPrioritizedFiles *[]string + compression Compression + ctx context.Context + minChunkSize int +} + +type Option func(o *options) error + +// WithChunkSize option specifies the chunk size of eStargz blob to build. +func WithChunkSize(chunkSize int) Option { + return func(o *options) error { + o.chunkSize = chunkSize + return nil + } +} + +// WithCompressionLevel option specifies the gzip compression level. +// The default is gzip.BestCompression. +// This option will be ignored if WithCompression option is used. +// See also: https://godoc.org/compress/gzip#pkg-constants +func WithCompressionLevel(level int) Option { + return func(o *options) error { + o.compressionLevel = level + return nil + } +} + +// WithPrioritizedFiles option specifies the list of prioritized files. 
+// These files must be complete paths that are absolute or relative to "/".
+// For example, all of "foo/bar", "/foo/bar", "./foo/bar" and "../foo/bar"
+// are treated as "/foo/bar".
+func WithPrioritizedFiles(files []string) Option {
+ return func(o *options) error {
+ o.prioritizedFiles = files
+ return nil
+ }
+}
+
+// WithAllowPrioritizeNotFound makes Build continue the execution even if some
+// of the prioritized files specified by the WithPrioritizedFiles option aren't
+// found in the input tar. Instead, it records all missed file names in the
+// passed slice.
+func WithAllowPrioritizeNotFound(missedFiles *[]string) Option {
+ return func(o *options) error {
+ if missedFiles == nil {
+ return fmt.Errorf("WithAllowPrioritizeNotFound: slice must be passed")
+ }
+ o.missedPrioritizedFiles = missedFiles
+ return nil
+ }
+}
+
+// WithCompression specifies the compression algorithm to be used.
+// Default is gzip.
+func WithCompression(compression Compression) Option {
+ return func(o *options) error {
+ o.compression = compression
+ return nil
+ }
+}
+
+// WithContext specifies a context that can be used for clean cancellation.
+func WithContext(ctx context.Context) Option {
+ return func(o *options) error {
+ o.ctx = ctx
+ return nil
+ }
+}
+
+// WithMinChunkSize option specifies the minimal number of bytes of data that
+// must be written in one gzip stream.
+// By increasing this number, one gzip stream can contain multiple files,
+// which hopefully leads to a smaller result blob.
+// NOTE: This adds a TOC property that old readers don't understand.
+func WithMinChunkSize(minChunkSize int) Option {
+ return func(o *options) error {
+ o.minChunkSize = minChunkSize
+ return nil
+ }
+}
+
+// Blob is an eStargz blob.
+type Blob struct {
+ io.ReadCloser
+ diffID digest.Digester
+ tocDigest digest.Digest
+}
+
+// DiffID returns the digest of the uncompressed blob.
+// It is only valid to call DiffID after Close.
+func (b *Blob) DiffID() digest.Digest {
+ return b.diffID.Digest()
+}
+
+// TOCDigest returns the digest of the uncompressed TOC JSON.
+func (b *Blob) TOCDigest() digest.Digest {
+ return b.tocDigest
+}
+
+// Build builds an eStargz blob, which is an extended version of stargz, from a blob (gzip, zstd
+// or plain tar) passed through the argument. If prioritized files are listed in
+// the option, these files are grouped as "prioritized" and can be used for runtime optimization
+// (e.g. prefetch). This function builds the blob in parallel, dividing it into several
+// (at least runtime.GOMAXPROCS(0)) sub-blobs.
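As a usage illustration, a hedged sketch of driving this API (file names are hypothetical and error handling is abbreviated; this is not part of the vendored code); the Build implementation itself follows:

    package main

    import (
    	"io"
    	"log"
    	"os"

    	"github.com/containerd/stargz-snapshotter/estargz"
    )

    func main() {
    	src, err := os.Open("layer.tar") // plain, gzip, or zstd tar all work
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer src.Close()
    	fi, err := src.Stat()
    	if err != nil {
    		log.Fatal(err)
    	}

    	blob, err := estargz.Build(
    		io.NewSectionReader(src, 0, fi.Size()),
    		estargz.WithChunkSize(1<<20),                         // 1 MiB chunks
    		estargz.WithPrioritizedFiles([]string{"etc/passwd"}), // grouped for prefetch
    	)
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer blob.Close()

    	out, err := os.Create("layer.esgz")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer out.Close()
    	if _, err := io.Copy(out, blob); err != nil {
    		log.Fatal(err)
    	}
    	log.Println("TOC digest:", blob.TOCDigest())
    }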
+func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { + var opts options + opts.compressionLevel = gzip.BestCompression // BestCompression by default + for _, o := range opt { + if err := o(&opts); err != nil { + return nil, err + } + } + if opts.compression == nil { + opts.compression = newGzipCompressionWithLevel(opts.compressionLevel) + } + layerFiles := newTempFiles() + ctx := opts.ctx + if ctx == nil { + ctx = context.Background() + } + done := make(chan struct{}) + defer close(done) + go func() { + select { + case <-done: + // nop + case <-ctx.Done(): + layerFiles.CleanupAll() + } + }() + defer func() { + if rErr != nil { + if err := layerFiles.CleanupAll(); err != nil { + rErr = fmt.Errorf("failed to cleanup tmp files: %v: %w", err, rErr) + } + } + if cErr := ctx.Err(); cErr != nil { + rErr = fmt.Errorf("error from context %q: %w", cErr, rErr) + } + }() + tarBlob, err := decompressBlob(tarBlob, layerFiles) + if err != nil { + return nil, err + } + entries, err := sortEntries(tarBlob, opts.prioritizedFiles, opts.missedPrioritizedFiles) + if err != nil { + return nil, err + } + var tarParts [][]*entry + if opts.minChunkSize > 0 { + // Each entry needs to know the size of the current gzip stream so they + // cannot be processed in parallel. + tarParts = [][]*entry{entries} + } else { + tarParts = divideEntries(entries, runtime.GOMAXPROCS(0)) + } + writers := make([]*Writer, len(tarParts)) + payloads := make([]*os.File, len(tarParts)) + var mu sync.Mutex + var eg errgroup.Group + for i, parts := range tarParts { + i, parts := i, parts + // builds verifiable stargz sub-blobs + eg.Go(func() error { + esgzFile, err := layerFiles.TempFile("", "esgzdata") + if err != nil { + return err + } + sw := NewWriterWithCompressor(esgzFile, opts.compression) + sw.ChunkSize = opts.chunkSize + sw.MinChunkSize = opts.minChunkSize + if sw.needsOpenGzEntries == nil { + sw.needsOpenGzEntries = make(map[string]struct{}) + } + for _, f := range []string{PrefetchLandmark, NoPrefetchLandmark} { + sw.needsOpenGzEntries[f] = struct{}{} + } + if err := sw.AppendTar(readerFromEntries(parts...)); err != nil { + return err + } + mu.Lock() + writers[i] = sw + payloads[i] = esgzFile + mu.Unlock() + return nil + }) + } + if err := eg.Wait(); err != nil { + rErr = err + return nil, err + } + tocAndFooter, tocDgst, err := closeWithCombine(writers...) + if err != nil { + rErr = err + return nil, err + } + var rs []io.Reader + for _, p := range payloads { + fs, err := fileSectionReader(p) + if err != nil { + return nil, err + } + rs = append(rs, fs) + } + diffID := digest.Canonical.Digester() + pr, pw := io.Pipe() + go func() { + r, err := opts.compression.Reader(io.TeeReader(io.MultiReader(append(rs, tocAndFooter)...), pw)) + if err != nil { + pw.CloseWithError(err) + return + } + defer r.Close() + if _, err := io.Copy(diffID.Hash(), r); err != nil { + pw.CloseWithError(err) + return + } + pw.Close() + }() + return &Blob{ + ReadCloser: readCloser{ + Reader: pr, + closeFunc: layerFiles.CleanupAll, + }, + tocDigest: tocDgst, + diffID: diffID, + }, nil +} + +// closeWithCombine takes unclosed Writers and close them. This also returns the +// toc that combined all Writers into. +// Writers doesn't write TOC and footer to the underlying writers so they can be +// combined into a single eStargz and tocAndFooter returned by this function can +// be appended at the tail of that combined blob. 
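The key detail when combining sub-blobs is that each writer's TOC offsets are local to its own output and must be shifted by the bytes already emitted before it. A toy illustration with assumed numbers (not from the vendored code):

    package main

    import "fmt"

    func main() {
    	// Hypothetical: writer A emitted a 1 MiB sub-blob, and an entry in
    	// writer B's TOC sits at offset 4096 within B's own sub-blob.
    	subBlobASize := int64(1 << 20)
    	entryOffsetInB := int64(4096)

    	// After concatenation, the entry's offset in the combined blob is its
    	// local offset shifted by everything written before B. This is what the
    	// `e.Offset += currentOffset` loop in closeWithCombine performs.
    	fmt.Println(subBlobASize + entryOffsetInB) // 1052672
    }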
+func closeWithCombine(ws ...*Writer) (tocAndFooterR io.Reader, tocDgst digest.Digest, err error) {
+ if len(ws) == 0 {
+ return nil, "", fmt.Errorf("at least one writer must be passed")
+ }
+ for _, w := range ws {
+ if w.closed {
+ return nil, "", fmt.Errorf("writer must be unclosed")
+ }
+ defer func(w *Writer) { w.closed = true }(w)
+ if err := w.closeGz(); err != nil {
+ return nil, "", err
+ }
+ if err := w.bw.Flush(); err != nil {
+ return nil, "", err
+ }
+ }
+ var (
+ mtoc = new(JTOC)
+ currentOffset int64
+ )
+ mtoc.Version = ws[0].toc.Version
+ for _, w := range ws {
+ for _, e := range w.toc.Entries {
+ // Recalculate Offset of non-empty files/chunks
+ if (e.Type == "reg" && e.Size > 0) || e.Type == "chunk" {
+ e.Offset += currentOffset
+ }
+ mtoc.Entries = append(mtoc.Entries, e)
+ }
+ if w.toc.Version > mtoc.Version {
+ mtoc.Version = w.toc.Version
+ }
+ currentOffset += w.cw.n
+ }
+
+ return tocAndFooter(ws[0].compressor, mtoc, currentOffset)
+}
+
+func tocAndFooter(compressor Compressor, toc *JTOC, offset int64) (io.Reader, digest.Digest, error) {
+ buf := new(bytes.Buffer)
+ tocDigest, err := compressor.WriteTOCAndFooter(buf, offset, toc, nil)
+ if err != nil {
+ return nil, "", err
+ }
+ return buf, tocDigest, nil
+}
+
+// divideEntries divides the passed entries into at least the number of parts
+// specified by the argument.
+func divideEntries(entries []*entry, minPartsNum int) (set [][]*entry) {
+ var estimatedSize int64
+ for _, e := range entries {
+ estimatedSize += e.header.Size
+ }
+ unitSize := estimatedSize / int64(minPartsNum)
+ var (
+ nextEnd = unitSize
+ offset int64
+ )
+ set = append(set, []*entry{})
+ for _, e := range entries {
+ set[len(set)-1] = append(set[len(set)-1], e)
+ offset += e.header.Size
+ if offset > nextEnd {
+ set = append(set, []*entry{})
+ nextEnd += unitSize
+ }
+ }
+ return
+}
+
+var errNotFound = errors.New("not found")
+
+// sortEntries reads the specified tar blob and returns a list of tar entries.
+// If prioritized files are specified, the list starts with those files,
+// keeping the order specified by the argument.
+func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]string) ([]*entry, error) {
+
+ // Import tar file.
+ intar, err := importTar(in)
+ if err != nil {
+ return nil, fmt.Errorf("failed to sort: %w", err)
+ }
+
+ // Sort the tar file with respect to the prioritized files list.
+ sorted := &tarFile{}
+ for _, l := range prioritized {
+ if err := moveRec(l, intar, sorted); err != nil {
+ if errors.Is(err, errNotFound) && missedPrioritized != nil {
+ *missedPrioritized = append(*missedPrioritized, l)
+ continue // allow not found
+ }
+ return nil, fmt.Errorf("failed to sort tar entries: %w", err)
+ }
+ }
+ if len(prioritized) == 0 {
+ sorted.add(&entry{
+ header: &tar.Header{
+ Name: NoPrefetchLandmark,
+ Typeflag: tar.TypeReg,
+ Size: int64(len([]byte{landmarkContents})),
+ },
+ payload: bytes.NewReader([]byte{landmarkContents}),
+ })
+ } else {
+ sorted.add(&entry{
+ header: &tar.Header{
+ Name: PrefetchLandmark,
+ Typeflag: tar.TypeReg,
+ Size: int64(len([]byte{landmarkContents})),
+ },
+ payload: bytes.NewReader([]byte{landmarkContents}),
+ })
+ }
+
+ // Dump all entries and concatenate them.
+ return append(sorted.dump(), intar.dump()...), nil
+}
+
+// readerFromEntries returns a reader of a tar archive that contains the
+// entries passed through the arguments.
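The function below implements this with an io.Pipe feeding a tar.Writer. The same pattern in isolation, as a self-contained sketch (an assumed example, not part of the vendored code):

    package main

    import (
    	"archive/tar"
    	"fmt"
    	"io"
    	"log"
    	"strings"
    )

    func main() {
    	pr, pw := io.Pipe()
    	go func() {
    		tw := tar.NewWriter(pw)
    		hdr := &tar.Header{Name: "hello.txt", Mode: 0644, Size: 5, Typeflag: tar.TypeReg}
    		if err := tw.WriteHeader(hdr); err != nil {
    			pw.CloseWithError(err)
    			return
    		}
    		if _, err := io.Copy(tw, strings.NewReader("hello")); err != nil {
    			pw.CloseWithError(err)
    			return
    		}
    		tw.Close() // flush padding and the archive trailer
    		pw.Close()
    	}()

    	tr := tar.NewReader(pr)
    	for {
    		h, err := tr.Next()
    		if err == io.EOF {
    			break
    		}
    		if err != nil {
    			log.Fatal(err)
    		}
    		b, _ := io.ReadAll(tr)
    		fmt.Println(h.Name, string(b)) // hello.txt hello
    	}
    }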
+func readerFromEntries(entries ...*entry) io.Reader { + pr, pw := io.Pipe() + go func() { + tw := tar.NewWriter(pw) + defer tw.Close() + for _, entry := range entries { + if err := tw.WriteHeader(entry.header); err != nil { + pw.CloseWithError(fmt.Errorf("Failed to write tar header: %v", err)) + return + } + if _, err := io.Copy(tw, entry.payload); err != nil { + pw.CloseWithError(fmt.Errorf("Failed to write tar payload: %v", err)) + return + } + } + pw.Close() + }() + return pr +} + +func importTar(in io.ReaderAt) (*tarFile, error) { + tf := &tarFile{} + pw, err := newCountReadSeeker(in) + if err != nil { + return nil, fmt.Errorf("failed to make position watcher: %w", err) + } + tr := tar.NewReader(pw) + + // Walk through all nodes. + for { + // Fetch and parse next header. + h, err := tr.Next() + if err != nil { + if err == io.EOF { + break + } + return nil, fmt.Errorf("failed to parse tar file, %w", err) + } + switch cleanEntryName(h.Name) { + case PrefetchLandmark, NoPrefetchLandmark: + // Ignore existing landmark + continue + } + + // Add entry. If it already exists, replace it. + if _, ok := tf.get(h.Name); ok { + tf.remove(h.Name) + } + tf.add(&entry{ + header: h, + payload: io.NewSectionReader(in, pw.currentPos(), h.Size), + }) + } + + return tf, nil +} + +func moveRec(name string, in *tarFile, out *tarFile) error { + name = cleanEntryName(name) + if name == "" { // root directory. stop recursion. + if e, ok := in.get(name); ok { + // entry of the root directory exists. we should move it as well. + // this case will occur if tar entries are prefixed with "./", "/", etc. + out.add(e) + in.remove(name) + } + return nil + } + + _, okIn := in.get(name) + _, okOut := out.get(name) + if !okIn && !okOut { + return fmt.Errorf("file: %q: %w", name, errNotFound) + } + + parent, _ := path.Split(strings.TrimSuffix(name, "/")) + if err := moveRec(parent, in, out); err != nil { + return err + } + if e, ok := in.get(name); ok && e.header.Typeflag == tar.TypeLink { + if err := moveRec(e.header.Linkname, in, out); err != nil { + return err + } + } + if e, ok := in.get(name); ok { + out.add(e) + in.remove(name) + } + return nil +} + +type entry struct { + header *tar.Header + payload io.ReadSeeker +} + +type tarFile struct { + index map[string]*entry + stream []*entry +} + +func (f *tarFile) add(e *entry) { + if f.index == nil { + f.index = make(map[string]*entry) + } + f.index[cleanEntryName(e.header.Name)] = e + f.stream = append(f.stream, e) +} + +func (f *tarFile) remove(name string) { + name = cleanEntryName(name) + if f.index != nil { + delete(f.index, name) + } + var filtered []*entry + for _, e := range f.stream { + if cleanEntryName(e.header.Name) == name { + continue + } + filtered = append(filtered, e) + } + f.stream = filtered +} + +func (f *tarFile) get(name string) (e *entry, ok bool) { + if f.index == nil { + return nil, false + } + e, ok = f.index[cleanEntryName(name)] + return +} + +func (f *tarFile) dump() []*entry { + return f.stream +} + +type readCloser struct { + io.Reader + closeFunc func() error +} + +func (rc readCloser) Close() error { + return rc.closeFunc() +} + +func fileSectionReader(file *os.File) (*io.SectionReader, error) { + info, err := file.Stat() + if err != nil { + return nil, err + } + return io.NewSectionReader(file, 0, info.Size()), nil +} + +func newTempFiles() *tempFiles { + return &tempFiles{} +} + +type tempFiles struct { + files []*os.File + filesMu sync.Mutex + cleanupOnce sync.Once +} + +func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, 
error) { + f, err := os.CreateTemp(dir, pattern) + if err != nil { + return nil, err + } + tf.filesMu.Lock() + tf.files = append(tf.files, f) + tf.filesMu.Unlock() + return f, nil +} + +func (tf *tempFiles) CleanupAll() (err error) { + tf.cleanupOnce.Do(func() { + err = tf.cleanupAll() + }) + return +} + +func (tf *tempFiles) cleanupAll() error { + tf.filesMu.Lock() + defer tf.filesMu.Unlock() + var allErr []error + for _, f := range tf.files { + if err := f.Close(); err != nil { + allErr = append(allErr, err) + } + if err := os.Remove(f.Name()); err != nil { + allErr = append(allErr, err) + } + } + tf.files = nil + return errorutil.Aggregate(allErr) +} + +func newCountReadSeeker(r io.ReaderAt) (*countReadSeeker, error) { + pos := int64(0) + return &countReadSeeker{r: r, cPos: &pos}, nil +} + +type countReadSeeker struct { + r io.ReaderAt + cPos *int64 + + mu sync.Mutex +} + +func (cr *countReadSeeker) Read(p []byte) (int, error) { + cr.mu.Lock() + defer cr.mu.Unlock() + + n, err := cr.r.ReadAt(p, *cr.cPos) + if err == nil { + *cr.cPos += int64(n) + } + return n, err +} + +func (cr *countReadSeeker) Seek(offset int64, whence int) (int64, error) { + cr.mu.Lock() + defer cr.mu.Unlock() + + switch whence { + default: + return 0, fmt.Errorf("Unknown whence: %v", whence) + case io.SeekStart: + case io.SeekCurrent: + offset += *cr.cPos + case io.SeekEnd: + return 0, fmt.Errorf("Unsupported whence: %v", whence) + } + + if offset < 0 { + return 0, fmt.Errorf("invalid offset") + } + *cr.cPos = offset + return offset, nil +} + +func (cr *countReadSeeker) currentPos() int64 { + cr.mu.Lock() + defer cr.mu.Unlock() + + return *cr.cPos +} + +func decompressBlob(org *io.SectionReader, tmp *tempFiles) (*io.SectionReader, error) { + if org.Size() < 4 { + return org, nil + } + src := make([]byte, 4) + if _, err := org.Read(src); err != nil && err != io.EOF { + return nil, err + } + var dR io.Reader + if bytes.Equal([]byte{0x1F, 0x8B, 0x08}, src[:3]) { + // gzip + dgR, err := gzip.NewReader(io.NewSectionReader(org, 0, org.Size())) + if err != nil { + return nil, err + } + defer dgR.Close() + dR = io.Reader(dgR) + } else if bytes.Equal([]byte{0x28, 0xb5, 0x2f, 0xfd}, src[:4]) { + // zstd + dzR, err := zstd.NewReader(io.NewSectionReader(org, 0, org.Size())) + if err != nil { + return nil, err + } + defer dzR.Close() + dR = io.Reader(dzR) + } else { + // uncompressed + return io.NewSectionReader(org, 0, org.Size()), nil + } + b, err := tmp.TempFile("", "uncompresseddata") + if err != nil { + return nil, err + } + if _, err := io.Copy(b, dR); err != nil { + return nil, err + } + return fileSectionReader(b) +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go new file mode 100644 index 0000000000..6de78b02dc --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go @@ -0,0 +1,40 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package errorutil + +import ( + "errors" + "fmt" + "strings" +) + +// Aggregate combines a list of errors into a single new error. +func Aggregate(errs []error) error { + switch len(errs) { + case 0: + return nil + case 1: + return errs[0] + default: + points := make([]string, len(errs)+1) + points[0] = fmt.Sprintf("%d error(s) occurred:", len(errs)) + for i, err := range errs { + points[i+1] = fmt.Sprintf("* %s", err) + } + return errors.New(strings.Join(points, "\n\t")) + } +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go new file mode 100644 index 0000000000..f4d5546558 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go @@ -0,0 +1,1223 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. + Use of this source code is governed by a BSD-style + license that can be found in the LICENSE file. +*/ + +package estargz + +import ( + "bufio" + "bytes" + "compress/gzip" + "crypto/sha256" + "errors" + "fmt" + "hash" + "io" + "os" + "path" + "sort" + "strings" + "sync" + "time" + + "github.com/containerd/stargz-snapshotter/estargz/errorutil" + digest "github.com/opencontainers/go-digest" + "github.com/vbatts/tar-split/archive/tar" +) + +// A Reader permits random access reads from a stargz file. +type Reader struct { + sr *io.SectionReader + toc *JTOC + tocDigest digest.Digest + + // m stores all non-chunk entries, keyed by name. + m map[string]*TOCEntry + + // chunks stores all TOCEntry values for regular files that + // are split up. For a file with a single chunk, it's only + // stored in m. + chunks map[string][]*TOCEntry + + decompressor Decompressor +} + +type openOpts struct { + tocOffset int64 + decompressors []Decompressor + telemetry *Telemetry +} + +// OpenOption is an option used during opening the layer +type OpenOption func(o *openOpts) error + +// WithTOCOffset option specifies the offset of TOC +func WithTOCOffset(tocOffset int64) OpenOption { + return func(o *openOpts) error { + o.tocOffset = tocOffset + return nil + } +} + +// WithDecompressors option specifies decompressors to use. +// Default is gzip-based decompressor. +func WithDecompressors(decompressors ...Decompressor) OpenOption { + return func(o *openOpts) error { + o.decompressors = decompressors + return nil + } +} + +// WithTelemetry option specifies the telemetry hooks +func WithTelemetry(telemetry *Telemetry) OpenOption { + return func(o *openOpts) error { + o.telemetry = telemetry + return nil + } +} + +// MeasureLatencyHook is a func which takes start time and records the diff +type MeasureLatencyHook func(time.Time) + +// Telemetry is a struct which defines telemetry hooks. 
By implementing these hooks you should be able to record +// the latency metrics of the respective steps of estargz open operation. To be used with estargz.OpenWithTelemetry(...) +type Telemetry struct { + GetFooterLatency MeasureLatencyHook // measure time to get stargz footer (in milliseconds) + GetTocLatency MeasureLatencyHook // measure time to GET TOC JSON (in milliseconds) + DeserializeTocLatency MeasureLatencyHook // measure time to deserialize TOC JSON (in milliseconds) +} + +// Open opens a stargz file for reading. +// The behavior is configurable using options. +// +// Note that each entry name is normalized as the path that is relative to root. +func Open(sr *io.SectionReader, opt ...OpenOption) (*Reader, error) { + var opts openOpts + for _, o := range opt { + if err := o(&opts); err != nil { + return nil, err + } + } + + gzipCompressors := []Decompressor{new(GzipDecompressor), new(LegacyGzipDecompressor)} + decompressors := append(gzipCompressors, opts.decompressors...) + + // Determine the size to fetch. Try to fetch as many bytes as possible. + fetchSize := maxFooterSize(sr.Size(), decompressors...) + if maybeTocOffset := opts.tocOffset; maybeTocOffset > fetchSize { + if maybeTocOffset > sr.Size() { + return nil, fmt.Errorf("blob size %d is smaller than the toc offset", sr.Size()) + } + fetchSize = sr.Size() - maybeTocOffset + } + + start := time.Now() // before getting layer footer + footer := make([]byte, fetchSize) + if _, err := sr.ReadAt(footer, sr.Size()-fetchSize); err != nil { + return nil, fmt.Errorf("error reading footer: %v", err) + } + if opts.telemetry != nil && opts.telemetry.GetFooterLatency != nil { + opts.telemetry.GetFooterLatency(start) + } + + var allErr []error + var found bool + var r *Reader + for _, d := range decompressors { + fSize := d.FooterSize() + fOffset := positive(int64(len(footer)) - fSize) + maybeTocBytes := footer[:fOffset] + _, tocOffset, tocSize, err := d.ParseFooter(footer[fOffset:]) + if err != nil { + allErr = append(allErr, err) + continue + } + if tocOffset >= 0 && tocSize <= 0 { + tocSize = sr.Size() - tocOffset - fSize + } + if tocOffset >= 0 && tocSize < int64(len(maybeTocBytes)) { + maybeTocBytes = maybeTocBytes[:tocSize] + } + r, err = parseTOC(d, sr, tocOffset, tocSize, maybeTocBytes, opts) + if err == nil { + found = true + break + } + allErr = append(allErr, err) + } + if !found { + return nil, errorutil.Aggregate(allErr) + } + if err := r.initFields(); err != nil { + return nil, fmt.Errorf("failed to initialize fields of entries: %v", err) + } + return r, nil +} + +// OpenFooter extracts and parses footer from the given blob. +// only supports gzip-based eStargz. +func OpenFooter(sr *io.SectionReader) (tocOffset int64, footerSize int64, rErr error) { + if sr.Size() < FooterSize && sr.Size() < legacyFooterSize { + return 0, 0, fmt.Errorf("blob size %d is smaller than the footer size", sr.Size()) + } + var footer [FooterSize]byte + if _, err := sr.ReadAt(footer[:], sr.Size()-FooterSize); err != nil { + return 0, 0, fmt.Errorf("error reading footer: %v", err) + } + var allErr []error + for _, d := range []Decompressor{new(GzipDecompressor), new(LegacyGzipDecompressor)} { + fSize := d.FooterSize() + fOffset := positive(int64(len(footer)) - fSize) + _, tocOffset, _, err := d.ParseFooter(footer[fOffset:]) + if err == nil { + return tocOffset, fSize, err + } + allErr = append(allErr, err) + } + return 0, 0, errorutil.Aggregate(allErr) +} + +// initFields populates the Reader from r.toc after decoding it from +// JSON. 
+// +// Unexported fields are populated and TOCEntry fields that were +// implicit in the JSON are populated. +func (r *Reader) initFields() error { + r.m = make(map[string]*TOCEntry, len(r.toc.Entries)) + r.chunks = make(map[string][]*TOCEntry) + var lastPath string + uname := map[int]string{} + gname := map[int]string{} + var lastRegEnt *TOCEntry + var chunkTopIndex int + for i, ent := range r.toc.Entries { + ent.Name = cleanEntryName(ent.Name) + switch ent.Type { + case "reg", "chunk": + if ent.Offset != r.toc.Entries[chunkTopIndex].Offset { + chunkTopIndex = i + } + ent.chunkTopIndex = chunkTopIndex + } + if ent.Type == "reg" { + lastRegEnt = ent + } + if ent.Type == "chunk" { + ent.Name = lastPath + r.chunks[ent.Name] = append(r.chunks[ent.Name], ent) + if ent.ChunkSize == 0 && lastRegEnt != nil { + ent.ChunkSize = lastRegEnt.Size - ent.ChunkOffset + } + } else { + lastPath = ent.Name + + if ent.Uname != "" { + uname[ent.UID] = ent.Uname + } else { + ent.Uname = uname[ent.UID] + } + if ent.Gname != "" { + gname[ent.GID] = ent.Gname + } else { + ent.Gname = uname[ent.GID] + } + + ent.modTime, _ = time.Parse(time.RFC3339, ent.ModTime3339) + + if ent.Type == "dir" { + ent.NumLink++ // Parent dir links to this directory + } + r.m[ent.Name] = ent + } + if ent.Type == "reg" && ent.ChunkSize > 0 && ent.ChunkSize < ent.Size { + r.chunks[ent.Name] = make([]*TOCEntry, 0, ent.Size/ent.ChunkSize+1) + r.chunks[ent.Name] = append(r.chunks[ent.Name], ent) + } + if ent.ChunkSize == 0 && ent.Size != 0 { + ent.ChunkSize = ent.Size + } + } + + // Populate children, add implicit directories: + for _, ent := range r.toc.Entries { + if ent.Type == "chunk" { + continue + } + // add "foo/": + // add "foo" child to "" (creating "" if necessary) + // + // add "foo/bar/": + // add "bar" child to "foo" (creating "foo" if necessary) + // + // add "foo/bar.txt": + // add "bar.txt" child to "foo" (creating "foo" if necessary) + // + // add "a/b/c/d/e/f.txt": + // create "a/b/c/d/e" node + // add "f.txt" child to "e" + + name := ent.Name + pdirName := parentDir(name) + if name == pdirName { + // This entry and its parent are the same. + // Ignore this for avoiding infinite loop of the reference. + // The example case where this can occur is when tar contains the root + // directory itself (e.g. "./", "/"). + continue + } + pdir := r.getOrCreateDir(pdirName) + ent.NumLink++ // at least one name(ent.Name) references this entry. + if ent.Type == "hardlink" { + org, err := r.getSource(ent) + if err != nil { + return err + } + org.NumLink++ // original entry is referenced by this ent.Name. 
+ ent = org
+ }
+ pdir.addChild(path.Base(name), ent)
+ }
+
+ lastOffset := r.sr.Size()
+ for i := len(r.toc.Entries) - 1; i >= 0; i-- {
+ e := r.toc.Entries[i]
+ if e.isDataType() {
+ e.nextOffset = lastOffset
+ }
+ if e.Offset != 0 && e.InnerOffset == 0 {
+ lastOffset = e.Offset
+ }
+ }
+
+ return nil
+}
+
+func (r *Reader) getSource(ent *TOCEntry) (_ *TOCEntry, err error) {
+ if ent.Type == "hardlink" {
+ org, ok := r.m[cleanEntryName(ent.LinkName)]
+ if !ok {
+ return nil, fmt.Errorf("%q is a hardlink but the linkname %q isn't found", ent.Name, ent.LinkName)
+ }
+ ent, err = r.getSource(org)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return ent, nil
+}
+
+func parentDir(p string) string {
+ dir, _ := path.Split(p)
+ return strings.TrimSuffix(dir, "/")
+}
+
+func (r *Reader) getOrCreateDir(d string) *TOCEntry {
+ e, ok := r.m[d]
+ if !ok {
+ e = &TOCEntry{
+ Name: d,
+ Type: "dir",
+ Mode: 0755,
+ NumLink: 2, // The directory itself(.) and the parent link to this directory.
+ }
+ r.m[d] = e
+ if d != "" {
+ pdir := r.getOrCreateDir(parentDir(d))
+ pdir.addChild(path.Base(d), e)
+ }
+ }
+ return e
+}
+
+func (r *Reader) TOCDigest() digest.Digest {
+ return r.tocDigest
+}
+
+// VerifyTOC checks that the TOC JSON in the passed blob matches the
+// passed digests and that the TOC JSON contains digests for all chunks
+// contained in the blob. If the verification succeeds, this function
+// returns a TOCEntryVerifier which holds all chunk digests in the stargz blob.
+func (r *Reader) VerifyTOC(tocDigest digest.Digest) (TOCEntryVerifier, error) {
+ // Verify the digest of TOC JSON
+ if r.tocDigest != tocDigest {
+ return nil, fmt.Errorf("invalid TOC JSON %q; want %q", r.tocDigest, tocDigest)
+ }
+ return r.Verifiers()
+}
+
+// Verifiers returns a TOCEntryVerifier for this blob. Use VerifyTOC instead in most cases
+// because this doesn't verify the TOC.
+func (r *Reader) Verifiers() (TOCEntryVerifier, error) {
+ chunkDigestMap := make(map[int64]digest.Digest) // map from chunk offset to the chunk digest
+ regDigestMap := make(map[int64]digest.Digest) // map from chunk offset to the reg file digest
+ var chunkDigestMapIncomplete bool
+ var regDigestMapIncomplete bool
+ var containsChunk bool
+ for _, e := range r.toc.Entries {
+ if e.Type != "reg" && e.Type != "chunk" {
+ continue
+ }
+
+ // offset must be unique in stargz blob
+ _, dOK := chunkDigestMap[e.Offset]
+ _, rOK := regDigestMap[e.Offset]
+ if dOK || rOK {
+ return nil, fmt.Errorf("offset %d found twice", e.Offset)
+ }
+
+ if e.Type == "reg" {
+ if e.Size == 0 {
+ continue // ignores empty file
+ }
+
+ // record the digest of regular file payload
+ if e.Digest != "" {
+ d, err := digest.Parse(e.Digest)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse regular file digest %q: %w", e.Digest, err)
+ }
+ regDigestMap[e.Offset] = d
+ } else {
+ regDigestMapIncomplete = true
+ }
+ } else {
+ containsChunk = true // this layer contains "chunk" entries.
+ }
+
+ // "reg" can also contain ChunkDigest (e.g. when "reg" is the first entry of
+ // chunked file)
+ if e.ChunkDigest != "" {
+ d, err := digest.Parse(e.ChunkDigest)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse chunk digest %q: %w", e.ChunkDigest, err)
+ }
+ chunkDigestMap[e.Offset] = d
+ } else {
+ chunkDigestMapIncomplete = true
+ }
+ }
+
+ if chunkDigestMapIncomplete {
+ // Though some chunk digests are not found, if this layer doesn't contain
+ // "chunk"s and all digests of "reg" files are recorded, we can use them instead.
+ if !containsChunk && !regDigestMapIncomplete { + return &verifier{digestMap: regDigestMap}, nil + } + return nil, fmt.Errorf("some ChunkDigest not found in TOC JSON") + } + + return &verifier{digestMap: chunkDigestMap}, nil +} + +// verifier is an implementation of TOCEntryVerifier which holds verifiers keyed by +// offset of the chunk. +type verifier struct { + digestMap map[int64]digest.Digest + digestMapMu sync.Mutex +} + +// Verifier returns a content verifier specified by TOCEntry. +func (v *verifier) Verifier(ce *TOCEntry) (digest.Verifier, error) { + v.digestMapMu.Lock() + defer v.digestMapMu.Unlock() + d, ok := v.digestMap[ce.Offset] + if !ok { + return nil, fmt.Errorf("verifier for offset=%d,size=%d hasn't been registered", + ce.Offset, ce.ChunkSize) + } + return d.Verifier(), nil +} + +// ChunkEntryForOffset returns the TOCEntry containing the byte of the +// named file at the given offset within the file. +// Name must be absolute path or one that is relative to root. +func (r *Reader) ChunkEntryForOffset(name string, offset int64) (e *TOCEntry, ok bool) { + name = cleanEntryName(name) + e, ok = r.Lookup(name) + if !ok || !e.isDataType() { + return nil, false + } + ents := r.chunks[name] + if len(ents) < 2 { + if offset >= e.ChunkSize { + return nil, false + } + return e, true + } + i := sort.Search(len(ents), func(i int) bool { + e := ents[i] + return e.ChunkOffset >= offset || (offset > e.ChunkOffset && offset < e.ChunkOffset+e.ChunkSize) + }) + if i == len(ents) { + return nil, false + } + return ents[i], true +} + +// Lookup returns the Table of Contents entry for the given path. +// +// To get the root directory, use the empty string. +// Path must be absolute path or one that is relative to root. +func (r *Reader) Lookup(path string) (e *TOCEntry, ok bool) { + path = cleanEntryName(path) + if r == nil { + return + } + e, ok = r.m[path] + if ok && e.Type == "hardlink" { + var err error + e, err = r.getSource(e) + if err != nil { + return nil, false + } + } + return +} + +// OpenFile returns the reader of the specified file payload. +// +// Name must be absolute path or one that is relative to root. +func (r *Reader) OpenFile(name string) (*io.SectionReader, error) { + fr, err := r.newFileReader(name) + if err != nil { + return nil, err + } + return io.NewSectionReader(fr, 0, fr.size), nil +} + +func (r *Reader) newFileReader(name string) (*fileReader, error) { + name = cleanEntryName(name) + ent, ok := r.Lookup(name) + if !ok { + // TODO: come up with some error plan. 
This is lazy: + return nil, &os.PathError{ + Path: name, + Op: "OpenFile", + Err: os.ErrNotExist, + } + } + if ent.Type != "reg" { + return nil, &os.PathError{ + Path: name, + Op: "OpenFile", + Err: errors.New("not a regular file"), + } + } + return &fileReader{ + r: r, + size: ent.Size, + ents: r.getChunks(ent), + }, nil +} + +func (r *Reader) OpenFileWithPreReader(name string, preRead func(*TOCEntry, io.Reader) error) (*io.SectionReader, error) { + fr, err := r.newFileReader(name) + if err != nil { + return nil, err + } + fr.preRead = preRead + return io.NewSectionReader(fr, 0, fr.size), nil +} + +func (r *Reader) getChunks(ent *TOCEntry) []*TOCEntry { + if ents, ok := r.chunks[ent.Name]; ok { + return ents + } + return []*TOCEntry{ent} +} + +type fileReader struct { + r *Reader + size int64 + ents []*TOCEntry // 1 or more reg/chunk entries + preRead func(*TOCEntry, io.Reader) error +} + +func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) { + if off >= fr.size { + return 0, io.EOF + } + if off < 0 { + return 0, errors.New("invalid offset") + } + var i int + if len(fr.ents) > 1 { + i = sort.Search(len(fr.ents), func(i int) bool { + return fr.ents[i].ChunkOffset >= off + }) + if i == len(fr.ents) { + i = len(fr.ents) - 1 + } + } + ent := fr.ents[i] + if ent.ChunkOffset > off { + if i == 0 { + return 0, errors.New("internal error; first chunk offset is non-zero") + } + ent = fr.ents[i-1] + } + + // If ent is a chunk of a large file, adjust the ReadAt + // offset by the chunk's offset. + off -= ent.ChunkOffset + + finalEnt := fr.ents[len(fr.ents)-1] + compressedOff := ent.Offset + // compressedBytesRemain is the number of compressed bytes in this + // file remaining, over 1+ chunks. + compressedBytesRemain := finalEnt.NextOffset() - compressedOff + + sr := io.NewSectionReader(fr.r.sr, compressedOff, compressedBytesRemain) + + const maxRead = 2 << 20 + var bufSize = maxRead + if compressedBytesRemain < maxRead { + bufSize = int(compressedBytesRemain) + } + + br := bufio.NewReaderSize(sr, bufSize) + if _, err := br.Peek(bufSize); err != nil { + return 0, fmt.Errorf("fileReader.ReadAt.peek: %v", err) + } + + dr, err := fr.r.decompressor.Reader(br) + if err != nil { + return 0, fmt.Errorf("fileReader.ReadAt.decompressor.Reader: %v", err) + } + defer dr.Close() + + if fr.preRead == nil { + if n, err := io.CopyN(io.Discard, dr, ent.InnerOffset+off); n != ent.InnerOffset+off || err != nil { + return 0, fmt.Errorf("discard of %d bytes != %v, %v", ent.InnerOffset+off, n, err) + } + return io.ReadFull(dr, p) + } + + var retN int + var retErr error + var found bool + var nr int64 + for _, e := range fr.r.toc.Entries[ent.chunkTopIndex:] { + if !e.isDataType() { + continue + } + if e.Offset != fr.r.toc.Entries[ent.chunkTopIndex].Offset { + break + } + if in, err := io.CopyN(io.Discard, dr, e.InnerOffset-nr); err != nil || in != e.InnerOffset-nr { + return 0, fmt.Errorf("discard of remaining %d bytes != %v, %v", e.InnerOffset-nr, in, err) + } + nr = e.InnerOffset + if e == ent { + found = true + if n, err := io.CopyN(io.Discard, dr, off); n != off || err != nil { + return 0, fmt.Errorf("discard of offset %d bytes != %v, %v", off, n, err) + } + retN, retErr = io.ReadFull(dr, p) + nr += off + int64(retN) + continue + } + cr := &countReader{r: io.LimitReader(dr, e.ChunkSize)} + if err := fr.preRead(e, cr); err != nil { + return 0, fmt.Errorf("failed to pre read: %w", err) + } + nr += cr.n + } + if !found { + return 0, fmt.Errorf("fileReader.ReadAt: target entry not found") + } + return 
retN, retErr +} + +// A Writer writes stargz files. +// +// Use NewWriter to create a new Writer. +type Writer struct { + bw *bufio.Writer + cw *countWriter + toc *JTOC + diffHash hash.Hash // SHA-256 of uncompressed tar + + closed bool + gz io.WriteCloser + lastUsername map[int]string + lastGroupname map[int]string + compressor Compressor + + uncompressedCounter *countWriteFlusher + + // ChunkSize optionally controls the maximum number of bytes + // of data of a regular file that can be written in one gzip + // stream before a new gzip stream is started. + // Zero means to use a default, currently 4 MiB. + ChunkSize int + + // MinChunkSize optionally controls the minimum number of bytes + // of data must be written in one gzip stream before a new gzip + // NOTE: This adds a TOC property that stargz snapshotter < v0.13.0 doesn't understand. + MinChunkSize int + + needsOpenGzEntries map[string]struct{} +} + +// currentCompressionWriter writes to the current w.gz field, which can +// change throughout writing a tar entry. +// +// Additionally, it updates w's SHA-256 of the uncompressed bytes +// of the tar file. +type currentCompressionWriter struct{ w *Writer } + +func (ccw currentCompressionWriter) Write(p []byte) (int, error) { + ccw.w.diffHash.Write(p) + if ccw.w.gz == nil { + if err := ccw.w.condOpenGz(); err != nil { + return 0, err + } + } + return ccw.w.gz.Write(p) +} + +func (w *Writer) chunkSize() int { + if w.ChunkSize <= 0 { + return 4 << 20 + } + return w.ChunkSize +} + +// Unpack decompresses the given estargz blob and returns a ReadCloser of the tar blob. +// TOC JSON and footer are removed. +func Unpack(sr *io.SectionReader, c Decompressor) (io.ReadCloser, error) { + footerSize := c.FooterSize() + if sr.Size() < footerSize { + return nil, fmt.Errorf("blob is too small; %d < %d", sr.Size(), footerSize) + } + footerOffset := sr.Size() - footerSize + footer := make([]byte, footerSize) + if _, err := sr.ReadAt(footer, footerOffset); err != nil { + return nil, err + } + blobPayloadSize, _, _, err := c.ParseFooter(footer) + if err != nil { + return nil, fmt.Errorf("failed to parse footer: %w", err) + } + if blobPayloadSize < 0 { + blobPayloadSize = sr.Size() + } + return c.Reader(io.LimitReader(sr, blobPayloadSize)) +} + +// NewWriter returns a new stargz writer (gzip-based) writing to w. +// +// The writer must be closed to write its trailing table of contents. +func NewWriter(w io.Writer) *Writer { + return NewWriterLevel(w, gzip.BestCompression) +} + +// NewWriterLevel returns a new stargz writer (gzip-based) writing to w. +// The compression level is configurable. +// +// The writer must be closed to write its trailing table of contents. +func NewWriterLevel(w io.Writer, compressionLevel int) *Writer { + return NewWriterWithCompressor(w, NewGzipCompressorWithLevel(compressionLevel)) +} + +// NewWriterWithCompressor returns a new stargz writer writing to w. +// The compression method is configurable. +// +// The writer must be closed to write its trailing table of contents. +func NewWriterWithCompressor(w io.Writer, c Compressor) *Writer { + bw := bufio.NewWriter(w) + cw := &countWriter{w: bw} + return &Writer{ + bw: bw, + cw: cw, + toc: &JTOC{Version: 1}, + diffHash: sha256.New(), + compressor: c, + uncompressedCounter: &countWriteFlusher{}, + } +} + +// Close writes the stargz's table of contents and flushes all the +// buffers, returning any error. 
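For orientation, a minimal end-to-end sketch of the Writer API assembled above (hypothetical file names and abbreviated error handling; not part of the vendored code); the Close implementation follows:

    package main

    import (
    	"log"
    	"os"

    	"github.com/containerd/stargz-snapshotter/estargz"
    )

    func main() {
    	in, err := os.Open("layer.tar")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer in.Close()

    	out, err := os.Create("layer.stargz")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer out.Close()

    	w := estargz.NewWriter(out) // gzip at BestCompression by default
    	if err := w.AppendTar(in); err != nil {
    		log.Fatal(err)
    	}
    	tocDigest, err := w.Close() // writes the TOC and footer
    	if err != nil {
    		log.Fatal(err)
    	}
    	log.Println("TOC digest:", tocDigest)
    }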
+func (w *Writer) Close() (digest.Digest, error) {
+	if w.closed {
+		return "", nil
+	}
+	defer func() { w.closed = true }()
+
+	if err := w.closeGz(); err != nil {
+		return "", err
+	}
+
+	// Write the TOC index and footer.
+	tocDigest, err := w.compressor.WriteTOCAndFooter(w.cw, w.cw.n, w.toc, w.diffHash)
+	if err != nil {
+		return "", err
+	}
+	if err := w.bw.Flush(); err != nil {
+		return "", err
+	}
+
+	return tocDigest, nil
+}
+
+func (w *Writer) closeGz() error {
+	if w.closed {
+		return errors.New("write on closed Writer")
+	}
+	if w.gz != nil {
+		if err := w.gz.Close(); err != nil {
+			return err
+		}
+		w.gz = nil
+	}
+	return nil
+}
+
+func (w *Writer) flushGz() error {
+	if w.closed {
+		return errors.New("flush on closed Writer")
+	}
+	if w.gz != nil {
+		if f, ok := w.gz.(interface {
+			Flush() error
+		}); ok {
+			return f.Flush()
+		}
+	}
+	return nil
+}
+
+// nameIfChanged returns name, unless it was already the value of (*mp)[id],
+// in which case it returns the empty string.
+func (w *Writer) nameIfChanged(mp *map[int]string, id int, name string) string {
+	if name == "" {
+		return ""
+	}
+	if *mp == nil {
+		*mp = make(map[int]string)
+	}
+	if (*mp)[id] == name {
+		return ""
+	}
+	(*mp)[id] = name
+	return name
+}
+
+func (w *Writer) condOpenGz() (err error) {
+	if w.gz == nil {
+		w.gz, err = w.compressor.Writer(w.cw)
+		if w.gz != nil {
+			w.gz = w.uncompressedCounter.register(w.gz)
+		}
+	}
+	return
+}
+
+// AppendTar reads the tar or tar.gz file from r and appends
+// each of its contents to w.
+//
+// The input r can optionally be gzip compressed but the output will
+// always be compressed by the specified compressor.
+func (w *Writer) AppendTar(r io.Reader) error {
+	return w.appendTar(r, false)
+}
+
+// AppendTarLossLess reads the tar or tar.gz file from r and appends
+// each of its contents to w.
+//
+// The input r can optionally be gzip compressed but the output will
+// always be compressed by the specified compressor.
+//
+// The difference between this function and AppendTar is that this one writes
+// the input tar stream into w without any modification (e.g. to header bytes).
+//
+// Note that if the input tar stream already contains TOC JSON, this returns an
+// error because w cannot overwrite the TOC JSON with the one generated by w without
+// lossy modification. To avoid this error, if the input stream is known to be stargz/estargz,
+// you should decompress it and remove the TOC JSON in advance.
+func (w *Writer) AppendTarLossLess(r io.Reader) error {
+	return w.appendTar(r, true)
+}
+
+func (w *Writer) appendTar(r io.Reader, lossless bool) error {
+	var src io.Reader
+	br := bufio.NewReader(r)
+	if isGzip(br) {
+		zr, _ := gzip.NewReader(br)
+		src = zr
+	} else {
+		src = io.Reader(br)
+	}
+	dst := currentCompressionWriter{w}
+	var tw *tar.Writer
+	if !lossless {
+		tw = tar.NewWriter(dst) // use tar writer only when this isn't lossless mode.
+	}
+	tr := tar.NewReader(src)
+	if lossless {
+		tr.RawAccounting = true
+	}
+	prevOffset := w.cw.n
+	var prevOffsetUncompressed int64
+	for {
+		h, err := tr.Next()
+		if err == io.EOF {
+			if lossless {
+				if remain := tr.RawBytes(); len(remain) > 0 {
+					// Collect the remaining null bytes.
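+					// (Editorial note: a tar stream is terminated by two
+					// 512-byte blocks of zeros; copying this padding through
+					// verbatim is what lets lossless mode reproduce the input
+					// bit-for-bit, cf. the reference below.)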
+					// https://github.com/vbatts/tar-split/blob/80a436fd6164c557b131f7c59ed69bd81af69761/concept/main.go#L49-L53
+					if _, err := dst.Write(remain); err != nil {
+						return err
+					}
+				}
+			}
+			break
+		}
+		if err != nil {
+			return fmt.Errorf("error reading from source tar: tar.Reader.Next: %v", err)
+		}
+		if cleanEntryName(h.Name) == TOCTarName {
+			// It is possible for a layer to be "stargzified" twice during the
+			// distribution lifecycle. So we reserve "TOCTarName" here to avoid
+			// duplicated entries in the resulting layer.
+			if lossless {
+				// We cannot handle this in a lossless way.
+				return fmt.Errorf("existing TOC JSON is not allowed; decompress layer before append")
+			}
+			continue
+		}
+
+		xattrs := make(map[string][]byte)
+		const xattrPAXRecordsPrefix = "SCHILY.xattr."
+		if h.PAXRecords != nil {
+			for k, v := range h.PAXRecords {
+				if strings.HasPrefix(k, xattrPAXRecordsPrefix) {
+					xattrs[k[len(xattrPAXRecordsPrefix):]] = []byte(v)
+				}
+			}
+		}
+		ent := &TOCEntry{
+			Name:        h.Name,
+			Mode:        h.Mode,
+			UID:         h.Uid,
+			GID:         h.Gid,
+			Uname:       w.nameIfChanged(&w.lastUsername, h.Uid, h.Uname),
+			Gname:       w.nameIfChanged(&w.lastGroupname, h.Gid, h.Gname),
+			ModTime3339: formatModtime(h.ModTime),
+			Xattrs:      xattrs,
+		}
+		if err := w.condOpenGz(); err != nil {
+			return err
+		}
+		if tw != nil {
+			if err := tw.WriteHeader(h); err != nil {
+				return err
+			}
+		} else {
+			if _, err := dst.Write(tr.RawBytes()); err != nil {
+				return err
+			}
+		}
+		switch h.Typeflag {
+		case tar.TypeLink:
+			ent.Type = "hardlink"
+			ent.LinkName = h.Linkname
+		case tar.TypeSymlink:
+			ent.Type = "symlink"
+			ent.LinkName = h.Linkname
+		case tar.TypeDir:
+			ent.Type = "dir"
+		case tar.TypeReg:
+			ent.Type = "reg"
+			ent.Size = h.Size
+		case tar.TypeChar:
+			ent.Type = "char"
+			ent.DevMajor = int(h.Devmajor)
+			ent.DevMinor = int(h.Devminor)
+		case tar.TypeBlock:
+			ent.Type = "block"
+			ent.DevMajor = int(h.Devmajor)
+			ent.DevMinor = int(h.Devminor)
+		case tar.TypeFifo:
+			ent.Type = "fifo"
+		default:
+			return fmt.Errorf("unsupported input tar entry %q", h.Typeflag)
+		}
+
+		// We need to keep a reference to the TOC entry for regular files, so that we
+		// can fill the digest later.
+		var regFileEntry *TOCEntry
+		var payloadDigest digest.Digester
+		if h.Typeflag == tar.TypeReg {
+			regFileEntry = ent
+			payloadDigest = digest.Canonical.Digester()
+		}
+
+		if h.Typeflag == tar.TypeReg && ent.Size > 0 {
+			var written int64
+			totalSize := ent.Size // save it before we destroy ent
+			tee := io.TeeReader(tr, payloadDigest.Hash())
+			for written < totalSize {
+				chunkSize := int64(w.chunkSize())
+				remain := totalSize - written
+				if remain < chunkSize {
+					chunkSize = remain
+				} else {
+					ent.ChunkSize = chunkSize
+				}
+
+				// We flush the underlying compression writer here to correctly calculate "w.cw.n".
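+				// (Editorial note: in the branch below, "Offset" records where
+				// the chunk's compressed stream starts within the blob, while
+				// "InnerOffset" records the chunk's position within that
+				// stream's uncompressed bytes; with MinChunkSize > 0, several
+				// small chunks may therefore share a single gzip stream.)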
+				if err := w.flushGz(); err != nil {
+					return err
+				}
+				if w.needsOpenGz(ent) || w.cw.n-prevOffset >= int64(w.MinChunkSize) {
+					if err := w.closeGz(); err != nil {
+						return err
+					}
+					ent.Offset = w.cw.n
+					prevOffset = ent.Offset
+					prevOffsetUncompressed = w.uncompressedCounter.n
+				} else {
+					ent.Offset = prevOffset
+					ent.InnerOffset = w.uncompressedCounter.n - prevOffsetUncompressed
+				}
+
+				ent.ChunkOffset = written
+				chunkDigest := digest.Canonical.Digester()
+
+				if err := w.condOpenGz(); err != nil {
+					return err
+				}
+
+				teeChunk := io.TeeReader(tee, chunkDigest.Hash())
+				var out io.Writer
+				if tw != nil {
+					out = tw
+				} else {
+					out = dst
+				}
+				if _, err := io.CopyN(out, teeChunk, chunkSize); err != nil {
+					return fmt.Errorf("error copying %q: %v", h.Name, err)
+				}
+				ent.ChunkDigest = chunkDigest.Digest().String()
+				w.toc.Entries = append(w.toc.Entries, ent)
+				written += chunkSize
+				ent = &TOCEntry{
+					Name: h.Name,
+					Type: "chunk",
+				}
+			}
+		} else {
+			w.toc.Entries = append(w.toc.Entries, ent)
+		}
+		if payloadDigest != nil {
+			regFileEntry.Digest = payloadDigest.Digest().String()
+		}
+		if tw != nil {
+			if err := tw.Flush(); err != nil {
+				return err
+			}
+		}
+	}
+	remainDest := io.Discard
+	if lossless {
+		remainDest = dst // Preserve the remaining bytes in lossless mode
+	}
+	_, err := io.Copy(remainDest, src)
+	return err
+}
+
+func (w *Writer) needsOpenGz(ent *TOCEntry) bool {
+	if ent.Type != "reg" {
+		return false
+	}
+	if w.needsOpenGzEntries == nil {
+		return false
+	}
+	_, ok := w.needsOpenGzEntries[ent.Name]
+	return ok
+}
+
+// DiffID returns the SHA-256 of the uncompressed tar bytes.
+// It is only valid to call DiffID after Close.
+func (w *Writer) DiffID() string {
+	return fmt.Sprintf("sha256:%x", w.diffHash.Sum(nil))
+}
+
+func maxFooterSize(blobSize int64, decompressors ...Decompressor) (res int64) {
+	for _, d := range decompressors {
+		if s := d.FooterSize(); res < s && s <= blobSize {
+			res = s
+		}
+	}
+	return
+}
+
+func parseTOC(d Decompressor, sr *io.SectionReader, tocOff, tocSize int64, tocBytes []byte, opts openOpts) (*Reader, error) {
+	if tocOff < 0 {
+		// This means that the TOC isn't contained in the blob.
+		// We pass a nil reader to ParseTOC and expect that ParseTOC acquires
+		// the TOC from the external location.
+ start := time.Now() + toc, tocDgst, err := d.ParseTOC(nil) + if err != nil { + return nil, err + } + if opts.telemetry != nil && opts.telemetry.GetTocLatency != nil { + opts.telemetry.GetTocLatency(start) + } + if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil { + opts.telemetry.DeserializeTocLatency(start) + } + return &Reader{ + sr: sr, + toc: toc, + tocDigest: tocDgst, + decompressor: d, + }, nil + } + if len(tocBytes) > 0 { + start := time.Now() + toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes)) + if err == nil { + if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil { + opts.telemetry.DeserializeTocLatency(start) + } + return &Reader{ + sr: sr, + toc: toc, + tocDigest: tocDgst, + decompressor: d, + }, nil + } + } + + start := time.Now() + tocBytes = make([]byte, tocSize) + if _, err := sr.ReadAt(tocBytes, tocOff); err != nil { + return nil, fmt.Errorf("error reading %d byte TOC targz: %v", len(tocBytes), err) + } + if opts.telemetry != nil && opts.telemetry.GetTocLatency != nil { + opts.telemetry.GetTocLatency(start) + } + start = time.Now() + toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes)) + if err != nil { + return nil, err + } + if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil { + opts.telemetry.DeserializeTocLatency(start) + } + return &Reader{ + sr: sr, + toc: toc, + tocDigest: tocDgst, + decompressor: d, + }, nil +} + +func formatModtime(t time.Time) string { + if t.IsZero() || t.Unix() == 0 { + return "" + } + return t.UTC().Round(time.Second).Format(time.RFC3339) +} + +func cleanEntryName(name string) string { + // Use path.Clean to consistently deal with path separators across platforms. + return strings.TrimPrefix(path.Clean("/"+name), "/") +} + +// countWriter counts how many bytes have been written to its wrapped +// io.Writer. +type countWriter struct { + w io.Writer + n int64 +} + +func (cw *countWriter) Write(p []byte) (n int, err error) { + n, err = cw.w.Write(p) + cw.n += int64(n) + return +} + +type countWriteFlusher struct { + io.WriteCloser + n int64 +} + +func (wc *countWriteFlusher) register(w io.WriteCloser) io.WriteCloser { + wc.WriteCloser = w + return wc +} + +func (wc *countWriteFlusher) Write(p []byte) (n int, err error) { + n, err = wc.WriteCloser.Write(p) + wc.n += int64(n) + return +} + +func (wc *countWriteFlusher) Flush() error { + if f, ok := wc.WriteCloser.(interface { + Flush() error + }); ok { + return f.Flush() + } + return nil +} + +func (wc *countWriteFlusher) Close() error { + err := wc.WriteCloser.Close() + wc.WriteCloser = nil + return err +} + +// isGzip reports whether br is positioned right before an upcoming gzip stream. +// It does not consume any bytes from br. 
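+//
+// (Editorial note: the three bytes peeked below are the RFC 1952 gzip magic:
+// ID1 = 0x1f, ID2 = 0x8b, and CM = 8 for DEFLATE.)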
+func isGzip(br *bufio.Reader) bool { + const ( + gzipID1 = 0x1f + gzipID2 = 0x8b + gzipDeflate = 8 + ) + peek, _ := br.Peek(3) + return len(peek) >= 3 && peek[0] == gzipID1 && peek[1] == gzipID2 && peek[2] == gzipDeflate +} + +func positive(n int64) int64 { + if n < 0 { + return 0 + } + return n +} + +type countReader struct { + r io.Reader + n int64 +} + +func (cr *countReader) Read(p []byte) (n int, err error) { + n, err = cr.r.Read(p) + cr.n += int64(n) + return +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go new file mode 100644 index 0000000000..f24afe32f4 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go @@ -0,0 +1,237 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. + Use of this source code is governed by a BSD-style + license that can be found in the LICENSE file. +*/ + +package estargz + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "encoding/binary" + "encoding/json" + "fmt" + "hash" + "io" + "strconv" + + digest "github.com/opencontainers/go-digest" +) + +type gzipCompression struct { + *GzipCompressor + *GzipDecompressor +} + +func newGzipCompressionWithLevel(level int) Compression { + return &gzipCompression{ + &GzipCompressor{level}, + &GzipDecompressor{}, + } +} + +func NewGzipCompressor() *GzipCompressor { + return &GzipCompressor{gzip.BestCompression} +} + +func NewGzipCompressorWithLevel(level int) *GzipCompressor { + return &GzipCompressor{level} +} + +type GzipCompressor struct { + compressionLevel int +} + +func (gc *GzipCompressor) Writer(w io.Writer) (WriteFlushCloser, error) { + return gzip.NewWriterLevel(w, gc.compressionLevel) +} + +func (gc *GzipCompressor) WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (digest.Digest, error) { + tocJSON, err := json.MarshalIndent(toc, "", "\t") + if err != nil { + return "", err + } + gz, _ := gzip.NewWriterLevel(w, gc.compressionLevel) + gw := io.Writer(gz) + if diffHash != nil { + gw = io.MultiWriter(gz, diffHash) + } + tw := tar.NewWriter(gw) + if err := tw.WriteHeader(&tar.Header{ + Typeflag: tar.TypeReg, + Name: TOCTarName, + Size: int64(len(tocJSON)), + }); err != nil { + return "", err + } + if _, err := tw.Write(tocJSON); err != nil { + return "", err + } + + if err := tw.Close(); err != nil { + return "", err + } + if err := gz.Close(); err != nil { + return "", err + } + if _, err := w.Write(gzipFooterBytes(off)); err != nil { + return "", err + } + return digest.FromBytes(tocJSON), nil +} + +// gzipFooterBytes returns the 51 bytes footer. 
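+//
+// (Editorial breakdown, derived from the construction below: a 10-byte gzip
+// header with FEXTRA set, a 2-byte XLEN, a 26-byte extra field ("SG", a
+// 2-byte little-endian subfield length, 16 hex digits of TOC offset, and
+// "STARGZ"), a 5-byte empty stored DEFLATE block, and the 8-byte gzip
+// trailer: 10 + 2 + 26 + 5 + 8 = 51 bytes.)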
+func gzipFooterBytes(tocOff int64) []byte {
+	buf := bytes.NewBuffer(make([]byte, 0, FooterSize))
+	gz, _ := gzip.NewWriterLevel(buf, gzip.NoCompression) // MUST be NoCompression to keep 51 bytes
+
+	// Extra header indicating the offset of TOCJSON
+	// https://tools.ietf.org/html/rfc1952#section-2.3.1.1
+	header := make([]byte, 4)
+	header[0], header[1] = 'S', 'G'
+	subfield := fmt.Sprintf("%016xSTARGZ", tocOff)
+	binary.LittleEndian.PutUint16(header[2:4], uint16(len(subfield))) // little-endian per RFC1952
+	gz.Header.Extra = append(header, []byte(subfield)...)
+	gz.Close()
+	if buf.Len() != FooterSize {
+		panic(fmt.Sprintf("footer buffer = %d, not %d", buf.Len(), FooterSize))
+	}
+	return buf.Bytes()
+}
+
+type GzipDecompressor struct{}
+
+func (gz *GzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) {
+	return gzip.NewReader(r)
+}
+
+func (gz *GzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
+	return parseTOCEStargz(r)
+}
+
+func (gz *GzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
+	if len(p) != FooterSize {
+		return 0, 0, 0, fmt.Errorf("invalid length %d cannot be parsed", len(p))
+	}
+	zr, err := gzip.NewReader(bytes.NewReader(p))
+	if err != nil {
+		return 0, 0, 0, err
+	}
+	defer zr.Close()
+	extra := zr.Header.Extra
+	si1, si2, subfieldlen, subfield := extra[0], extra[1], extra[2:4], extra[4:]
+	if si1 != 'S' || si2 != 'G' {
+		return 0, 0, 0, fmt.Errorf("invalid subfield IDs: %q, %q; want S, G", si1, si2)
+	}
+	if slen := binary.LittleEndian.Uint16(subfieldlen); slen != uint16(16+len("STARGZ")) {
+		return 0, 0, 0, fmt.Errorf("invalid length of subfield %d; want %d", slen, 16+len("STARGZ"))
+	}
+	if string(subfield[16:]) != "STARGZ" {
+		return 0, 0, 0, fmt.Errorf("STARGZ magic string must be included in the footer subfield")
+	}
+	tocOffset, err = strconv.ParseInt(string(subfield[:16]), 16, 64)
+	if err != nil {
+		return 0, 0, 0, fmt.Errorf("failed to parse toc offset: %w", err)
+	}
+	return tocOffset, tocOffset, 0, nil
+}
+
+func (gz *GzipDecompressor) FooterSize() int64 {
+	return FooterSize
+}
+
+func (gz *GzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) {
+	return decompressTOCEStargz(r)
+}
+
+type LegacyGzipDecompressor struct{}
+
+func (gz *LegacyGzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) {
+	return gzip.NewReader(r)
+}
+
+func (gz *LegacyGzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
+	return parseTOCEStargz(r)
+}
+
+func (gz *LegacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
+	if len(p) != legacyFooterSize {
+		return 0, 0, 0, fmt.Errorf("legacy: invalid length %d cannot be parsed", len(p))
+	}
+	zr, err := gzip.NewReader(bytes.NewReader(p))
+	if err != nil {
+		return 0, 0, 0, fmt.Errorf("legacy: failed to get footer gzip reader: %w", err)
+	}
+	defer zr.Close()
+	extra := zr.Header.Extra
+	if len(extra) != 16+len("STARGZ") {
+		return 0, 0, 0, fmt.Errorf("legacy: invalid stargz's extra field size")
+	}
+	if string(extra[16:]) != "STARGZ" {
+		return 0, 0, 0, fmt.Errorf("legacy: magic string STARGZ not found")
+	}
+	tocOffset, err = strconv.ParseInt(string(extra[:16]), 16, 64)
+	if err != nil {
+		return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err)
+	}
+	return tocOffset, tocOffset, 0, nil
+}
+
+func (gz *LegacyGzipDecompressor) FooterSize() int64 {
+	return legacyFooterSize
+}
+
+func (gz
*LegacyGzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) {
+	return decompressTOCEStargz(r)
+}
+
+func parseTOCEStargz(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
+	tr, err := decompressTOCEStargz(r)
+	if err != nil {
+		return nil, "", err
+	}
+	dgstr := digest.Canonical.Digester()
+	toc = new(JTOC)
+	if err := json.NewDecoder(io.TeeReader(tr, dgstr.Hash())).Decode(&toc); err != nil {
+		return nil, "", fmt.Errorf("error decoding TOC JSON: %v", err)
+	}
+	if err := tr.Close(); err != nil {
+		return nil, "", err
+	}
+	return toc, dgstr.Digest(), nil
+}
+
+func decompressTOCEStargz(r io.Reader) (tocJSON io.ReadCloser, err error) {
+	zr, err := gzip.NewReader(r)
+	if err != nil {
+		return nil, fmt.Errorf("malformed TOC gzip header: %v", err)
+	}
+	zr.Multistream(false)
+	tr := tar.NewReader(zr)
+	h, err := tr.Next()
+	if err != nil {
+		return nil, fmt.Errorf("failed to find tar header in TOC gzip stream: %v", err)
+	}
+	if h.Name != TOCTarName {
+		return nil, fmt.Errorf("TOC tar entry had name %q; expected %q", h.Name, TOCTarName)
+	}
+	return readCloser{tr, zr.Close}, nil
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
new file mode 100644
index 0000000000..0ca6fd75f2
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
@@ -0,0 +1,2366 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+/*
+   Copyright 2019 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.
+*/
+
+package estargz
+
+import (
+	"archive/tar"
+	"bytes"
+	"compress/gzip"
+	"crypto/sha256"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"math/rand"
+	"os"
+	"path/filepath"
+	"reflect"
+	"sort"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/containerd/stargz-snapshotter/estargz/errorutil"
+	"github.com/klauspost/compress/zstd"
+	digest "github.com/opencontainers/go-digest"
+)
+
+func init() {
+	rand.Seed(time.Now().UnixNano())
+}
+
+// TestingController is Compression with some helper methods necessary for testing.
+type TestingController interface {
+	Compression
+	TestStreams(t *testing.T, b []byte, streams []int64)
+	DiffIDOf(*testing.T, []byte) string
+	String() string
+}
+
+// CompressionTestSuite tests that this pkg, with the given controllers, can build valid eStargz blobs and parse them.
+func CompressionTestSuite(t *testing.T, controllers ...TestingControllerFactory) {
+	t.Run("testBuild", func(t *testing.T) { t.Parallel(); testBuild(t, controllers...) })
+	t.Run("testDigestAndVerify", func(t *testing.T) { t.Parallel(); testDigestAndVerify(t, controllers...) })
+	t.Run("testWriteAndOpen", func(t *testing.T) { t.Parallel(); testWriteAndOpen(t, controllers...
})
+}
+
+type TestingControllerFactory func() TestingController
+
+const (
+	uncompressedType int = iota
+	gzipType
+	zstdType
+)
+
+var srcCompressions = []int{
+	uncompressedType,
+	gzipType,
+	zstdType,
+}
+
+var allowedPrefix = [4]string{"", "./", "/", "../"}
+
+// testBuild tests that the resulting stargz blob built by this pkg has the same
+// contents as the normal stargz blob.
+func testBuild(t *testing.T, controllers ...TestingControllerFactory) {
+	tests := []struct {
+		name         string
+		chunkSize    int
+		minChunkSize []int
+		in           []tarEntry
+	}{
+		{
+			name:      "regfiles and directories",
+			chunkSize: 4,
+			in: tarOf(
+				file("foo", "test1"),
+				dir("foo2/"),
+				file("foo2/bar", "test2", xAttr(map[string]string{"test": "sample"})),
+			),
+		},
+		{
+			name:      "empty files",
+			chunkSize: 4,
+			in: tarOf(
+				file("foo", "tttttt"),
+				file("foo_empty", ""),
+				file("foo2", "tttttt"),
+				file("foo_empty2", ""),
+				file("foo3", "tttttt"),
+				file("foo_empty3", ""),
+				file("foo4", "tttttt"),
+				file("foo_empty4", ""),
+				file("foo5", "tttttt"),
+				file("foo_empty5", ""),
+				file("foo6", "tttttt"),
+			),
+		},
+		{
+			name:         "various files",
+			chunkSize:    4,
+			minChunkSize: []int{0, 64000},
+			in: tarOf(
+				file("baz.txt", "bazbazbazbazbazbazbaz"),
+				file("foo1.txt", "a"),
+				file("bar/foo2.txt", "b"),
+				file("foo3.txt", "c"),
+				symlink("barlink", "test/bar.txt"),
+				dir("test/"),
+				dir("dev/"),
+				blockdev("dev/testblock", 3, 4),
+				fifo("dev/testfifo"),
+				chardev("dev/testchar1", 5, 6),
+				file("test/bar.txt", "testbartestbar", xAttr(map[string]string{"test2": "sample2"})),
+				dir("test2/"),
+				link("test2/bazlink", "baz.txt"),
+				chardev("dev/testchar2", 1, 2),
+			),
+		},
+		{
+			name:      "no contents",
+			chunkSize: 4,
+			in: tarOf(
+				file("baz.txt", ""),
+				symlink("barlink", "test/bar.txt"),
+				dir("test/"),
+				dir("dev/"),
+				blockdev("dev/testblock", 3, 4),
+				fifo("dev/testfifo"),
+				chardev("dev/testchar1", 5, 6),
+				file("test/bar.txt", "", xAttr(map[string]string{"test2": "sample2"})),
+				dir("test2/"),
+				link("test2/bazlink", "baz.txt"),
+				chardev("dev/testchar2", 1, 2),
+			),
+		},
+	}
+	for _, tt := range tests {
+		if len(tt.minChunkSize) == 0 {
+			tt.minChunkSize = []int{0}
+		}
+		for _, srcCompression := range srcCompressions {
+			srcCompression := srcCompression
+			for _, newCL := range controllers {
+				newCL := newCL
+				for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
+					srcTarFormat := srcTarFormat
+					for _, prefix := range allowedPrefix {
+						prefix := prefix
+						for _, minChunkSize := range tt.minChunkSize {
+							minChunkSize := minChunkSize
+							t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s,minChunkSize=%d", newCL(), prefix, srcCompression, srcTarFormat, minChunkSize), func(t *testing.T) {
+								tarBlob := buildTar(t, tt.in, prefix, srcTarFormat)
+								// Test divideEntries()
+								entries, err := sortEntries(tarBlob, nil, nil) // identical order
+								if err != nil {
+									t.Fatalf("failed to parse tar: %v", err)
+								}
+								var merged []*entry
+								for _, part := range divideEntries(entries, 4) {
+									merged = append(merged, part...)
+								}
+								if !reflect.DeepEqual(entries, merged) {
+									for _, e := range entries {
+										t.Logf("Original: %v", e.header)
+									}
+									for _, e := range merged {
+										t.Logf("Merged: %v", e.header)
+									}
+									t.Errorf("divided entries couldn't be merged")
+									return
+								}
+
+								// Prepare sample data
+								cl1 := newCL()
+								wantBuf := new(bytes.Buffer)
+								sw := NewWriterWithCompressor(wantBuf, cl1)
+								sw.MinChunkSize = minChunkSize
+								sw.ChunkSize = tt.chunkSize
+								if err := sw.AppendTar(tarBlob); err != nil {
+									t.Fatalf("failed to append tar to want stargz: %v", err)
+								}
+								if _, err := sw.Close(); err != nil {
+									t.Fatalf("failed to prepare want stargz: %v", err)
+								}
+								wantData := wantBuf.Bytes()
+								want, err := Open(io.NewSectionReader(
+									bytes.NewReader(wantData), 0, int64(len(wantData))),
+									WithDecompressors(cl1),
+								)
+								if err != nil {
+									t.Fatalf("failed to parse the want stargz: %v", err)
+								}
+
+								// Prepare testing data
+								var opts []Option
+								if minChunkSize > 0 {
+									opts = append(opts, WithMinChunkSize(minChunkSize))
+								}
+								cl2 := newCL()
+								rc, err := Build(compressBlob(t, tarBlob, srcCompression),
+									append(opts, WithChunkSize(tt.chunkSize), WithCompression(cl2))...)
+								if err != nil {
+									t.Fatalf("failed to build stargz: %v", err)
+								}
+								defer rc.Close()
+								gotBuf := new(bytes.Buffer)
+								if _, err := io.Copy(gotBuf, rc); err != nil {
+									t.Fatalf("failed to copy built stargz blob: %v", err)
+								}
+								gotData := gotBuf.Bytes()
+								got, err := Open(io.NewSectionReader(
+									bytes.NewReader(gotBuf.Bytes()), 0, int64(len(gotData))),
+									WithDecompressors(cl2),
+								)
+								if err != nil {
+									t.Fatalf("failed to parse the got stargz: %v", err)
+								}
+
+								// Check DiffID is properly calculated
+								rc.Close()
+								diffID := rc.DiffID()
+								wantDiffID := cl2.DiffIDOf(t, gotData)
+								if diffID.String() != wantDiffID {
+									t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
+								}
+
+								// Compare as stargz
+								if !isSameVersion(t, cl1, wantData, cl2, gotData) {
+									t.Errorf("built stargz doesn't have the same TOC JSON version")
+									return
+								}
+								if !isSameEntries(t, want, got) {
+									t.Errorf("built stargz isn't the same as the original")
+									return
+								}
+
+								// Compare as tar.gz
+								if !isSameTarGz(t, cl1, wantData, cl2, gotData) {
+									t.Errorf("built stargz isn't the same tar.gz")
+									return
+								}
+							})
+						}
+					}
+				}
+			}
+		}
+	}
+}
+
+func isSameTarGz(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool {
+	aGz, err := cla.Reader(bytes.NewReader(a))
+	if err != nil {
+		t.Fatalf("failed to read A")
+	}
+	defer aGz.Close()
+	bGz, err := clb.Reader(bytes.NewReader(b))
+	if err != nil {
+		t.Fatalf("failed to read B")
+	}
+	defer bGz.Close()
+
+	// Same as tar's Next() method but ignores landmarks and the TOC JSON file
+	next := func(r *tar.Reader) (h *tar.Header, err error) {
+		for {
+			if h, err = r.Next(); err != nil {
+				return
+			}
+			if h.Name != PrefetchLandmark &&
+				h.Name != NoPrefetchLandmark &&
+				h.Name != TOCTarName {
+				return
+			}
+		}
+	}
+
+	aTar := tar.NewReader(aGz)
+	bTar := tar.NewReader(bGz)
+	for {
+		// Fetch and parse next header.
+		aH, aErr := next(aTar)
+		bH, bErr := next(bTar)
+		if aErr != nil || bErr != nil {
+			if aErr == io.EOF && bErr == io.EOF {
+				break
+			}
+			t.Fatalf("Failed to parse tar file: A: %v, B: %v", aErr, bErr)
+		}
+		if !reflect.DeepEqual(aH, bH) {
+			t.Logf("different header (A = %v; B = %v)", aH, bH)
+			return false
+
+		}
+		aFile, err := io.ReadAll(aTar)
+		if err != nil {
+			t.Fatal("failed to read tar payload of A")
+		}
+		bFile, err := io.ReadAll(bTar)
+		if err != nil {
+			t.Fatal("failed to read tar payload of B")
+		}
+		if !bytes.Equal(aFile, bFile) {
+			t.Logf("different tar payload (A = %q; B = %q)", string(aFile), string(bFile))
+			return false
+		}
+	}
+
+	return true
+}
+
+func isSameVersion(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool {
+	aJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(a), 0, int64(len(a))), cla)
+	if err != nil {
+		t.Fatalf("failed to parse A: %v", err)
+	}
+	bJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), clb)
+	if err != nil {
+		t.Fatalf("failed to parse B: %v", err)
+	}
+	t.Logf("A: TOCJSON: %v", dumpTOCJSON(t, aJTOC))
+	t.Logf("B: TOCJSON: %v", dumpTOCJSON(t, bJTOC))
+	return aJTOC.Version == bJTOC.Version
+}
+
+func isSameEntries(t *testing.T, a, b *Reader) bool {
+	aroot, ok := a.Lookup("")
+	if !ok {
+		t.Fatalf("failed to get root of A")
+	}
+	broot, ok := b.Lookup("")
+	if !ok {
+		t.Fatalf("failed to get root of B")
+	}
+	aEntry := stargzEntry{aroot, a}
+	bEntry := stargzEntry{broot, b}
+	return contains(t, aEntry, bEntry) && contains(t, bEntry, aEntry)
+}
+
+func compressBlob(t *testing.T, src *io.SectionReader, srcCompression int) *io.SectionReader {
+	buf := new(bytes.Buffer)
+	var w io.WriteCloser
+	var err error
+	if srcCompression == gzipType {
+		w = gzip.NewWriter(buf)
+	} else if srcCompression == zstdType {
+		w, err = zstd.NewWriter(buf)
+		if err != nil {
+			t.Fatalf("failed to init zstd writer: %v", err)
+		}
+	} else {
+		return src
+	}
+	src.Seek(0, io.SeekStart)
+	if _, err := io.Copy(w, src); err != nil {
+		t.Fatalf("failed to compress source")
+	}
+	if err := w.Close(); err != nil {
+		t.Fatalf("failed to finalize compress source")
+	}
+	data := buf.Bytes()
+	return io.NewSectionReader(bytes.NewReader(data), 0, int64(len(data)))
+
+}
+
+type stargzEntry struct {
+	e *TOCEntry
+	r *Reader
+}
+
+// contains checks if all child entries in "b" are also contained in "a".
+// This function also checks if the files/chunks contain the same contents among "a" and "b".
+func contains(t *testing.T, a, b stargzEntry) bool {
+	ae, ar := a.e, a.r
+	be, br := b.e, b.r
+	t.Logf("Comparing: %q vs %q", ae.Name, be.Name)
+	if !equalEntry(ae, be) {
+		t.Logf("%q != %q: entry: a: %v, b: %v", ae.Name, be.Name, ae, be)
+		return false
+	}
+	if ae.Type == "dir" {
+		t.Logf("Directory: %q vs %q: %v vs %v", ae.Name, be.Name,
+			allChildrenName(ae), allChildrenName(be))
+		iscontain := true
+		ae.ForeachChild(func(aBaseName string, aChild *TOCEntry) bool {
+			// Walk through all files on this stargz file.
+
+			if aChild.Name == PrefetchLandmark ||
+				aChild.Name == NoPrefetchLandmark {
+				return true // Ignore landmarks
+			}
+
+			// Ignore a TOCEntry of "./" (formatted as "" by stargz lib) on root directory
+			// because this points to the root directory itself.
+			if aChild.Name == "" && ae.Name == "" {
+				return true
+			}
+
+			bChild, ok := be.LookupChild(aBaseName)
+			if !ok {
+				t.Logf("%q (base: %q): not found in b: %v",
+					ae.Name, aBaseName, allChildrenName(be))
+				iscontain = false
+				return false
+			}
+
+			childcontain := contains(t, stargzEntry{aChild, a.r}, stargzEntry{bChild, b.r})
+			if !childcontain {
+				t.Logf("%q != %q: non-equal dir", ae.Name, be.Name)
+				iscontain = false
+				return false
+			}
+			return true
+		})
+		return iscontain
+	} else if ae.Type == "reg" {
+		af, err := ar.OpenFile(ae.Name)
+		if err != nil {
+			t.Fatalf("failed to open file %q on A: %v", ae.Name, err)
+		}
+		bf, err := br.OpenFile(be.Name)
+		if err != nil {
+			t.Fatalf("failed to open file %q on B: %v", be.Name, err)
+		}
+
+		var nr int64
+		for nr < ae.Size {
+			abytes, anext, aok := readOffset(t, af, nr, a)
+			bbytes, bnext, bok := readOffset(t, bf, nr, b)
+			if !aok && !bok {
+				break
+			} else if !(aok && bok) || anext != bnext {
+				t.Logf("%q != %q (offset=%d): chunk existence a=%v vs b=%v, anext=%v vs bnext=%v",
+					ae.Name, be.Name, nr, aok, bok, anext, bnext)
+				return false
+			}
+			nr = anext
+			if !bytes.Equal(abytes, bbytes) {
+				t.Logf("%q != %q: different contents %v vs %v",
+					ae.Name, be.Name, string(abytes), string(bbytes))
+				return false
+			}
+		}
+		return true
+	}
+
+	return true
+}
+
+func allChildrenName(e *TOCEntry) (children []string) {
+	e.ForeachChild(func(baseName string, _ *TOCEntry) bool {
+		children = append(children, baseName)
+		return true
+	})
+	return
+}
+
+func equalEntry(a, b *TOCEntry) bool {
+	// Here, we selectively compare fields that we are interested in.
+	return a.Name == b.Name &&
+		a.Type == b.Type &&
+		a.Size == b.Size &&
+		a.ModTime3339 == b.ModTime3339 &&
+		a.Stat().ModTime().Equal(b.Stat().ModTime()) && // modTime time.Time
+		a.LinkName == b.LinkName &&
+		a.Mode == b.Mode &&
+		a.UID == b.UID &&
+		a.GID == b.GID &&
+		a.Uname == b.Uname &&
+		a.Gname == b.Gname &&
+		(a.Offset >= 0) == (b.Offset >= 0) &&
+		(a.NextOffset() > 0) == (b.NextOffset() > 0) &&
+		a.DevMajor == b.DevMajor &&
+		a.DevMinor == b.DevMinor &&
+		a.NumLink == b.NumLink &&
+		reflect.DeepEqual(a.Xattrs, b.Xattrs) &&
+		// chunk-related information isn't compared in this function.
+ // ChunkOffset int64 `json:"chunkOffset,omitempty"` + // ChunkSize int64 `json:"chunkSize,omitempty"` + // children map[string]*TOCEntry + a.Digest == b.Digest +} + +func readOffset(t *testing.T, r *io.SectionReader, offset int64, e stargzEntry) ([]byte, int64, bool) { + ce, ok := e.r.ChunkEntryForOffset(e.e.Name, offset) + if !ok { + return nil, 0, false + } + data := make([]byte, ce.ChunkSize) + t.Logf("Offset: %v, NextOffset: %v", ce.Offset, ce.NextOffset()) + n, err := r.ReadAt(data, ce.ChunkOffset) + if err != nil { + t.Fatalf("failed to read file payload of %q (offset:%d,size:%d): %v", + e.e.Name, ce.ChunkOffset, ce.ChunkSize, err) + } + if int64(n) != ce.ChunkSize { + t.Fatalf("unexpected copied data size %d; want %d", + n, ce.ChunkSize) + } + return data[:n], offset + ce.ChunkSize, true +} + +func dumpTOCJSON(t *testing.T, tocJSON *JTOC) string { + jtocData, err := json.Marshal(*tocJSON) + if err != nil { + t.Fatalf("failed to marshal TOC JSON: %v", err) + } + buf := new(bytes.Buffer) + if _, err := io.Copy(buf, bytes.NewReader(jtocData)); err != nil { + t.Fatalf("failed to read toc json blob: %v", err) + } + return buf.String() +} + +const chunkSize = 3 + +// type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, compressionLevel int) +type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) + +// testDigestAndVerify runs specified checks against sample stargz blobs. +func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory) { + tests := []struct { + name string + tarInit func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) + checks []check + minChunkSize []int + }{ + { + name: "no-regfile", + tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + return tarOf( + dir("test/"), + ) + }, + checks: []check{ + checkStargzTOC, + checkVerifyTOC, + checkVerifyInvalidStargzFail(buildTar(t, tarOf( + dir("test2/"), // modified + ), allowedPrefix[0])), + }, + }, + { + name: "small-files", + tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + return tarOf( + regDigest(t, "baz.txt", "", dgstMap), + regDigest(t, "foo.txt", "a", dgstMap), + dir("test/"), + regDigest(t, "test/bar.txt", "bbb", dgstMap), + ) + }, + minChunkSize: []int{0, 64000}, + checks: []check{ + checkStargzTOC, + checkVerifyTOC, + checkVerifyInvalidStargzFail(buildTar(t, tarOf( + file("baz.txt", ""), + file("foo.txt", "M"), // modified + dir("test/"), + file("test/bar.txt", "bbb"), + ), allowedPrefix[0])), + // checkVerifyInvalidTOCEntryFail("foo.txt"), // TODO + checkVerifyBrokenContentFail("foo.txt"), + }, + }, + { + name: "big-files", + tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + return tarOf( + regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap), + regDigest(t, "foo.txt", "a", dgstMap), + dir("test/"), + regDigest(t, "test/bar.txt", "testbartestbar", dgstMap), + ) + }, + checks: []check{ + checkStargzTOC, + checkVerifyTOC, + checkVerifyInvalidStargzFail(buildTar(t, tarOf( + file("baz.txt", "bazbazbazMMMbazbazbaz"), // modified + file("foo.txt", "a"), + dir("test/"), + file("test/bar.txt", "testbartestbar"), + ), allowedPrefix[0])), + checkVerifyInvalidTOCEntryFail("test/bar.txt"), + checkVerifyBrokenContentFail("test/bar.txt"), + }, + }, + { + name: "with-non-regfiles", + minChunkSize: []int{0, 64000}, + tarInit: 
func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) {
+				return tarOf(
+					regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap),
+					regDigest(t, "foo.txt", "a", dgstMap),
+					regDigest(t, "bar/foo2.txt", "b", dgstMap),
+					regDigest(t, "foo3.txt", "c", dgstMap),
+					symlink("barlink", "test/bar.txt"),
+					dir("test/"),
+					regDigest(t, "test/bar.txt", "testbartestbar", dgstMap),
+					dir("test2/"),
+					link("test2/bazlink", "baz.txt"),
+				)
+			},
+			checks: []check{
+				checkStargzTOC,
+				checkVerifyTOC,
+				checkVerifyInvalidStargzFail(buildTar(t, tarOf(
+					file("baz.txt", "bazbazbazbazbazbazbaz"),
+					file("foo.txt", "a"),
+					file("bar/foo2.txt", "b"),
+					file("foo3.txt", "c"),
+					symlink("barlink", "test/bar.txt"),
+					dir("test/"),
+					file("test/bar.txt", "testbartestbar"),
+					dir("test2/"),
+					link("test2/bazlink", "foo.txt"), // modified
+				), allowedPrefix[0])),
+				checkVerifyInvalidTOCEntryFail("test/bar.txt"),
+				checkVerifyBrokenContentFail("test/bar.txt"),
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		if len(tt.minChunkSize) == 0 {
+			tt.minChunkSize = []int{0}
+		}
+		for _, srcCompression := range srcCompressions {
+			srcCompression := srcCompression
+			for _, newCL := range controllers {
+				newCL := newCL
+				for _, prefix := range allowedPrefix {
+					prefix := prefix
+					for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
+						srcTarFormat := srcTarFormat
+						for _, minChunkSize := range tt.minChunkSize {
+							minChunkSize := minChunkSize
+							t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s,minChunkSize=%d", newCL(), prefix, srcTarFormat, minChunkSize), func(t *testing.T) {
+								// Get original tar file and chunk digests
+								dgstMap := make(map[string]digest.Digest)
+								tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat)
+
+								cl := newCL()
+								rc, err := Build(compressBlob(t, tarBlob, srcCompression),
+									WithChunkSize(chunkSize), WithCompression(cl))
+								if err != nil {
+									t.Fatalf("failed to convert stargz: %v", err)
+								}
+								tocDigest := rc.TOCDigest()
+								defer rc.Close()
+								buf := new(bytes.Buffer)
+								if _, err := io.Copy(buf, rc); err != nil {
+									t.Fatalf("failed to copy built stargz blob: %v", err)
+								}
+								newStargz := buf.Bytes()
+								// NoPrefetchLandmark is added during `Build`, which is expected behaviour.
+								dgstMap[chunkID(NoPrefetchLandmark, 0, int64(len([]byte{landmarkContents})))] = digest.FromBytes([]byte{landmarkContents})
+
+								for _, check := range tt.checks {
+									check(t, newStargz, tocDigest, dgstMap, cl, newCL)
+								}
+							})
+						}
+					}
+				}
+			}
+		}
+	}
+}
+
+// checkStargzTOC checks that the TOC JSON of the passed stargz has the expected
+// digest and contains valid chunks. It walks all entries in the stargz and
+// checks that all chunk digests stored in the TOC JSON match the actual contents.
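+//
+// (Editorial note: dgstMap keys are produced by chunkID defined later in this
+// file, i.e. "<cleaned name>-<offset>-<size>", so every chunk of every
+// regular file can be looked up individually.)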
+func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+	sgz, err := Open(
+		io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
+		WithDecompressors(controller),
+	)
+	if err != nil {
+		t.Errorf("failed to parse converted stargz: %v", err)
+		return
+	}
+	digestMapTOC, err := listDigests(io.NewSectionReader(
+		bytes.NewReader(sgzData), 0, int64(len(sgzData))),
+		controller,
+	)
+	if err != nil {
+		t.Fatalf("failed to list digest: %v", err)
+	}
+	found := make(map[string]bool)
+	for id := range dgstMap {
+		found[id] = false
+	}
+	zr, err := controller.Reader(bytes.NewReader(sgzData))
+	if err != nil {
+		t.Fatalf("failed to decompress converted stargz: %v", err)
+	}
+	defer zr.Close()
+	tr := tar.NewReader(zr)
+	for {
+		h, err := tr.Next()
+		if err != nil {
+			if err != io.EOF {
+				t.Errorf("failed to read tar entry: %v", err)
+				return
+			}
+			break
+		}
+		if h.Name == TOCTarName {
+			// Check the digest of TOC JSON based on the actual contents.
+			// TOC JSON is known to exist in this archive because Open succeeded.
+			dgstr := digest.Canonical.Digester()
+			if _, err := io.Copy(dgstr.Hash(), tr); err != nil {
+				t.Fatalf("failed to calculate digest of TOC JSON: %v",
+					err)
+			}
+			if dgstr.Digest() != tocDigest {
+				t.Errorf("invalid TOC JSON %q; want %q", tocDigest, dgstr.Digest())
+			}
+			continue
+		}
+		if _, ok := sgz.Lookup(h.Name); !ok {
+			t.Errorf("lost stargz entry %q in the converted TOC", h.Name)
+			return
+		}
+		var n int64
+		for n < h.Size {
+			ce, ok := sgz.ChunkEntryForOffset(h.Name, n)
+			if !ok {
+				t.Errorf("lost chunk %q(offset=%d) in the converted TOC",
+					h.Name, n)
+				return
+			}
+
+			// Get the original digest to make sure the file contents are kept unchanged
+			// from the original tar throughout the conversion steps.
+			id := chunkID(h.Name, n, ce.ChunkSize)
+			want, ok := dgstMap[id]
+			if !ok {
+				t.Errorf("Unexpected chunk %q(offset=%d,size=%d): %v",
+					h.Name, n, ce.ChunkSize, dgstMap)
+				return
+			}
+			found[id] = true
+
+			// Check the file contents
+			dgstr := digest.Canonical.Digester()
+			if _, err := io.CopyN(dgstr.Hash(), tr, ce.ChunkSize); err != nil {
+				t.Fatalf("failed to calculate digest of %q (offset=%d,size=%d)",
+					h.Name, n, ce.ChunkSize)
+			}
+			if want != dgstr.Digest() {
+				t.Errorf("Invalid contents in converted stargz %q: %q; want %q",
+					h.Name, dgstr.Digest(), want)
+				return
+			}
+
+			// Check the digest stored in TOC JSON
+			dgstTOC, ok := digestMapTOC[ce.Offset]
+			if !ok {
+				t.Errorf("digest of %q(offset=%d,size=%d,chunkOffset=%d) isn't registered",
+					h.Name, ce.Offset, ce.ChunkSize, ce.ChunkOffset)
+			}
+			if want != dgstTOC {
+				t.Errorf("Invalid digest in TOCEntry %q: %q; want %q",
+					h.Name, dgstTOC, want)
+				return
+			}
+
+			n += ce.ChunkSize
+		}
+	}
+
+	for id, ok := range found {
+		if !ok {
+			t.Errorf("required chunk %q not found in the converted stargz: %v", id, found)
+		}
+	}
+}
+
+// checkVerifyTOC checks that the verification works for the TOC JSON of the passed
+// stargz. It walks all entries in the stargz and checks that the verifications for
+// all chunks work.
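+//
+// (Editorial note: the verification pattern exercised below is: obtain a
+// verifier with ev.Verifier(ce), stream the chunk into it with io.CopyN, then
+// ask v.Verified() whether the digest matched.)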
+func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+	sgz, err := Open(
+		io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
+		WithDecompressors(controller),
+	)
+	if err != nil {
+		t.Errorf("failed to parse converted stargz: %v", err)
+		return
+	}
+	ev, err := sgz.VerifyTOC(tocDigest)
+	if err != nil {
+		t.Errorf("failed to verify stargz: %v", err)
+		return
+	}
+
+	found := make(map[string]bool)
+	for id := range dgstMap {
+		found[id] = false
+	}
+	zr, err := controller.Reader(bytes.NewReader(sgzData))
+	if err != nil {
+		t.Fatalf("failed to decompress converted stargz: %v", err)
+	}
+	defer zr.Close()
+	tr := tar.NewReader(zr)
+	for {
+		h, err := tr.Next()
+		if err != nil {
+			if err != io.EOF {
+				t.Errorf("failed to read tar entry: %v", err)
+				return
+			}
+			break
+		}
+		if h.Name == TOCTarName {
+			continue
+		}
+		if _, ok := sgz.Lookup(h.Name); !ok {
+			t.Errorf("lost stargz entry %q in the converted TOC", h.Name)
+			return
+		}
+		var n int64
+		for n < h.Size {
+			ce, ok := sgz.ChunkEntryForOffset(h.Name, n)
+			if !ok {
+				t.Errorf("lost chunk %q(offset=%d) in the converted TOC",
+					h.Name, n)
+				return
+			}
+
+			v, err := ev.Verifier(ce)
+			if err != nil {
+				t.Errorf("failed to get verifier for %q(offset=%d)", h.Name, n)
+			}
+
+			found[chunkID(h.Name, n, ce.ChunkSize)] = true
+
+			// Check the file contents
+			if _, err := io.CopyN(v, tr, ce.ChunkSize); err != nil {
+				t.Fatalf("failed to get chunk of %q (offset=%d,size=%d)",
+					h.Name, n, ce.ChunkSize)
+			}
+			if !v.Verified() {
+				t.Errorf("Invalid contents in converted stargz %q (verification should have succeeded)",
+					h.Name)
+				return
+			}
+			n += ce.ChunkSize
+		}
+	}
+
+	for id, ok := range found {
+		if !ok {
+			t.Errorf("required chunk %q not found in the converted stargz: %v", id, found)
+		}
+	}
+}
+
+// checkVerifyInvalidTOCEntryFail checks if misconfigured TOC JSON can be
+// detected during the verification and the verification returns an error.
+func checkVerifyInvalidTOCEntryFail(filename string) check {
+	return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+		funcs := map[string]rewriteFunc{
+			"lost digest in an entry": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) {
+				var found bool
+				for _, e := range toc.Entries {
+					if cleanEntryName(e.Name) == filename {
+						if e.Type != "reg" && e.Type != "chunk" {
+							t.Fatalf("entry %q to break must be regfile or chunk", filename)
+						}
+						if e.ChunkDigest == "" {
+							t.Fatalf("entry %q is already invalid", filename)
+						}
+						e.ChunkDigest = ""
+						found = true
+					}
+				}
+				if !found {
+					t.Fatalf("rewrite target not found")
+				}
+			},
+			"duplicated entry offset": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) {
+				var (
+					sampleEntry *TOCEntry
+					targetEntry *TOCEntry
+				)
+				for _, e := range toc.Entries {
+					if e.Type == "reg" || e.Type == "chunk" {
+						if cleanEntryName(e.Name) == filename {
+							targetEntry = e
+						} else {
+							sampleEntry = e
+						}
+					}
+				}
+				if sampleEntry == nil {
+					t.Fatalf("TOC must contain at least one regfile or chunk entry other than the rewrite target")
+				}
+				if targetEntry == nil {
+					t.Fatalf("rewrite target not found")
+				}
+				targetEntry.Offset = sampleEntry.Offset
+			},
+		}
+
+		for name, rFunc := range funcs {
+			t.Run(name, func(t *testing.T) {
+				newSgz, newTocDigest := rewriteTOCJSON(t, io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), rFunc, controller)
+				buf := new(bytes.Buffer)
+				if _, err := io.Copy(buf, newSgz); err != nil {
+					t.Fatalf("failed to get converted stargz")
+				}
+				isgz := buf.Bytes()
+
+				sgz, err := Open(
+					io.NewSectionReader(bytes.NewReader(isgz), 0, int64(len(isgz))),
+					WithDecompressors(controller),
+				)
+				if err != nil {
+					t.Fatalf("failed to parse converted stargz: %v", err)
+					return
+				}
+				_, err = sgz.VerifyTOC(newTocDigest)
+				if err == nil {
+					t.Errorf("must fail for invalid TOC")
+					return
+				}
+			})
+		}
+	}
+}
+
+// checkVerifyInvalidStargzFail checks if the verification detects that the
+// given stargz file doesn't match the expected digest and returns an error.
+func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check {
+	return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+		cl := newController()
+		rc, err := Build(invalid, WithChunkSize(chunkSize), WithCompression(cl))
+		if err != nil {
+			t.Fatalf("failed to convert stargz: %v", err)
+		}
+		defer rc.Close()
+		buf := new(bytes.Buffer)
+		if _, err := io.Copy(buf, rc); err != nil {
+			t.Fatalf("failed to copy built stargz blob: %v", err)
+		}
+		mStargz := buf.Bytes()
+
+		sgz, err := Open(
+			io.NewSectionReader(bytes.NewReader(mStargz), 0, int64(len(mStargz))),
+			WithDecompressors(cl),
+		)
+		if err != nil {
+			t.Fatalf("failed to parse converted stargz: %v", err)
+			return
+		}
+		_, err = sgz.VerifyTOC(tocDigest)
+		if err == nil {
+			t.Errorf("must fail for invalid TOC")
+			return
+		}
+	}
+}
+
+// checkVerifyBrokenContentFail checks if the verifier detects broken contents
+// that don't match the expected digest and returns an error.
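+//
+// (Editorial note: "broken" content is produced below by inverting the bits
+// of the chunk's first byte, ^data[0], which changes the payload without
+// changing its length, so the verifier's digest comparison must fail.)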
+func checkVerifyBrokenContentFail(filename string) check {
+	return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+		// Parse stargz file
+		sgz, err := Open(
+			io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
+			WithDecompressors(controller),
+		)
+		if err != nil {
+			t.Fatalf("failed to parse converted stargz: %v", err)
+			return
+		}
+		ev, err := sgz.VerifyTOC(tocDigest)
+		if err != nil {
+			t.Fatalf("failed to verify stargz: %v", err)
+			return
+		}
+
+		// Open the target file
+		sr, err := sgz.OpenFile(filename)
+		if err != nil {
+			t.Fatalf("failed to open file %q", filename)
+		}
+		ce, ok := sgz.ChunkEntryForOffset(filename, 0)
+		if !ok {
+			t.Fatalf("lost chunk %q(offset=%d) in the converted TOC", filename, 0)
+			return
+		}
+		if ce.ChunkSize == 0 {
+			t.Fatalf("file mustn't be empty")
+			return
+		}
+		data := make([]byte, ce.ChunkSize)
+		if _, err := sr.ReadAt(data, ce.ChunkOffset); err != nil {
+			t.Errorf("failed to get data of a chunk of %q(offset=%d)",
+				filename, ce.ChunkOffset)
+		}
+
+		// Check the broken chunk (must fail)
+		v, err := ev.Verifier(ce)
+		if err != nil {
+			t.Fatalf("failed to get verifier for %q", filename)
+		}
+		broken := append([]byte{^data[0]}, data[1:]...)
+		if _, err := io.CopyN(v, bytes.NewReader(broken), ce.ChunkSize); err != nil {
+			t.Fatalf("failed to get chunk of %q (offset=%d,size=%d)",
+				filename, ce.ChunkOffset, ce.ChunkSize)
+		}
+		if v.Verified() {
+			t.Errorf("verification must fail for broken file chunk %q(org:%q,broken:%q)",
+				filename, data, broken)
+		}
+	}
+}
+
+func chunkID(name string, offset, size int64) string {
+	return fmt.Sprintf("%s-%d-%d", cleanEntryName(name), offset, size)
+}
+
+type rewriteFunc func(t *testing.T, toc *JTOC, sgz *io.SectionReader)
+
+func rewriteTOCJSON(t *testing.T, sgz *io.SectionReader, rewrite rewriteFunc, controller TestingController) (newSgz io.Reader, tocDigest digest.Digest) {
+	decodedJTOC, jtocOffset, err := parseStargz(sgz, controller)
+	if err != nil {
+		t.Fatalf("failed to extract TOC JSON: %v", err)
+	}
+
+	rewrite(t, decodedJTOC, sgz)
+
+	tocFooter, tocDigest, err := tocAndFooter(controller, decodedJTOC, jtocOffset)
+	if err != nil {
+		t.Fatalf("failed to create toc and footer: %v", err)
+	}
+
+	// Reconstruct stargz file with the modified TOC JSON
+	if _, err := sgz.Seek(0, io.SeekStart); err != nil {
+		t.Fatalf("failed to reset the seek position of stargz: %v", err)
+	}
+	return io.MultiReader(
+		io.LimitReader(sgz, jtocOffset), // Original stargz (before TOC JSON)
+		tocFooter,                       // Rewritten TOC and footer
+	), tocDigest
+}
+
+func listDigests(sgz *io.SectionReader, controller TestingController) (map[int64]digest.Digest, error) {
+	decodedJTOC, _, err := parseStargz(sgz, controller)
+	if err != nil {
+		return nil, err
+	}
+	digestMap := make(map[int64]digest.Digest)
+	for _, e := range decodedJTOC.Entries {
+		if e.Type == "reg" || e.Type == "chunk" {
+			if e.Type == "reg" && e.Size == 0 {
+				continue // ignore empty files
+			}
+			if e.ChunkDigest == "" {
+				return nil, fmt.Errorf("ChunkDigest of %q(off=%d) not found in TOC JSON",
+					e.Name, e.Offset)
+			}
+			d, err := digest.Parse(e.ChunkDigest)
+			if err != nil {
+				return nil, err
+			}
+			digestMap[e.Offset] = d
+		}
+	}
+	return digestMap, nil
+}
+
+func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJTOC *JTOC, jtocOffset int64, err error) {
+	fSize := controller.FooterSize()
+	footer := make([]byte, fSize)
+	if
_, err := sgz.ReadAt(footer, sgz.Size()-fSize); err != nil { + return nil, 0, fmt.Errorf("error reading footer: %w", err) + } + _, tocOffset, _, err := controller.ParseFooter(footer[positive(int64(len(footer))-fSize):]) + if err != nil { + return nil, 0, fmt.Errorf("failed to parse footer: %w", err) + } + + // Decode the TOC JSON + var tocReader io.Reader + if tocOffset >= 0 { + tocReader = io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-fSize) + } + decodedJTOC, _, err = controller.ParseTOC(tocReader) + if err != nil { + return nil, 0, fmt.Errorf("failed to parse TOC: %w", err) + } + return decodedJTOC, tocOffset, nil +} + +func testWriteAndOpen(t *testing.T, controllers ...TestingControllerFactory) { + const content = "Some contents" + invalidUtf8 := "\xff\xfe\xfd" + + xAttrFile := xAttr{"foo": "bar", "invalid-utf8": invalidUtf8} + sampleOwner := owner{uid: 50, gid: 100} + + data64KB := randomContents(64000) + + tests := []struct { + name string + chunkSize int + minChunkSize int + in []tarEntry + want []stargzCheck + wantNumGz int // expected number of streams + + wantNumGzLossLess int // expected number of streams (> 0) in lossless mode if it's different from wantNumGz + wantFailOnLossLess bool + wantTOCVersion int // default = 1 + }{ + { + name: "empty", + in: tarOf(), + wantNumGz: 2, // (empty tar) + TOC + footer + want: checks( + numTOCEntries(0), + ), + }, + { + name: "1dir_1empty_file", + in: tarOf( + dir("foo/"), + file("foo/bar.txt", ""), + ), + wantNumGz: 3, // dir, TOC, footer + want: checks( + numTOCEntries(2), + hasDir("foo/"), + hasFileLen("foo/bar.txt", 0), + entryHasChildren("foo", "bar.txt"), + hasFileDigest("foo/bar.txt", digestFor("")), + ), + }, + { + name: "1dir_1file", + in: tarOf( + dir("foo/"), + file("foo/bar.txt", content, xAttrFile), + ), + wantNumGz: 4, // var dir, foo.txt alone, TOC, footer + want: checks( + numTOCEntries(2), + hasDir("foo/"), + hasFileLen("foo/bar.txt", len(content)), + hasFileDigest("foo/bar.txt", digestFor(content)), + hasFileContentsRange("foo/bar.txt", 0, content), + hasFileContentsRange("foo/bar.txt", 1, content[1:]), + entryHasChildren("", "foo"), + entryHasChildren("foo", "bar.txt"), + hasFileXattrs("foo/bar.txt", "foo", "bar"), + hasFileXattrs("foo/bar.txt", "invalid-utf8", invalidUtf8), + ), + }, + { + name: "2meta_2file", + in: tarOf( + dir("bar/", sampleOwner), + dir("foo/", sampleOwner), + file("foo/bar.txt", content, sampleOwner), + ), + wantNumGz: 4, // both dirs, foo.txt alone, TOC, footer + want: checks( + numTOCEntries(3), + hasDir("bar/"), + hasDir("foo/"), + hasFileLen("foo/bar.txt", len(content)), + entryHasChildren("", "bar", "foo"), + entryHasChildren("foo", "bar.txt"), + hasChunkEntries("foo/bar.txt", 1), + hasEntryOwner("bar/", sampleOwner), + hasEntryOwner("foo/", sampleOwner), + hasEntryOwner("foo/bar.txt", sampleOwner), + ), + }, + { + name: "3dir", + in: tarOf( + dir("bar/"), + dir("foo/"), + dir("foo/bar/"), + ), + wantNumGz: 3, // 3 dirs, TOC, footer + want: checks( + hasDirLinkCount("bar/", 2), + hasDirLinkCount("foo/", 3), + hasDirLinkCount("foo/bar/", 2), + ), + }, + { + name: "symlink", + in: tarOf( + dir("foo/"), + symlink("foo/bar", "../../x"), + ), + wantNumGz: 3, // metas + TOC + footer + want: checks( + numTOCEntries(2), + hasSymlink("foo/bar", "../../x"), + entryHasChildren("", "foo"), + entryHasChildren("foo", "bar"), + ), + }, + { + name: "chunked_file", + chunkSize: 4, + in: tarOf( + dir("foo/"), + file("foo/big.txt", "This "+"is s"+"uch "+"a bi"+"g fi"+"le"), + ), + wantNumGz: 9, // dir + 
big.txt(6 chunks) + TOC + footer + want: checks( + numTOCEntries(7), // 1 for foo dir, 6 for the foo/big.txt file + hasDir("foo/"), + hasFileLen("foo/big.txt", len("This is such a big file")), + hasFileDigest("foo/big.txt", digestFor("This is such a big file")), + hasFileContentsRange("foo/big.txt", 0, "This is such a big file"), + hasFileContentsRange("foo/big.txt", 1, "his is such a big file"), + hasFileContentsRange("foo/big.txt", 2, "is is such a big file"), + hasFileContentsRange("foo/big.txt", 3, "s is such a big file"), + hasFileContentsRange("foo/big.txt", 4, " is such a big file"), + hasFileContentsRange("foo/big.txt", 5, "is such a big file"), + hasFileContentsRange("foo/big.txt", 6, "s such a big file"), + hasFileContentsRange("foo/big.txt", 7, " such a big file"), + hasFileContentsRange("foo/big.txt", 8, "such a big file"), + hasFileContentsRange("foo/big.txt", 9, "uch a big file"), + hasFileContentsRange("foo/big.txt", 10, "ch a big file"), + hasFileContentsRange("foo/big.txt", 11, "h a big file"), + hasFileContentsRange("foo/big.txt", 12, " a big file"), + hasFileContentsRange("foo/big.txt", len("This is such a big file")-1, ""), + hasChunkEntries("foo/big.txt", 6), + ), + }, + { + name: "recursive", + in: tarOf( + dir("/", sampleOwner), + dir("bar/", sampleOwner), + dir("foo/", sampleOwner), + file("foo/bar.txt", content, sampleOwner), + ), + wantNumGz: 4, // dirs, bar.txt alone, TOC, footer + want: checks( + maxDepth(2), // 0: root directory, 1: "foo/", 2: "bar.txt" + ), + }, + { + name: "block_char_fifo", + in: tarOf( + tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Name: prefix + "b", + Typeflag: tar.TypeBlock, + Devmajor: 123, + Devminor: 456, + Format: format, + }) + }), + tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Name: prefix + "c", + Typeflag: tar.TypeChar, + Devmajor: 111, + Devminor: 222, + Format: format, + }) + }), + tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Name: prefix + "f", + Typeflag: tar.TypeFifo, + Format: format, + }) + }), + ), + wantNumGz: 3, + want: checks( + lookupMatch("b", &TOCEntry{Name: "b", Type: "block", DevMajor: 123, DevMinor: 456, NumLink: 1}), + lookupMatch("c", &TOCEntry{Name: "c", Type: "char", DevMajor: 111, DevMinor: 222, NumLink: 1}), + lookupMatch("f", &TOCEntry{Name: "f", Type: "fifo", NumLink: 1}), + ), + }, + { + name: "modes", + in: tarOf( + dir("foo1/", 0755|os.ModeDir|os.ModeSetgid), + file("foo1/bar1", content, 0700|os.ModeSetuid), + file("foo1/bar2", content, 0755|os.ModeSetgid), + dir("foo2/", 0755|os.ModeDir|os.ModeSticky), + file("foo2/bar3", content, 0755|os.ModeSticky), + dir("foo3/", 0755|os.ModeDir), + file("foo3/bar4", content, os.FileMode(0700)), + file("foo3/bar5", content, os.FileMode(0755)), + ), + wantNumGz: 8, // dir, bar1 alone, bar2 alone + dir, bar3 alone + dir, bar4 alone, bar5 alone, TOC, footer + want: checks( + hasMode("foo1/", 0755|os.ModeDir|os.ModeSetgid), + hasMode("foo1/bar1", 0700|os.ModeSetuid), + hasMode("foo1/bar2", 0755|os.ModeSetgid), + hasMode("foo2/", 0755|os.ModeDir|os.ModeSticky), + hasMode("foo2/bar3", 0755|os.ModeSticky), + hasMode("foo3/", 0755|os.ModeDir), + hasMode("foo3/bar4", os.FileMode(0700)), + hasMode("foo3/bar5", os.FileMode(0755)), + ), + }, + { + name: "lossy", + in: tarOf( + dir("bar/", sampleOwner), + dir("foo/", sampleOwner), + file("foo/bar.txt", content, sampleOwner), + 
file(TOCTarName, "dummy"), // ignored by the writer. (lossless write returns error) + ), + wantNumGz: 4, // both dirs, foo.txt alone, TOC, footer + want: checks( + numTOCEntries(3), + hasDir("bar/"), + hasDir("foo/"), + hasFileLen("foo/bar.txt", len(content)), + entryHasChildren("", "bar", "foo"), + entryHasChildren("foo", "bar.txt"), + hasChunkEntries("foo/bar.txt", 1), + hasEntryOwner("bar/", sampleOwner), + hasEntryOwner("foo/", sampleOwner), + hasEntryOwner("foo/bar.txt", sampleOwner), + ), + wantFailOnLossLess: true, + }, + { + name: "hardlink should be replaced to the destination entry", + in: tarOf( + dir("foo/"), + file("foo/foo1", "test"), + link("foolink", "foo/foo1"), + ), + wantNumGz: 4, // dir, foo1 + link, TOC, footer + want: checks( + mustSameEntry("foo/foo1", "foolink"), + ), + }, + { + name: "several_files_in_chunk", + minChunkSize: 8000, + in: tarOf( + dir("foo/"), + file("foo/foo1", data64KB), + file("foo2", "bb"), + file("foo22", "ccc"), + dir("bar/"), + file("bar/bar.txt", "aaa"), + file("foo3", data64KB), + ), + // NOTE: we assume that the compressed "data64KB" is still larger than 8KB + wantNumGz: 4, // dir+foo1, foo2+foo22+dir+bar.txt+foo3, TOC, footer + want: checks( + numTOCEntries(7), // dir, foo1, foo2, foo22, dir, bar.txt, foo3 + hasDir("foo/"), + hasDir("bar/"), + hasFileLen("foo/foo1", len(data64KB)), + hasFileLen("foo2", len("bb")), + hasFileLen("foo22", len("ccc")), + hasFileLen("bar/bar.txt", len("aaa")), + hasFileLen("foo3", len(data64KB)), + hasFileDigest("foo/foo1", digestFor(data64KB)), + hasFileDigest("foo2", digestFor("bb")), + hasFileDigest("foo22", digestFor("ccc")), + hasFileDigest("bar/bar.txt", digestFor("aaa")), + hasFileDigest("foo3", digestFor(data64KB)), + hasFileContentsWithPreRead("foo22", 0, "ccc", chunkInfo{"foo2", "bb"}, chunkInfo{"bar/bar.txt", "aaa"}, chunkInfo{"foo3", data64KB}), + hasFileContentsRange("foo/foo1", 0, data64KB), + hasFileContentsRange("foo2", 0, "bb"), + hasFileContentsRange("foo2", 1, "b"), + hasFileContentsRange("foo22", 0, "ccc"), + hasFileContentsRange("foo22", 1, "cc"), + hasFileContentsRange("foo22", 2, "c"), + hasFileContentsRange("bar/bar.txt", 0, "aaa"), + hasFileContentsRange("bar/bar.txt", 1, "aa"), + hasFileContentsRange("bar/bar.txt", 2, "a"), + hasFileContentsRange("foo3", 0, data64KB), + hasFileContentsRange("foo3", 1, data64KB[1:]), + hasFileContentsRange("foo3", 2, data64KB[2:]), + hasFileContentsRange("foo3", len(data64KB)/2, data64KB[len(data64KB)/2:]), + hasFileContentsRange("foo3", len(data64KB)-1, data64KB[len(data64KB)-1:]), + ), + }, + { + name: "several_files_in_chunk_chunked", + minChunkSize: 8000, + chunkSize: 32000, + in: tarOf( + dir("foo/"), + file("foo/foo1", data64KB), + file("foo2", "bb"), + dir("bar/"), + file("foo3", data64KB), + ), + // NOTE: we assume that the compressed chunk of "data64KB" is still larger than 8KB + wantNumGz: 6, // dir+foo1(1), foo1(2), foo2+dir+foo3(1), foo3(2), TOC, footer + want: checks( + numTOCEntries(7), // dir, foo1(2 chunks), foo2, dir, foo3(2 chunks) + hasDir("foo/"), + hasDir("bar/"), + hasFileLen("foo/foo1", len(data64KB)), + hasFileLen("foo2", len("bb")), + hasFileLen("foo3", len(data64KB)), + hasFileDigest("foo/foo1", digestFor(data64KB)), + hasFileDigest("foo2", digestFor("bb")), + hasFileDigest("foo3", digestFor(data64KB)), + hasFileContentsWithPreRead("foo2", 0, "bb", chunkInfo{"foo3", data64KB[:32000]}), + hasFileContentsRange("foo/foo1", 0, data64KB), + hasFileContentsRange("foo/foo1", 1, data64KB[1:]), + hasFileContentsRange("foo/foo1", 2, 
data64KB[2:]), + hasFileContentsRange("foo/foo1", len(data64KB)/2, data64KB[len(data64KB)/2:]), + hasFileContentsRange("foo/foo1", len(data64KB)-1, data64KB[len(data64KB)-1:]), + hasFileContentsRange("foo2", 0, "bb"), + hasFileContentsRange("foo2", 1, "b"), + hasFileContentsRange("foo3", 0, data64KB), + hasFileContentsRange("foo3", 1, data64KB[1:]), + hasFileContentsRange("foo3", 2, data64KB[2:]), + hasFileContentsRange("foo3", len(data64KB)/2, data64KB[len(data64KB)/2:]), + hasFileContentsRange("foo3", len(data64KB)-1, data64KB[len(data64KB)-1:]), + ), + }, + } + + for _, tt := range tests { + for _, newCL := range controllers { + newCL := newCL + for _, prefix := range allowedPrefix { + prefix := prefix + for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { + srcTarFormat := srcTarFormat + for _, lossless := range []bool{true, false} { + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", newCL(), prefix, lossless, srcTarFormat), func(t *testing.T) { + var tr io.Reader = buildTar(t, tt.in, prefix, srcTarFormat) + origTarDgstr := digest.Canonical.Digester() + tr = io.TeeReader(tr, origTarDgstr.Hash()) + var stargzBuf bytes.Buffer + cl1 := newCL() + w := NewWriterWithCompressor(&stargzBuf, cl1) + w.ChunkSize = tt.chunkSize + w.MinChunkSize = tt.minChunkSize + if lossless { + err := w.AppendTarLossLess(tr) + if tt.wantFailOnLossLess { + if err != nil { + return // expected to fail + } + t.Fatalf("Append wanted to fail on lossless") + } + if err != nil { + t.Fatalf("Append(lossless): %v", err) + } + } else { + if err := w.AppendTar(tr); err != nil { + t.Fatalf("Append: %v", err) + } + } + if _, err := w.Close(); err != nil { + t.Fatalf("Writer.Close: %v", err) + } + b := stargzBuf.Bytes() + + if lossless { + // Check if the result blob reserves original tar metadata + rc, err := Unpack(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), cl1) + if err != nil { + t.Errorf("failed to decompress blob: %v", err) + return + } + defer rc.Close() + resultDgstr := digest.Canonical.Digester() + if _, err := io.Copy(resultDgstr.Hash(), rc); err != nil { + t.Errorf("failed to read result decompressed blob: %v", err) + return + } + if resultDgstr.Digest() != origTarDgstr.Digest() { + t.Errorf("lossy compression occurred: digest=%v; want %v", + resultDgstr.Digest(), origTarDgstr.Digest()) + return + } + } + + diffID := w.DiffID() + wantDiffID := cl1.DiffIDOf(t, b) + if diffID != wantDiffID { + t.Errorf("DiffID = %q; want %q", diffID, wantDiffID) + } + + telemetry, checkCalled := newCalledTelemetry() + sr := io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))) + r, err := Open( + sr, + WithDecompressors(cl1), + WithTelemetry(telemetry), + ) + if err != nil { + t.Fatalf("stargz.Open: %v", err) + } + wantTOCVersion := 1 + if tt.wantTOCVersion > 0 { + wantTOCVersion = tt.wantTOCVersion + } + if r.toc.Version != wantTOCVersion { + t.Fatalf("invalid TOC Version %d; wanted %d", r.toc.Version, wantTOCVersion) + } + + footerSize := cl1.FooterSize() + footerOffset := sr.Size() - footerSize + footer := make([]byte, footerSize) + if _, err := sr.ReadAt(footer, footerOffset); err != nil { + t.Errorf("failed to read footer: %v", err) + } + _, tocOffset, _, err := cl1.ParseFooter(footer) + if err != nil { + t.Errorf("failed to parse footer: %v", err) + } + if err := checkCalled(tocOffset >= 0); err != nil { + t.Errorf("telemetry failure: %v", err) + } + + wantNumGz := tt.wantNumGz + if lossless && tt.wantNumGzLossLess > 0 { + wantNumGz = 
tt.wantNumGzLossLess + } + streamOffsets := []int64{0} + prevOffset := int64(-1) + streams := 0 + for _, e := range r.toc.Entries { + if e.Offset > prevOffset { + streamOffsets = append(streamOffsets, e.Offset) + prevOffset = e.Offset + streams++ + } + } + streams++ // TOC + if tocOffset >= 0 { + // toc is in the blob + streamOffsets = append(streamOffsets, tocOffset) + } + streams++ // footer + streamOffsets = append(streamOffsets, footerOffset) + if streams != wantNumGz { + t.Errorf("number of streams in TOC = %d; want %d", streams, wantNumGz) + } + + t.Logf("testing streams: %+v", streamOffsets) + cl1.TestStreams(t, b, streamOffsets) + + for _, want := range tt.want { + want.check(t, r) + } + }) + } + } + } + } + } +} + +type chunkInfo struct { + name string + data string +} + +func newCalledTelemetry() (telemetry *Telemetry, check func(needsGetTOC bool) error) { + var getFooterLatencyCalled bool + var getTocLatencyCalled bool + var deserializeTocLatencyCalled bool + return &Telemetry{ + func(time.Time) { getFooterLatencyCalled = true }, + func(time.Time) { getTocLatencyCalled = true }, + func(time.Time) { deserializeTocLatencyCalled = true }, + }, func(needsGetTOC bool) error { + var allErr []error + if !getFooterLatencyCalled { + allErr = append(allErr, fmt.Errorf("metrics GetFooterLatency isn't called")) + } + if needsGetTOC { + if !getTocLatencyCalled { + allErr = append(allErr, fmt.Errorf("metrics GetTocLatency isn't called")) + } + } + if !deserializeTocLatencyCalled { + allErr = append(allErr, fmt.Errorf("metrics DeserializeTocLatency isn't called")) + } + return errorutil.Aggregate(allErr) + } +} + +func digestFor(content string) string { + sum := sha256.Sum256([]byte(content)) + return fmt.Sprintf("sha256:%x", sum) +} + +type numTOCEntries int + +func (n numTOCEntries) check(t *testing.T, r *Reader) { + if r.toc == nil { + t.Fatal("nil TOC") + } + if got, want := len(r.toc.Entries), int(n); got != want { + t.Errorf("got %d TOC entries; want %d", got, want) + } + t.Logf("got TOC entries:") + for i, ent := range r.toc.Entries { + entj, _ := json.Marshal(ent) + t.Logf(" [%d]: %s\n", i, entj) + } + if t.Failed() { + t.FailNow() + } +} + +func checks(s ...stargzCheck) []stargzCheck { return s } + +type stargzCheck interface { + check(t *testing.T, r *Reader) +} + +type stargzCheckFn func(*testing.T, *Reader) + +func (f stargzCheckFn) check(t *testing.T, r *Reader) { f(t, r) } + +func maxDepth(max int) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + e, ok := r.Lookup("") + if !ok { + t.Fatal("root directory not found") + } + d, err := getMaxDepth(t, e, 0, 10*max) + if err != nil { + t.Errorf("failed to get max depth (wanted %d): %v", max, err) + return + } + if d != max { + t.Errorf("invalid depth %d; want %d", d, max) + return + } + }) +} + +func getMaxDepth(t *testing.T, e *TOCEntry, current, limit int) (max int, rErr error) { + if current > limit { + return -1, fmt.Errorf("walkMaxDepth: exceeds limit: current:%d > limit:%d", + current, limit) + } + max = current + e.ForeachChild(func(baseName string, ent *TOCEntry) bool { + t.Logf("%q(basename:%q) is child of %q\n", ent.Name, baseName, e.Name) + d, err := getMaxDepth(t, ent, current+1, limit) + if err != nil { + rErr = err + return false + } + if d > max { + max = d + } + return true + }) + return +} + +func hasFileLen(file string, wantLen int) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == file { + if ent.Type != "reg" { + 
t.Errorf("file type of %q is %q; want \"reg\"", file, ent.Type) + } else if ent.Size != int64(wantLen) { + t.Errorf("file size of %q = %d; want %d", file, ent.Size, wantLen) + } + return + } + } + t.Errorf("file %q not found", file) + }) +} + +func hasFileXattrs(file, name, value string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == file { + if ent.Type != "reg" { + t.Errorf("file type of %q is %q; want \"reg\"", file, ent.Type) + } + if ent.Xattrs == nil { + t.Errorf("file %q has no xattrs", file) + return + } + valueFound, found := ent.Xattrs[name] + if !found { + t.Errorf("file %q has no xattr %q", file, name) + return + } + if string(valueFound) != value { + t.Errorf("file %q has xattr %q with value %q instead of %q", file, name, valueFound, value) + } + + return + } + } + t.Errorf("file %q not found", file) + }) +} + +func hasFileDigest(file string, digest string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + ent, ok := r.Lookup(file) + if !ok { + t.Fatalf("didn't find TOCEntry for file %q", file) + } + if ent.Digest != digest { + t.Fatalf("Digest(%q) = %q, want %q", file, ent.Digest, digest) + } + }) +} + +func hasFileContentsWithPreRead(file string, offset int, want string, extra ...chunkInfo) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + extraMap := make(map[string]chunkInfo) + for _, e := range extra { + extraMap[e.name] = e + } + var extraNames []string + for n := range extraMap { + extraNames = append(extraNames, n) + } + f, err := r.OpenFileWithPreReader(file, func(e *TOCEntry, cr io.Reader) error { + t.Logf("On %q: got preread of %q", file, e.Name) + ex, ok := extraMap[e.Name] + if !ok { + t.Fatalf("fail on %q: unexpected entry %q: %+v, %+v", file, e.Name, e, extraNames) + } + got, err := io.ReadAll(cr) + if err != nil { + t.Fatalf("fail on %q: failed to read %q: %v", file, e.Name, err) + } + if ex.data != string(got) { + t.Fatalf("fail on %q: unexpected contents of %q: len=%d; want=%d", file, e.Name, len(got), len(ex.data)) + } + delete(extraMap, e.Name) + return nil + }) + if err != nil { + t.Fatal(err) + } + got := make([]byte, len(want)) + n, err := f.ReadAt(got, int64(offset)) + if err != nil { + t.Fatalf("ReadAt(len %d, offset %d, size %d) = %v, %v", len(got), offset, f.Size(), n, err) + } + if string(got) != want { + t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, viewContent(got), viewContent([]byte(want))) + } + if len(extraMap) != 0 { + var exNames []string + for _, ex := range extraMap { + exNames = append(exNames, ex.name) + } + t.Fatalf("fail on %q: some entries aren't read: %+v", file, exNames) + } + }) +} + +func hasFileContentsRange(file string, offset int, want string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + f, err := r.OpenFile(file) + if err != nil { + t.Fatal(err) + } + got := make([]byte, len(want)) + n, err := f.ReadAt(got, int64(offset)) + if err != nil { + t.Fatalf("ReadAt(len %d, offset %d) = %v, %v", len(got), offset, n, err) + } + if string(got) != want { + t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, viewContent(got), viewContent([]byte(want))) + } + }) +} + +func hasChunkEntries(file string, wantChunks int) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + ent, ok := r.Lookup(file) + if !ok { + t.Fatalf("no file for %q", file) + } + if ent.Type != "reg" { + t.Fatalf("file %q has unexpected type %q; want reg", file, ent.Type) 
+ } + chunks := r.getChunks(ent) + if len(chunks) != wantChunks { + t.Errorf("len(r.getChunks(%q)) = %d; want %d", file, len(chunks), wantChunks) + return + } + f := chunks[0] + + var gotChunks []*TOCEntry + var last *TOCEntry + for off := int64(0); off < f.Size; off++ { + e, ok := r.ChunkEntryForOffset(file, off) + if !ok { + t.Errorf("no ChunkEntryForOffset at %d", off) + return + } + if last != e { + gotChunks = append(gotChunks, e) + last = e + } + } + if !reflect.DeepEqual(chunks, gotChunks) { + t.Errorf("gotChunks=%d, want=%d; contents mismatch", len(gotChunks), wantChunks) + } + + // And verify the NextOffset + for i := 0; i < len(gotChunks)-1; i++ { + ci := gotChunks[i] + cnext := gotChunks[i+1] + if ci.NextOffset() != cnext.Offset { + t.Errorf("chunk %d NextOffset %d != next chunk's Offset of %d", i, ci.NextOffset(), cnext.Offset) + } + } + }) +} + +func entryHasChildren(dir string, want ...string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + want := append([]string(nil), want...) + var got []string + ent, ok := r.Lookup(dir) + if !ok { + t.Fatalf("didn't find TOCEntry for dir node %q", dir) + } + for baseName := range ent.children { + got = append(got, baseName) + } + sort.Strings(got) + sort.Strings(want) + if !reflect.DeepEqual(got, want) { + t.Errorf("children of %q = %q; want %q", dir, got, want) + } + }) +} + +func hasDir(file string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == cleanEntryName(file) { + if ent.Type != "dir" { + t.Errorf("file type of %q is %q; want \"dir\"", file, ent.Type) + } + return + } + } + t.Errorf("directory %q not found", file) + }) +} + +func hasDirLinkCount(file string, count int) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == cleanEntryName(file) { + if ent.Type != "dir" { + t.Errorf("file type of %q is %q; want \"dir\"", file, ent.Type) + return + } + if ent.NumLink != count { + t.Errorf("link count of %q = %d; want %d", file, ent.NumLink, count) + } + return + } + } + t.Errorf("directory %q not found", file) + }) +} + +func hasMode(file string, mode os.FileMode) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == cleanEntryName(file) { + if ent.Stat().Mode() != mode { + t.Errorf("invalid mode: got %v; want %v", ent.Stat().Mode(), mode) + return + } + return + } + } + t.Errorf("file %q not found", file) + }) +} + +func hasSymlink(file, target string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == file { + if ent.Type != "symlink" { + t.Errorf("file type of %q is %q; want \"symlink\"", file, ent.Type) + } else if ent.LinkName != target { + t.Errorf("link target of symlink %q is %q; want %q", file, ent.LinkName, target) + } + return + } + } + t.Errorf("symlink %q not found", file) + }) +} + +func lookupMatch(name string, want *TOCEntry) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + e, ok := r.Lookup(name) + if !ok { + t.Fatalf("failed to Lookup entry %q", name) + } + if !reflect.DeepEqual(e, want) { + t.Errorf("entry %q mismatch.\n got: %+v\nwant: %+v\n", name, e, want) + } + + }) +} + +func hasEntryOwner(entry string, owner owner) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + ent, ok := r.Lookup(strings.TrimSuffix(entry, "/")) + if !ok { + t.Errorf("entry %q not found", 
entry)
+			return
+		}
+		if ent.UID != owner.uid || ent.GID != owner.gid {
+			t.Errorf("entry %q has invalid owner (uid:%d, gid:%d) instead of (uid:%d, gid:%d)", entry, ent.UID, ent.GID, owner.uid, owner.gid)
+			return
+		}
+	})
+}
+
+func mustSameEntry(files ...string) stargzCheck {
+	return stargzCheckFn(func(t *testing.T, r *Reader) {
+		var first *TOCEntry
+		for _, f := range files {
+			if first == nil {
+				var ok bool
+				first, ok = r.Lookup(f)
+				if !ok {
+					t.Errorf("unknown first file on Lookup: %q", f)
+					return
+				}
+			}
+
+			// Test Lookup
+			e, ok := r.Lookup(f)
+			if !ok {
+				t.Errorf("unknown file on Lookup: %q", f)
+				return
+			}
+			if e != first {
+				t.Errorf("Lookup: %+v(%p) != %+v(%p)", e, e, first, first)
+				return
+			}
+
+			// Test LookupChild
+			pe, ok := r.Lookup(filepath.Dir(filepath.Clean(f)))
+			if !ok {
+				t.Errorf("failed to get parent of %q", f)
+				return
+			}
+			e, ok = pe.LookupChild(filepath.Base(filepath.Clean(f)))
+			if !ok {
+				t.Errorf("failed to get %q as the child of %+v", f, pe)
+				return
+			}
+			if e != first {
+				t.Errorf("LookupChild: %+v(%p) != %+v(%p)", e, e, first, first)
+				return
+			}
+
+			// Test ForeachChild
+			pe.ForeachChild(func(baseName string, e *TOCEntry) bool {
+				if baseName == filepath.Base(filepath.Clean(f)) {
+					if e != first {
+						t.Errorf("ForeachChild: %+v(%p) != %+v(%p)", e, e, first, first)
+						return false
+					}
+				}
+				return true
+			})
+		}
+	})
+}
+
+func viewContent(c []byte) string {
+	if len(c) < 100 {
+		return string(c)
+	}
+	return string(c[:50]) + "...(omit)..." + string(c[50:100])
+}
+
+func tarOf(s ...tarEntry) []tarEntry { return s }
+
+type tarEntry interface {
+	appendTar(tw *tar.Writer, prefix string, format tar.Format) error
+}
+
+type tarEntryFunc func(*tar.Writer, string, tar.Format) error
+
+func (f tarEntryFunc) appendTar(tw *tar.Writer, prefix string, format tar.Format) error {
+	return f(tw, prefix, format)
+}
+
+func buildTar(t *testing.T, ents []tarEntry, prefix string, opts ...interface{}) *io.SectionReader {
+	format := tar.FormatUnknown
+	for _, opt := range opts {
+		switch v := opt.(type) {
+		case tar.Format:
+			format = v
+		default:
+			panic(fmt.Errorf("unsupported opt for buildTar: %v", opt))
+		}
+	}
+	buf := new(bytes.Buffer)
+	tw := tar.NewWriter(buf)
+	for _, ent := range ents {
+		if err := ent.appendTar(tw, prefix, format); err != nil {
+			t.Fatalf("building input tar: %v", err)
+		}
+	}
+	if err := tw.Close(); err != nil {
+		t.Errorf("closing write of input tar: %v", err)
+	}
+	data := append(buf.Bytes(), make([]byte, 100)...) // append empty bytes at the tail to check that lossless mode works
+	return io.NewSectionReader(bytes.NewReader(data), 0, int64(len(data)))
+}
+
+func dir(name string, opts ...interface{}) tarEntry {
+	return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error {
+		var o owner
+		mode := os.FileMode(0755)
+		for _, opt := range opts {
+			switch v := opt.(type) {
+			case owner:
+				o = v
+			case os.FileMode:
+				mode = v
+			default:
+				return errors.New("unsupported opt")
+			}
+		}
+		if !strings.HasSuffix(name, "/") {
+			panic(fmt.Sprintf("missing trailing slash in dir %q ", name))
+		}
+		tm, err := fileModeToTarMode(mode)
+		if err != nil {
+			return err
+		}
+		return tw.WriteHeader(&tar.Header{
+			Typeflag: tar.TypeDir,
+			Name:     prefix + name,
+			Mode:     tm,
+			Uid:      o.uid,
+			Gid:      o.gid,
+			Format:   format,
+		})
+	})
+}
+
+// xAttr is a set of extended attributes to set on test files created with the file func.
+type xAttr map[string]string
+
+// owner is the owner to set on test files and directories with the file and dir functions.
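
The tarOf/dir/file/buildTar helpers above form a small combinator DSL for building test tar streams. An illustrative sketch of how they compose (not from the vendored code; exampleFixture is a made-up name, and the owner struct it uses is the one declared just below):

// Illustrative only: compose the fixture helpers defined in this test file.
func exampleFixture(t *testing.T) *io.SectionReader {
	ents := tarOf(
		dir("foo/", owner{uid: 1000, gid: 1000}),          // owner and os.FileMode are optional opts
		file("foo/hello.txt", "hello", os.FileMode(0644)), // contents plus optional opts
	)
	// buildTar serializes the entries into an in-memory tar and wraps it in a
	// SectionReader; a tar.Format opt forces a specific on-disk header format.
	return buildTar(t, ents, "", tar.FormatPAX)
}
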
+type owner struct { + uid int + gid int +} + +func file(name, contents string, opts ...interface{}) tarEntry { + return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error { + var xattrs xAttr + var o owner + mode := os.FileMode(0644) + for _, opt := range opts { + switch v := opt.(type) { + case xAttr: + xattrs = v + case owner: + o = v + case os.FileMode: + mode = v + default: + return errors.New("unsupported opt") + } + } + if strings.HasSuffix(name, "/") { + return fmt.Errorf("bogus trailing slash in file %q", name) + } + tm, err := fileModeToTarMode(mode) + if err != nil { + return err + } + if len(xattrs) > 0 { + format = tar.FormatPAX // only PAX supports xattrs + } + if err := tw.WriteHeader(&tar.Header{ + Typeflag: tar.TypeReg, + Name: prefix + name, + Mode: tm, + Xattrs: xattrs, + Size: int64(len(contents)), + Uid: o.uid, + Gid: o.gid, + Format: format, + }); err != nil { + return err + } + _, err = io.WriteString(tw, contents) + return err + }) +} + +func symlink(name, target string) tarEntry { + return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error { + return tw.WriteHeader(&tar.Header{ + Typeflag: tar.TypeSymlink, + Name: prefix + name, + Linkname: target, + Mode: 0644, + Format: format, + }) + }) +} + +func link(name string, linkname string) tarEntry { + now := time.Now() + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeLink, + Name: prefix + name, + Linkname: linkname, + ModTime: now, + Format: format, + }) + }) +} + +func chardev(name string, major, minor int64) tarEntry { + now := time.Now() + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeChar, + Name: prefix + name, + Devmajor: major, + Devminor: minor, + ModTime: now, + Format: format, + }) + }) +} + +func blockdev(name string, major, minor int64) tarEntry { + now := time.Now() + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeBlock, + Name: prefix + name, + Devmajor: major, + Devminor: minor, + ModTime: now, + Format: format, + }) + }) +} +func fifo(name string) tarEntry { + now := time.Now() + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeFifo, + Name: prefix + name, + ModTime: now, + Format: format, + }) + }) +} + +func prefetchLandmark() tarEntry { + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + if err := w.WriteHeader(&tar.Header{ + Name: PrefetchLandmark, + Typeflag: tar.TypeReg, + Size: int64(len([]byte{landmarkContents})), + Format: format, + }); err != nil { + return err + } + contents := []byte{landmarkContents} + if _, err := io.CopyN(w, bytes.NewReader(contents), int64(len(contents))); err != nil { + return err + } + return nil + }) +} + +func noPrefetchLandmark() tarEntry { + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + if err := w.WriteHeader(&tar.Header{ + Name: NoPrefetchLandmark, + Typeflag: tar.TypeReg, + Size: int64(len([]byte{landmarkContents})), + Format: format, + }); err != nil { + return err + } + contents := []byte{landmarkContents} + if _, err := io.CopyN(w, bytes.NewReader(contents), int64(len(contents))); err != nil { + return err + } + return nil + }) +} + +func regDigest(t *testing.T, name string, contentStr 
string, digestMap map[string]digest.Digest) tarEntry { + if digestMap == nil { + t.Fatalf("digest map mustn't be nil") + } + content := []byte(contentStr) + + var n int64 + for n < int64(len(content)) { + size := int64(chunkSize) + remain := int64(len(content)) - n + if remain < size { + size = remain + } + dgstr := digest.Canonical.Digester() + if _, err := io.CopyN(dgstr.Hash(), bytes.NewReader(content[n:n+size]), size); err != nil { + t.Fatalf("failed to calculate digest of %q (name=%q,offset=%d,size=%d)", + string(content[n:n+size]), name, n, size) + } + digestMap[chunkID(name, n, size)] = dgstr.Digest() + n += size + } + + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + if err := w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeReg, + Name: prefix + name, + Size: int64(len(content)), + Format: format, + }); err != nil { + return err + } + if _, err := io.CopyN(w, bytes.NewReader(content), int64(len(content))); err != nil { + return err + } + return nil + }) +} + +var runes = []rune("1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + +func randomContents(n int) string { + b := make([]rune, n) + for i := range b { + b[i] = runes[rand.Intn(len(runes))] + } + return string(b) +} + +func fileModeToTarMode(mode os.FileMode) (int64, error) { + h, err := tar.FileInfoHeader(fileInfoOnlyMode(mode), "") + if err != nil { + return 0, err + } + return h.Mode, nil +} + +// fileInfoOnlyMode is os.FileMode that populates only file mode. +type fileInfoOnlyMode os.FileMode + +func (f fileInfoOnlyMode) Name() string { return "" } +func (f fileInfoOnlyMode) Size() int64 { return 0 } +func (f fileInfoOnlyMode) Mode() os.FileMode { return os.FileMode(f) } +func (f fileInfoOnlyMode) ModTime() time.Time { return time.Now() } +func (f fileInfoOnlyMode) IsDir() bool { return os.FileMode(f).IsDir() } +func (f fileInfoOnlyMode) Sys() interface{} { return nil } + +func CheckGzipHasStreams(t *testing.T, b []byte, streams []int64) { + if len(streams) == 0 { + return // nop + } + + wants := map[int64]struct{}{} + for _, s := range streams { + wants[s] = struct{}{} + } + + len0 := len(b) + br := bytes.NewReader(b) + zr := new(gzip.Reader) + t.Logf("got gzip streams:") + numStreams := 0 + for { + zoff := len0 - br.Len() + if err := zr.Reset(br); err != nil { + if err == io.EOF { + return + } + t.Fatalf("countStreams(gzip), Reset: %v", err) + } + zr.Multistream(false) + n, err := io.Copy(io.Discard, zr) + if err != nil { + t.Fatalf("countStreams(gzip), Copy: %v", err) + } + var extra string + if len(zr.Header.Extra) > 0 { + extra = fmt.Sprintf("; extra=%q", zr.Header.Extra) + } + t.Logf(" [%d] at %d in stargz, uncompressed length %d%s", numStreams, zoff, n, extra) + delete(wants, int64(zoff)) + numStreams++ + } +} + +func GzipDiffIDOf(t *testing.T, b []byte) string { + h := sha256.New() + zr, err := gzip.NewReader(bytes.NewReader(b)) + if err != nil { + t.Fatalf("diffIDOf(gzip): %v", err) + } + defer zr.Close() + if _, err := io.Copy(h, zr); err != nil { + t.Fatalf("diffIDOf(gzip).Copy: %v", err) + } + return fmt.Sprintf("sha256:%x", h.Sum(nil)) +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go new file mode 100644 index 0000000000..57e0aa614e --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go @@ -0,0 +1,342 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. + Use of this source code is governed by a BSD-style + license that can be found in the LICENSE file. +*/ + +package estargz + +import ( + "archive/tar" + "hash" + "io" + "os" + "path" + "time" + + digest "github.com/opencontainers/go-digest" +) + +const ( + // TOCTarName is the name of the JSON file in the tar archive in the + // table of contents gzip stream. + TOCTarName = "stargz.index.json" + + // FooterSize is the number of bytes in the footer + // + // The footer is an empty gzip stream with no compression and an Extra + // header of the form "%016xSTARGZ", where the 64 bit hex-encoded + // number is the offset to the gzip stream of JSON TOC. + // + // 51 comes from: + // + // 10 bytes gzip header + // 2 bytes XLEN (length of Extra field) = 26 (4 bytes header + 16 hex digits + len("STARGZ")) + // 2 bytes Extra: SI1 = 'S', SI2 = 'G' + // 2 bytes Extra: LEN = 22 (16 hex digits + len("STARGZ")) + // 22 bytes Extra: subfield = fmt.Sprintf("%016xSTARGZ", offsetOfTOC) + // 5 bytes flate header + // 8 bytes gzip footer + // (End of the eStargz blob) + // + // NOTE: For Extra fields, subfield IDs SI1='S' SI2='G' is used for eStargz. + FooterSize = 51 + + // legacyFooterSize is the number of bytes in the legacy stargz footer. + // + // 47 comes from: + // + // 10 byte gzip header + + // 2 byte (LE16) length of extra, encoding 22 (16 hex digits + len("STARGZ")) == "\x16\x00" + + // 22 bytes of extra (fmt.Sprintf("%016xSTARGZ", tocGzipOffset)) + // 5 byte flate header + // 8 byte gzip footer (two little endian uint32s: digest, size) + legacyFooterSize = 47 + + // TOCJSONDigestAnnotation is an annotation for an image layer. This stores the + // digest of the TOC JSON. + // This annotation is valid only when it is specified in `.[]layers.annotations` + // of an image manifest. + TOCJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest" + + // StoreUncompressedSizeAnnotation is an additional annotation key for eStargz to enable lazy + // pulling on containers/storage. Stargz Store is required to expose the layer's uncompressed size + // to the runtime but current OCI image doesn't ship this information by default. So we store this + // to the special annotation. + StoreUncompressedSizeAnnotation = "io.containers.estargz.uncompressed-size" + + // PrefetchLandmark is a file entry which indicates the end position of + // prefetch in the stargz file. + PrefetchLandmark = ".prefetch.landmark" + + // NoPrefetchLandmark is a file entry which indicates that no prefetch should + // occur in the stargz file. + NoPrefetchLandmark = ".no.prefetch.landmark" + + landmarkContents = 0xf +) + +// JTOC is the JSON-serialized table of contents index of the files in the stargz file. +type JTOC struct { + Version int `json:"version"` + Entries []*TOCEntry `json:"entries"` +} + +// TOCEntry is an entry in the stargz file's TOC (Table of Contents). +type TOCEntry struct { + // Name is the tar entry's name. 
It is the complete path
+	// stored in the tar file, not just the base name.
+	Name string `json:"name"`
+
+	// Type is one of "dir", "reg", "symlink", "hardlink", "char",
+	// "block", "fifo", or "chunk".
+	// The "chunk" type is used for regular file data chunks past the first
+	// TOCEntry; the 2nd chunk and on have only Type ("chunk"), Offset,
+	// ChunkOffset, and ChunkSize populated.
+	Type string `json:"type"`
+
+	// Size, for regular files, is the logical size of the file.
+	Size int64 `json:"size,omitempty"`
+
+	// ModTime3339 is the modification time of the tar entry. Empty
+	// means zero or unknown. Otherwise it's in UTC RFC3339
+	// format. Use the ModTime method to access the time.Time value.
+	ModTime3339 string `json:"modtime,omitempty"`
+	modTime     time.Time
+
+	// LinkName, for symlinks and hardlinks, is the link target.
+	LinkName string `json:"linkName,omitempty"`
+
+	// Mode is the permission and mode bits.
+	Mode int64 `json:"mode,omitempty"`
+
+	// UID is the user ID of the owner.
+	UID int `json:"uid,omitempty"`
+
+	// GID is the group ID of the owner.
+	GID int `json:"gid,omitempty"`
+
+	// Uname is the username of the owner.
+	//
+	// In the serialized JSON, this field may only be present for
+	// the first entry with the same UID.
+	Uname string `json:"userName,omitempty"`
+
+	// Gname is the group name of the owner.
+	//
+	// In the serialized JSON, this field may only be present for
+	// the first entry with the same GID.
+	Gname string `json:"groupName,omitempty"`
+
+	// Offset, for regular files, provides the offset in the
+	// stargz file to the file's data bytes. See ChunkOffset and
+	// ChunkSize.
+	Offset int64 `json:"offset,omitempty"`
+
+	// InnerOffset is an optional field that indicates the uncompressed offset
+	// of this "reg" or "chunk" payload in a stream that starts at Offset.
+	// This field makes it possible to put multiple "reg" or "chunk" payloads
+	// in one chunk, sharing the same Offset but with different InnerOffset values.
+	InnerOffset int64 `json:"innerOffset,omitempty"`
+
+	nextOffset int64 // the Offset of the next entry with a non-zero Offset
+
+	// DevMajor is the major device number for "char" and "block" types.
+	DevMajor int `json:"devMajor,omitempty"`
+
+	// DevMinor is the minor device number for "char" and "block" types.
+	DevMinor int `json:"devMinor,omitempty"`
+
+	// NumLink is the number of entry names pointing to this entry.
+	// Zero means one name references this entry.
+	// This field is calculated during runtime and not recorded in TOC JSON.
+	NumLink int `json:"-"`
+
+	// Xattrs are the extended attributes for the entry.
+	Xattrs map[string][]byte `json:"xattrs,omitempty"`
+
+	// Digest stores the OCI checksum for the regular file's payload.
+	// It has the form "sha256:abcdef01234....".
+	Digest string `json:"digest,omitempty"`
+
+	// ChunkOffset is non-zero if this is a chunk of a large,
+	// regular file. If so, the Offset is where the gzip header of
+	// ChunkSize bytes at ChunkOffset in Name begins.
+	//
+	// In serialized form, a "chunkSize" JSON field of zero means
+	// that the chunk goes to the end of the file. After reading
+	// from the stargz TOC, though, the ChunkSize is initialized
+	// to a non-zero value when Type is either "reg" or
+	// "chunk".
+	ChunkOffset int64 `json:"chunkOffset,omitempty"`
+	ChunkSize   int64 `json:"chunkSize,omitempty"`
+
+	// ChunkDigest stores an OCI digest of the chunk. This must be formed
+	// as "sha256:0123abcd...".
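
The ChunkOffset/ChunkSize pair above is how a large regular file is split across multiple TOC entries. An illustrative sketch of walking a file's chunks through the Reader API that the tests earlier exercise via Lookup and ChunkEntryForOffset (not from the vendored code; chunksOf is a made-up name):

// Illustrative only: collect the chunk entries covering a regular file.
func chunksOf(r *Reader, name string) []*TOCEntry {
	ent, ok := r.Lookup(name)
	if !ok || ent.Type != "reg" {
		return nil
	}
	var out []*TOCEntry
	for off := int64(0); off < ent.Size; {
		c, ok := r.ChunkEntryForOffset(name, off)
		if !ok {
			break
		}
		out = append(out, c)
		// Each chunk covers [ChunkOffset, ChunkOffset+ChunkSize) of the file;
		// after the TOC is parsed, ChunkSize is always non-zero for reg/chunk.
		off = c.ChunkOffset + c.ChunkSize
	}
	return out
}
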
+	ChunkDigest string `json:"chunkDigest,omitempty"`
+
+	children map[string]*TOCEntry
+
+	// chunkTopIndex is the index of the entry where Offset starts in the blob.
+	chunkTopIndex int
+}
+
+// ModTime returns the entry's modification time.
+func (e *TOCEntry) ModTime() time.Time { return e.modTime }
+
+// NextOffset returns the position (relative to the start of the
+// stargz file) of the next gzip boundary after e.Offset.
+func (e *TOCEntry) NextOffset() int64 { return e.nextOffset }
+
+func (e *TOCEntry) addChild(baseName string, child *TOCEntry) {
+	if e.children == nil {
+		e.children = make(map[string]*TOCEntry)
+	}
+	if child.Type == "dir" {
+		e.NumLink++ // Entry ".." in the subdirectory links to this directory
+	}
+	e.children[baseName] = child
+}
+
+// isDataType reports whether TOCEntry is a regular file or chunk (something that
+// contains regular file data).
+func (e *TOCEntry) isDataType() bool { return e.Type == "reg" || e.Type == "chunk" }
+
+// Stat returns a FileInfo value representing e.
+func (e *TOCEntry) Stat() os.FileInfo { return fileInfo{e} }
+
+// ForeachChild calls f for each child item. If f returns false, iteration ends.
+// If e is not a directory, f is not called.
+func (e *TOCEntry) ForeachChild(f func(baseName string, ent *TOCEntry) bool) {
+	for name, ent := range e.children {
+		if !f(name, ent) {
+			return
+		}
+	}
+}
+
+// LookupChild returns the directory e's child by its base name.
+func (e *TOCEntry) LookupChild(baseName string) (child *TOCEntry, ok bool) {
+	child, ok = e.children[baseName]
+	return
+}
+
+// fileInfo implements os.FileInfo using the wrapped *TOCEntry.
+type fileInfo struct{ e *TOCEntry }
+
+var _ os.FileInfo = fileInfo{}
+
+func (fi fileInfo) Name() string       { return path.Base(fi.e.Name) }
+func (fi fileInfo) IsDir() bool        { return fi.e.Type == "dir" }
+func (fi fileInfo) Size() int64        { return fi.e.Size }
+func (fi fileInfo) ModTime() time.Time { return fi.e.ModTime() }
+func (fi fileInfo) Sys() interface{}   { return fi.e }
+func (fi fileInfo) Mode() (m os.FileMode) {
+	// TOCEntry.Mode is tar.Header.Mode so we can interpret these bits using the `tar` pkg.
+	m = (&tar.Header{Mode: fi.e.Mode}).FileInfo().Mode() &
+		(os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky)
+	switch fi.e.Type {
+	case "dir":
+		m |= os.ModeDir
+	case "symlink":
+		m |= os.ModeSymlink
+	case "char":
+		m |= os.ModeDevice | os.ModeCharDevice
+	case "block":
+		m |= os.ModeDevice
+	case "fifo":
+		m |= os.ModeNamedPipe
+	}
+	return m
+}
+
+// TOCEntryVerifier holds verifiers that are usable for verifying chunks contained
+// in an eStargz blob.
+type TOCEntryVerifier interface {
+
+	// Verifier provides a content verifier that can be used for verifying the
+	// contents of the specified TOCEntry.
+	Verifier(ce *TOCEntry) (digest.Verifier, error)
+}
+
+// Compression provides the compression helper to be used for creating and parsing eStargz.
+// This package provides gzip-based Compression by default, but any compression
+// algorithm (e.g. zstd) can be used as long as it implements Compression.
+type Compression interface {
+	Compressor
+	Decompressor
+}
+
+// Compressor represents the helper methods to be used for creating eStargz.
+type Compressor interface {
+	// Writer returns WriteCloser to be used for writing a chunk to eStargz.
+	// Every time a chunk is written, the WriteCloser is closed and Writer is
+	// called again for writing the next chunk.
+	//
+	// The returned writer should implement a "Flush() error" method that flushes
+	// any pending compressed data to the underlying writer.
+	Writer(w io.Writer) (WriteFlushCloser, error)
+
+	// WriteTOCAndFooter is called to write the JTOC to the passed Writer.
+	// diffHash calculates the DiffID (uncompressed sha256 hash) of the blob.
+	// WriteTOCAndFooter can optionally write anything that affects DiffID calculation
+	// (e.g. uncompressed TOC JSON).
+	//
+	// This function returns tocDgst that represents the digest of the TOC that will be used
+	// to verify this blob when it's parsed.
+	WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (tocDgst digest.Digest, err error)
+}
+
+// Decompressor represents the helper methods to be used for parsing eStargz.
+type Decompressor interface {
+	// Reader returns ReadCloser to be used for decompressing file payload.
+	Reader(r io.Reader) (io.ReadCloser, error)
+
+	// FooterSize returns the size of the footer of this blob.
+	FooterSize() int64
+
+	// ParseFooter parses the footer and returns the offset and (compressed) size of the TOC.
+	// blobPayloadSize is the (compressed) size of the blob payload (i.e. the size between
+	// the top and the TOC JSON).
+	//
+	// If tocOffset < 0, we assume that the TOC isn't contained in the blob and pass a nil reader
+	// to ParseTOC. We expect that ParseTOC acquires the TOC from an external location and returns it.
+	//
+	// tocSize is optional. If tocSize <= 0, it defaults to the size of the range from tocOffset until the beginning of the
+	// footer (blob size - tocOffset - FooterSize).
+	// If blobPayloadSize < 0, blobPayloadSize becomes the blob size.
+	ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error)
+
+	// ParseTOC parses the TOC from the passed reader. The reader provides the partial contents
+	// of the underlying blob that has the range specified by the ParseFooter method.
+	//
+	// This function returns tocDgst that represents the digest of the TOC that will be used
+	// to verify this blob. This must match the value returned from
+	// Compressor.WriteTOCAndFooter that is used when creating this blob.
+	//
+	// If tocOffset returned by ParseFooter is < 0, we assume that the TOC isn't contained in the blob.
+	// Pass a nil reader to ParseTOC; we then expect that ParseTOC acquires the TOC from an external location
+	// and returns it.
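
The FooterSize constant earlier in this file documents the footer layout that ParseFooter consumes: a gzip stream whose Extra subfield payload is fmt.Sprintf("%016xSTARGZ", tocOffset). An illustrative decoder for that 22-byte payload (not from the vendored code; it ignores the SI1/SI2/LEN subfield framing of the non-legacy footer and assumes fmt and strconv are imported):

// Illustrative only: recover the TOC offset from a "%016xSTARGZ" Extra payload.
func tocOffsetFromExtra(extra []byte) (int64, error) {
	const suffix = "STARGZ"
	if len(extra) != 16+len(suffix) || string(extra[16:]) != suffix {
		return 0, fmt.Errorf("unexpected footer extra: %q", extra)
	}
	// The first 16 bytes are the TOC offset as zero-padded hex digits.
	return strconv.ParseInt(string(extra[:16]), 16, 64)
}
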
+ ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) +} + +type WriteFlushCloser interface { + io.WriteCloser + Flush() error +} diff --git a/vendor/github.com/containers/image/v5/directory/directory_dest.go b/vendor/github.com/containers/image/v5/directory/directory_dest.go new file mode 100644 index 0000000000..222723a8f5 --- /dev/null +++ b/vendor/github.com/containers/image/v5/directory/directory_dest.go @@ -0,0 +1,284 @@ +package directory + +import ( + "context" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "runtime" + + "github.com/containers/image/v5/internal/imagedestination/impl" + "github.com/containers/image/v5/internal/imagedestination/stubs" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/putblobdigest" + "github.com/containers/image/v5/internal/signature" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" +) + +const version = "Directory Transport Version: 1.1\n" + +// ErrNotContainerImageDir indicates that the directory doesn't match the expected contents of a directory created +// using the 'dir' transport +var ErrNotContainerImageDir = errors.New("not a containers image directory, don't want to overwrite important data") + +type dirImageDestination struct { + impl.Compat + impl.PropertyMethodsInitialize + stubs.NoPutBlobPartialInitialize + stubs.AlwaysSupportsSignatures + + ref dirReference +} + +// newImageDestination returns an ImageDestination for writing to a directory. +func newImageDestination(sys *types.SystemContext, ref dirReference) (private.ImageDestination, error) { + desiredLayerCompression := types.PreserveOriginal + if sys != nil { + if sys.DirForceCompress { + desiredLayerCompression = types.Compress + + if sys.DirForceDecompress { + return nil, fmt.Errorf("Cannot compress and decompress at the same time") + } + } + if sys.DirForceDecompress { + desiredLayerCompression = types.Decompress + } + } + + // If directory exists check if it is empty + // if not empty, check whether the contents match that of a container image directory and overwrite the contents + // if the contents don't match throw an error + dirExists, err := pathExists(ref.resolvedPath) + if err != nil { + return nil, fmt.Errorf("checking for path %q: %w", ref.resolvedPath, err) + } + if dirExists { + isEmpty, err := isDirEmpty(ref.resolvedPath) + if err != nil { + return nil, err + } + + if !isEmpty { + versionExists, err := pathExists(ref.versionPath()) + if err != nil { + return nil, fmt.Errorf("checking if path exists %q: %w", ref.versionPath(), err) + } + if versionExists { + contents, err := os.ReadFile(ref.versionPath()) + if err != nil { + return nil, err + } + // check if contents of version file is what we expect it to be + if string(contents) != version { + return nil, ErrNotContainerImageDir + } + } else { + return nil, ErrNotContainerImageDir + } + // delete directory contents so that only one image is in the directory at a time + if err = removeDirContents(ref.resolvedPath); err != nil { + return nil, fmt.Errorf("erasing contents in %q: %w", ref.resolvedPath, err) + } + logrus.Debugf("overwriting existing container image directory %q", ref.resolvedPath) + } + } else { + // create directory if it doesn't exist + if err := os.MkdirAll(ref.resolvedPath, 0755); err != nil { + return nil, fmt.Errorf("unable to create directory %q: %w", ref.resolvedPath, err) + } + } + 
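// Editorial note (not part of the patch): control reaches the step below only
// when the destination directory is freshly created, empty, or a verified
// containers-image directory whose previous contents were just erased; the
// version sentinel written next is what future invocations compare against
// before returning ErrNotContainerImageDir.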
// create version file + err = os.WriteFile(ref.versionPath(), []byte(version), 0644) + if err != nil { + return nil, fmt.Errorf("creating version file %q: %w", ref.versionPath(), err) + } + + d := &dirImageDestination{ + PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{ + SupportedManifestMIMETypes: nil, + DesiredLayerCompression: desiredLayerCompression, + AcceptsForeignLayerURLs: false, + MustMatchRuntimeOS: false, + IgnoresEmbeddedDockerReference: false, // N/A, DockerReference() returns nil. + HasThreadSafePutBlob: true, + }), + NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref), + + ref: ref, + } + d.Compat = impl.AddCompat(d) + return d, nil +} + +// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, +// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. +func (d *dirImageDestination) Reference() types.ImageReference { + return d.ref +} + +// Close removes resources associated with an initialized ImageDestination, if any. +func (d *dirImageDestination) Close() error { + return nil +} + +// PutBlobWithOptions writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. +// inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far. +func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) { + blobFile, err := os.CreateTemp(d.ref.path, "dir-put-blob") + if err != nil { + return private.UploadedBlob{}, err + } + succeeded := false + explicitClosed := false + defer func() { + if !explicitClosed { + blobFile.Close() + } + if !succeeded { + os.Remove(blobFile.Name()) + } + }() + + digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo) + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). + size, err := io.Copy(blobFile, stream) + if err != nil { + return private.UploadedBlob{}, err + } + blobDigest := digester.Digest() + if inputInfo.Size != -1 && size != inputInfo.Size { + return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size) + } + if err := blobFile.Sync(); err != nil { + return private.UploadedBlob{}, err + } + + // On POSIX systems, blobFile was created with mode 0600, so we need to make it readable. + // On Windows, the “permissions of newly created files” argument to syscall.Open is + // ignored and the file is already readable; besides, blobFile.Chmod, i.e. syscall.Fchmod, + // always fails on Windows. 
+	if runtime.GOOS != "windows" {
+		if err := blobFile.Chmod(0644); err != nil {
+			return private.UploadedBlob{}, err
+		}
+	}
+
+	blobPath := d.ref.layerPath(blobDigest)
+	// need to explicitly close the file, since a rename won't otherwise work on Windows
+	blobFile.Close()
+	explicitClosed = true
+	if err := os.Rename(blobFile.Name(), blobPath); err != nil {
+		return private.UploadedBlob{}, err
+	}
+	succeeded = true
+	return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
+}
+
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If the blob has been successfully reused, returns (true, info, nil).
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+func (d *dirImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
+	if !impl.OriginalBlobMatchesRequiredCompression(options) {
+		return false, private.ReusedBlob{}, nil
+	}
+	if info.Digest == "" {
+		return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with unknown digest")
+	}
+	blobPath := d.ref.layerPath(info.Digest)
+	finfo, err := os.Stat(blobPath)
+	if err != nil && os.IsNotExist(err) {
+		return false, private.ReusedBlob{}, nil
+	}
+	if err != nil {
+		return false, private.ReusedBlob{}, err
+	}
+	return true, private.ReusedBlob{Digest: info.Digest, Size: finfo.Size()}, nil
+}
+
+// PutManifest writes manifest to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write the manifest for (when
+// the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// It is expected but not enforced that the instanceDigest, when specified, matches the digest of `manifest` as generated
+// by `manifest.Digest()`.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
+// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte, instanceDigest *digest.Digest) error {
+	return os.WriteFile(d.ref.manifestPath(instanceDigest), manifest, 0644)
+}
+
+// PutSignaturesWithFormat writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
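
PutBlobWithOptions above is an instance of the write-to-temp-then-rename pattern: stream into a temp file in the destination directory, sync, close (required before rename on Windows, per the comment above), then rename into place so readers never observe a partially written blob. The pattern in isolation (an illustrative sketch; writeAtomically is a made-up name):

// Illustrative only: atomically materialize src at dest within dir.
func writeAtomically(dir, dest string, src io.Reader) (err error) {
	tmp, err := os.CreateTemp(dir, "tmp-blob-")
	if err != nil {
		return err
	}
	defer func() {
		tmp.Close() // no-op if already closed below
		if err != nil {
			os.Remove(tmp.Name()) // roll back on any failure
		}
	}()
	if _, err = io.Copy(tmp, src); err != nil {
		return err
	}
	if err = tmp.Sync(); err != nil {
		return err
	}
	if err = tmp.Close(); err != nil { // close before rename, for Windows
		return err
	}
	return os.Rename(tmp.Name(), dest)
}
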
+func (d *dirImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error { + for i, sig := range signatures { + blob, err := signature.Blob(sig) + if err != nil { + return err + } + if err := os.WriteFile(d.ref.signaturePath(i, instanceDigest), blob, 0644); err != nil { + return err + } + } + return nil +} + +// Commit marks the process of storing the image as successful and asks for the image to be persisted. +// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list +// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the +// original manifest list digest, if desired. +// WARNING: This does not have any transactional semantics: +// - Uploaded data MAY be visible to others before Commit() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) +func (d *dirImageDestination) Commit(context.Context, types.UnparsedImage) error { + return nil +} + +// returns true if path exists +func pathExists(path string) (bool, error) { + _, err := os.Stat(path) + if err == nil { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +// returns true if directory is empty +func isDirEmpty(path string) (bool, error) { + files, err := os.ReadDir(path) + if err != nil { + return false, err + } + return len(files) == 0, nil +} + +// deletes the contents of a directory +func removeDirContents(path string) error { + files, err := os.ReadDir(path) + if err != nil { + return err + } + + for _, file := range files { + if err := os.RemoveAll(filepath.Join(path, file.Name())); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/containers/image/v5/directory/directory_src.go b/vendor/github.com/containers/image/v5/directory/directory_src.go new file mode 100644 index 0000000000..5fc83bb6f9 --- /dev/null +++ b/vendor/github.com/containers/image/v5/directory/directory_src.go @@ -0,0 +1,102 @@ +package directory + +import ( + "context" + "fmt" + "io" + "os" + + "github.com/containers/image/v5/internal/imagesource/impl" + "github.com/containers/image/v5/internal/imagesource/stubs" + "github.com/containers/image/v5/internal/manifest" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/signature" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" +) + +type dirImageSource struct { + impl.Compat + impl.PropertyMethodsInitialize + impl.DoesNotAffectLayerInfosForCopy + stubs.NoGetBlobAtInitialize + + ref dirReference +} + +// newImageSource returns an ImageSource reading from an existing directory. +// The caller must call .Close() on the returned ImageSource. +func newImageSource(ref dirReference) private.ImageSource { + s := &dirImageSource{ + PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{ + HasThreadSafeGetBlob: false, + }), + NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref), + + ref: ref, + } + s.Compat = impl.AddCompat(s) + return s +} + +// Reference returns the reference used to set up this source, _as specified by the user_ +// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. 
+func (s *dirImageSource) Reference() types.ImageReference { + return s.ref +} + +// Close removes resources associated with an initialized ImageSource, if any. +func (s *dirImageSource) Close() error { + return nil +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + m, err := os.ReadFile(s.ref.manifestPath(instanceDigest)) + if err != nil { + return nil, "", err + } + return m, manifest.GuessMIMEType(m), err +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. +func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + r, err := os.Open(s.ref.layerPath(info.Digest)) + if err != nil { + return nil, -1, err + } + fi, err := r.Stat() + if err != nil { + return nil, -1, err + } + return r, fi.Size(), nil +} + +// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (s *dirImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) { + signatures := []signature.Signature{} + for i := 0; ; i++ { + path := s.ref.signaturePath(i, instanceDigest) + sigBlob, err := os.ReadFile(path) + if err != nil { + if os.IsNotExist(err) { + break + } + return nil, err + } + signature, err := signature.FromBlob(sigBlob) + if err != nil { + return nil, fmt.Errorf("parsing signature %q: %w", path, err) + } + signatures = append(signatures, signature) + } + return signatures, nil +} diff --git a/vendor/github.com/containers/image/v5/directory/directory_transport.go b/vendor/github.com/containers/image/v5/directory/directory_transport.go new file mode 100644 index 0000000000..7e3068693d --- /dev/null +++ b/vendor/github.com/containers/image/v5/directory/directory_transport.go @@ -0,0 +1,188 @@ +package directory + +import ( + "context" + "errors" + "fmt" + "path/filepath" + "strings" + + "github.com/containers/image/v5/directory/explicitfilepath" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/image" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" +) + +func init() { + transports.Register(Transport) +} + +// Transport is an ImageTransport for directory paths. 
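
GetSignaturesWithFormat above uses a probe-until-missing loop over numbered signature files: read index 0, 1, 2, ... and stop at the first path that does not exist. The same idiom in isolation (an illustrative sketch; readNumbered and pathFor are made-up names, and the real code additionally parses each blob via signature.FromBlob):

// Illustrative only: read consecutively numbered files until the first gap.
func readNumbered(pathFor func(i int) string) ([][]byte, error) {
	var out [][]byte
	for i := 0; ; i++ {
		b, err := os.ReadFile(pathFor(i))
		if err != nil {
			if os.IsNotExist(err) {
				return out, nil // first missing index ends the sequence
			}
			return nil, err
		}
		out = append(out, b)
	}
}
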
+var Transport = dirTransport{} + +type dirTransport struct{} + +func (t dirTransport) Name() string { + return "dir" +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func (t dirTransport) ParseReference(reference string) (types.ImageReference, error) { + return NewReference(reference) +} + +// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys +// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). +// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. +// scope passed to this function will not be "", that value is always allowed. +func (t dirTransport) ValidatePolicyConfigurationScope(scope string) error { + if !strings.HasPrefix(scope, "/") { + return fmt.Errorf("Invalid scope %s: Must be an absolute path", scope) + } + // Refuse also "/", otherwise "/" and "" would have the same semantics, + // and "" could be unexpectedly shadowed by the "/" entry. + if scope == "/" { + return errors.New(`Invalid scope "/": Use the generic default scope ""`) + } + cleaned := filepath.Clean(scope) + if cleaned != scope { + return fmt.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned) + } + return nil +} + +// dirReference is an ImageReference for directory paths. +type dirReference struct { + // Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time! + // Either of the paths may point to a different, or no, inode over time. resolvedPath may contain symbolic links, and so on. + + // Generally we follow the intent of the user, and use the "path" member for filesystem operations (e.g. the user can use a relative path to avoid + // being exposed to symlinks and renames in the parent directories to the working directory). + // (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.) + path string // As specified by the user. May be relative, contain symlinks, etc. + resolvedPath string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces. +} + +// There is no directory.ParseReference because it is rather pointless. +// Callers who need a transport-independent interface will go through +// dirTransport.ParseReference; callers who intentionally deal with directories +// can use directory.NewReference. + +// NewReference returns a directory reference for a specified path. +// +// We do not expose an API supplying the resolvedPath; we could, but recomputing it +// is generally cheap enough that we prefer being confident about the properties of resolvedPath. +func NewReference(path string) (types.ImageReference, error) { + resolved, err := explicitfilepath.ResolvePathToFullyExplicit(path) + if err != nil { + return nil, err + } + return dirReference{path: path, resolvedPath: resolved}, nil +} + +func (ref dirReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. 
default attribute values omitted by the user may be filled in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. +func (ref dirReference) StringWithinTransport() string { + return ref.path +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, +// not e.g. after redirect or alias processing), or nil if unknown/not applicable. +func (ref dirReference) DockerReference() reference.Named { + return nil +} + +// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. +// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; +// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical +// (i.e. various references with exactly the same semantics should return the same configuration identity) +// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but +// not required/guaranteed that it will be a valid input to Transport().ParseReference(). +// Returns "" if configuration identities for these references are not supported. +func (ref dirReference) PolicyConfigurationIdentity() string { + return ref.resolvedPath +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. +// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), +// and each following element to be a prefix of the element preceding it. +func (ref dirReference) PolicyConfigurationNamespaces() []string { + res := []string{} + path := ref.resolvedPath + for { + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 || lastSlash == 0 { + break + } + path = path[:lastSlash] + res = append(res, path) + } + // Note that we do not include "/"; it is redundant with the default "" global default, + // and rejected by dirTransport.ValidatePolicyConfigurationScope above. + return res +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref dirReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + return image.FromReference(ctx, sys, ref) +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. +func (ref dirReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(ref), nil +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. 
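+// Note: for this transport, Commit() on the returned destination is effectively a no-op
+// (dirImageDestination.Commit above just returns nil); e.g. PutSignaturesWithFormat
+// writes signature files to their final paths immediately.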
+func (ref dirReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+	return newImageDestination(sys, ref)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref dirReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+	return errors.New("Deleting images not implemented for dir: images")
+}
+
+// manifestPath returns a path for the manifest within a directory using our conventions.
+func (ref dirReference) manifestPath(instanceDigest *digest.Digest) string {
+	if instanceDigest != nil {
+		return filepath.Join(ref.path, instanceDigest.Encoded()+".manifest.json")
+	}
+	return filepath.Join(ref.path, "manifest.json")
+}
+
+// layerPath returns a path for a layer tarball within a directory using our conventions.
+func (ref dirReference) layerPath(digest digest.Digest) string {
+	// FIXME: Should we keep the digest identification?
+	return filepath.Join(ref.path, digest.Encoded())
+}
+
+// signaturePath returns a path for a signature within a directory using our conventions.
+func (ref dirReference) signaturePath(index int, instanceDigest *digest.Digest) string {
+	if instanceDigest != nil {
+		return filepath.Join(ref.path, fmt.Sprintf("%s.signature-%d", instanceDigest.Encoded(), index+1))
+	}
+	return filepath.Join(ref.path, fmt.Sprintf("signature-%d", index+1))
+}
+
+// versionPath returns a path for the version file within a directory using our conventions.
+func (ref dirReference) versionPath() string {
+	return filepath.Join(ref.path, "version")
+}
diff --git a/vendor/github.com/containers/image/v5/docker/daemon/client.go b/vendor/github.com/containers/image/v5/docker/daemon/client.go
new file mode 100644
index 0000000000..2c245f54f9
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/daemon/client.go
@@ -0,0 +1,101 @@
+package daemon
+
+import (
+	"net/http"
+	"path/filepath"
+
+	"github.com/containers/image/v5/types"
+	dockerclient "github.com/docker/docker/client"
+	"github.com/docker/go-connections/tlsconfig"
+)
+
+const (
+	// The default API version to be used in case none is explicitly specified
+	defaultAPIVersion = "1.22"
+)
+
+// newDockerClient initializes a new API client based on the passed SystemContext.
+func newDockerClient(sys *types.SystemContext) (*dockerclient.Client, error) {
+	host := dockerclient.DefaultDockerHost
+	if sys != nil && sys.DockerDaemonHost != "" {
+		host = sys.DockerDaemonHost
+	}
+
+	opts := []dockerclient.Opt{
+		dockerclient.WithHost(host),
+		dockerclient.WithVersion(defaultAPIVersion),
+	}
+
+	// We only build the TLS configuration for TLS sockets:
+	//
+	// The dockerclient.Client implementation differentiates between
+	// - Client.proto, which is ~how the connection is established (IP / AF_UNIX/Windows)
+	// - Client.scheme, which is what is sent over the connection (HTTP with/without TLS).
+	//
+	// Only Client.proto is set from the URL in dockerclient.WithHost(),
+	// Client.scheme is detected based on the presence of an http.Client.TLSClientConfig;
+	// dockerclient.WithHTTPClient with a client that has TLSClientConfig set
+	// will, by default, trigger an attempt to use TLS.
+	//
+	// So, don’t use WithHTTPClient for unix:// sockets at all.
+	//
+	// Similarly, if we want to communicate over plain HTTP on a TCP socket (http://),
+	// we also should not set TLSClientConfig.
We continue to use WithHTTPClient + // with our slightly non-default settings to avoid a behavior change on updates of c/image. + // + // Alternatively we could use dockerclient.WithScheme to drive the TLS/non-TLS logic + // explicitly, but we would still want to set WithHTTPClient (differently) for https:// and http:// ; + // so that would not be any simpler. + serverURL, err := dockerclient.ParseHostURL(host) + if err != nil { + return nil, err + } + switch serverURL.Scheme { + case "unix": // Nothing + case "http": + hc := httpConfig() + opts = append(opts, dockerclient.WithHTTPClient(hc)) + default: + hc, err := tlsConfig(sys) + if err != nil { + return nil, err + } + opts = append(opts, dockerclient.WithHTTPClient(hc)) + } + + return dockerclient.NewClientWithOpts(opts...) +} + +func tlsConfig(sys *types.SystemContext) (*http.Client, error) { + options := tlsconfig.Options{} + if sys != nil && sys.DockerDaemonInsecureSkipTLSVerify { + options.InsecureSkipVerify = true + } + + if sys != nil && sys.DockerDaemonCertPath != "" { + options.CAFile = filepath.Join(sys.DockerDaemonCertPath, "ca.pem") + options.CertFile = filepath.Join(sys.DockerDaemonCertPath, "cert.pem") + options.KeyFile = filepath.Join(sys.DockerDaemonCertPath, "key.pem") + } + + tlsc, err := tlsconfig.Client(options) + if err != nil { + return nil, err + } + + return &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsc, + }, + CheckRedirect: dockerclient.CheckRedirect, + }, nil +} + +func httpConfig() *http.Client { + return &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: nil, + }, + CheckRedirect: dockerclient.CheckRedirect, + } +} diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go new file mode 100644 index 0000000000..55431db13a --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go @@ -0,0 +1,186 @@ +package daemon + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/containers/image/v5/docker/internal/tarfile" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/types" + "github.com/docker/docker/client" + "github.com/sirupsen/logrus" +) + +type daemonImageDestination struct { + ref daemonReference + mustMatchRuntimeOS bool + *tarfile.Destination // Implements most of types.ImageDestination + archive *tarfile.Writer + // For talking to imageLoadGoroutine + goroutineCancel context.CancelFunc + statusChannel <-chan error + writer *io.PipeWriter + // Other state + committed bool // writer has been closed +} + +// newImageDestination returns a types.ImageDestination for the specified image reference. 
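+//
+// Internally, the returned destination streams a tarball through an io.Pipe: the
+// tarfile.Writer side is filled by the embedded tarfile.Destination, while
+// imageLoadGoroutine (below) feeds the read side to the daemon's load endpoint;
+// Commit() closes the writer and then waits for the goroutine's status.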
+func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daemonReference) (private.ImageDestination, error) { + if ref.ref == nil { + return nil, fmt.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport()) + } + namedTaggedRef, ok := ref.ref.(reference.NamedTagged) + if !ok { + return nil, fmt.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport()) + } + + var mustMatchRuntimeOS = true + if sys != nil && sys.DockerDaemonHost != client.DefaultDockerHost { + mustMatchRuntimeOS = false + } + + c, err := newDockerClient(sys) + if err != nil { + return nil, fmt.Errorf("initializing docker engine client: %w", err) + } + + reader, writer := io.Pipe() + archive := tarfile.NewWriter(writer) + // Commit() may never be called, so we may never read from this channel; so, make this buffered to allow imageLoadGoroutine to write status and terminate even if we never read it. + statusChannel := make(chan error, 1) + + goroutineContext, goroutineCancel := context.WithCancel(ctx) + go imageLoadGoroutine(goroutineContext, c, reader, statusChannel) + + return &daemonImageDestination{ + ref: ref, + mustMatchRuntimeOS: mustMatchRuntimeOS, + Destination: tarfile.NewDestination(sys, archive, ref.Transport().Name(), namedTaggedRef), + archive: archive, + goroutineCancel: goroutineCancel, + statusChannel: statusChannel, + writer: writer, + committed: false, + }, nil +} + +// imageLoadGoroutine accepts tar stream on reader, sends it to c, and reports error or success by writing to statusChannel +func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeReader, statusChannel chan<- error) { + defer c.Close() + err := errors.New("Internal error: unexpected panic in imageLoadGoroutine") + defer func() { + logrus.Debugf("docker-daemon: sending done, status %v", err) + statusChannel <- err + }() + defer func() { + if err == nil { + reader.Close() + } else { + if err := reader.CloseWithError(err); err != nil { + logrus.Debugf("imageLoadGoroutine: Error during reader.CloseWithError: %v", err) + } + } + }() + + err = imageLoad(ctx, c, reader) +} + +// imageLoad accepts tar stream on reader and sends it to c +func imageLoad(ctx context.Context, c *client.Client, reader *io.PipeReader) error { + resp, err := c.ImageLoad(ctx, reader, true) + if err != nil { + return fmt.Errorf("starting a load operation in docker engine: %w", err) + } + defer resp.Body.Close() + + // jsonError and jsonMessage are small subsets of docker/docker/pkg/jsonmessage.JSONError and JSONMessage, + // copied here to minimize dependencies. + type jsonError struct { + Message string `json:"message,omitempty"` + } + type jsonMessage struct { + Error *jsonError `json:"errorDetail,omitempty"` + } + + dec := json.NewDecoder(resp.Body) + for { + var msg jsonMessage + if err := dec.Decode(&msg); err != nil { + if err == io.EOF { + break + } + return fmt.Errorf("parsing docker load progress: %w", err) + } + if msg.Error != nil { + return fmt.Errorf("docker engine reported: %s", msg.Error.Message) + } + } + return nil // No error reported = success +} + +// DesiredLayerCompression indicates if layers must be compressed, decompressed or preserved +func (d *daemonImageDestination) DesiredLayerCompression() types.LayerCompression { + return types.PreserveOriginal +} + +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise. 
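+// For this destination that is the case exactly when talking to the local daemon;
+// newImageDestination above disables the check when sys.DockerDaemonHost selects a non-default host.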
+func (d *daemonImageDestination) MustMatchRuntimeOS() bool {
+	return d.mustMatchRuntimeOS
+}
+
+// Close removes resources associated with an initialized ImageDestination, if any.
+func (d *daemonImageDestination) Close() error {
+	if !d.committed {
+		logrus.Debugf("docker-daemon: Closing tar stream to abort loading")
+		// In principle, goroutineCancel() should abort the HTTP request and stop the process from continuing.
+		// In practice, though, various HTTP implementations used by client.Client.ImageLoad() (including
+		// https://github.com/golang/net/blob/master/context/ctxhttp/ctxhttp_pre17.go and the
+		// net/http version with native Context support in Go 1.7) do not always actually immediately cancel
+		// the operation: they may process the HTTP request, or a part of it, to completion in a goroutine, and
+		// return early if the context is canceled without terminating the goroutine at all.
+		// So we need this CloseWithError to terminate sending the HTTP request Body
+		// immediately, and hopefully, through terminating the sending which uses "Transfer-Encoding: chunked" without sending
+		// the terminating zero-length chunk, prevent the docker daemon from processing the tar stream at all.
+		// Whether that works or not, closing the PipeWriter seems desirable in any case.
+		if err := d.writer.CloseWithError(errors.New("Aborting upload, daemonImageDestination closed without a previous .Commit()")); err != nil {
+			return err
+		}
+	}
+	d.goroutineCancel()
+
+	return nil
+}
+
+func (d *daemonImageDestination) Reference() types.ImageReference {
+	return d.ref
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
+// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
+// original manifest list digest, if desired.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+func (d *daemonImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
+	logrus.Debugf("docker-daemon: Closing tar stream")
+	if err := d.archive.Close(); err != nil {
+		return err
+	}
+	if err := d.writer.Close(); err != nil {
+		return err
+	}
+	d.committed = true // We may still fail, but we are done sending to imageLoadGoroutine.
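+	// imageLoadGoroutine sends exactly one status value (possibly nil) on the
+	// buffered statusChannel, so the receive below cannot block forever; we also
+	// honor ctx cancellation while waiting.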
+
+	logrus.Debugf("docker-daemon: Waiting for status")
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case err := <-d.statusChannel:
+		return err
+	}
+}
diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go
new file mode 100644
index 0000000000..10923c278e
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go
@@ -0,0 +1,56 @@
+package daemon
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/containers/image/v5/docker/internal/tarfile"
+	"github.com/containers/image/v5/internal/private"
+	"github.com/containers/image/v5/types"
+)
+
+type daemonImageSource struct {
+	ref daemonReference
+	*tarfile.Source // Implements most of types.ImageSource
+}
+
+// newImageSource returns a types.ImageSource for the specified image reference.
+// The caller must call .Close() on the returned ImageSource.
+//
+// It would be great if we were able to stream the input tar as it is being
+// sent; but Docker sends the top-level manifest, which determines which paths
+// to look for, at the end, so we will need to seek back and re-read, several times.
+// (We could, perhaps, expect an exact sequence, assume that the first plaintext file
+// is the config, and that the following len(RootFS) files are the layers, but that feels
+// way too brittle.)
+func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonReference) (private.ImageSource, error) {
+	c, err := newDockerClient(sys)
+	if err != nil {
+		return nil, fmt.Errorf("initializing docker engine client: %w", err)
+	}
+	defer c.Close()
+
+	// Per NewReference(), ref.StringWithinTransport() is either an image ID (config digest), or a !reference.NameOnly() reference.
+	// Either way ImageSave should create a tarball with exactly one image.
+	inputStream, err := c.ImageSave(ctx, []string{ref.StringWithinTransport()})
+	if err != nil {
+		return nil, fmt.Errorf("loading image from docker engine: %w", err)
+	}
+	defer inputStream.Close()
+
+	archive, err := tarfile.NewReaderFromStream(sys, inputStream)
+	if err != nil {
+		return nil, err
+	}
+	src := tarfile.NewSource(archive, true, ref.Transport().Name(), nil, -1)
+	return &daemonImageSource{
+		ref:    ref,
+		Source: src,
+	}, nil
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (s *daemonImageSource) Reference() types.ImageReference {
+	return s.ref
+}
diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go
new file mode 100644
index 0000000000..9eedff7456
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go
@@ -0,0 +1,219 @@
+package daemon
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"github.com/containers/image/v5/docker/policyconfiguration"
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/internal/image"
+	"github.com/containers/image/v5/transports"
+	"github.com/containers/image/v5/types"
+	"github.com/opencontainers/go-digest"
+)
+
+func init() {
+	transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for images managed by a local Docker daemon.
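+//
+// A minimal usage sketch (names are hypothetical, for illustration only):
+//
+//	ref, err := ParseReference("registry.example.com/app:latest") // name:tag
+//	// or, by image ID: ParseReference("sha256:<64 hex digits>")
+//	src, err := ref.NewImageSource(context.Background(), nil)
+//	// ... use src, then: src.Close()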
+var Transport = daemonTransport{} + +type daemonTransport struct{} + +// Name returns the name of the transport, which must be unique among other transports. +func (t daemonTransport) Name() string { + return "docker-daemon" +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func (t daemonTransport) ParseReference(reference string) (types.ImageReference, error) { + return ParseReference(reference) +} + +// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys +// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). +// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. +// scope passed to this function will not be "", that value is always allowed. +func (t daemonTransport) ValidatePolicyConfigurationScope(scope string) error { + // ID values cannot be effectively namespaced, and are clearly invalid host:port values. + if _, err := digest.Parse(scope); err == nil { + return fmt.Errorf(`docker-daemon: can not use algo:digest value %s as a namespace`, scope) + } + + // FIXME? We could be verifying the various character set and length restrictions + // from docker/distribution/reference.regexp.go, but other than that there + // are few semantically invalid strings. + return nil +} + +// daemonReference is an ImageReference for images managed by a local Docker daemon +// Exactly one of id and ref can be set. +// For daemonImageSource, both id and ref are acceptable, ref must not be a NameOnly (interpreted as all tags in that repository by the daemon) +// For daemonImageDestination, it must be a ref, which is NamedTagged. +// (We could, in principle, also allow storing images without tagging them, and the user would have to refer to them using the docker image ID = config digest. +// Using the config digest requires the caller to parse the manifest themselves, which is very cumbersome; so, for now, we don’t bother.) +type daemonReference struct { + id digest.Digest + ref reference.Named // !reference.IsNameOnly +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func ParseReference(refString string) (types.ImageReference, error) { + // This is intended to be compatible with reference.ParseAnyReference, but more strict about refusing some of the ambiguous cases. + // In particular, this rejects unprefixed digest values (64 hex chars), and sha256 digest prefixes (sha256:fewer-than-64-hex-chars). + + // digest:hexstring is structurally the same as a reponame:tag (meaning docker.io/library/reponame:tag). + // reference.ParseAnyReference interprets such strings as digests. + if dgst, err := digest.Parse(refString); err == nil { + // The daemon explicitly refuses to tag images with a reponame equal to digest.Canonical - but _only_ this digest name. + // Other digest references are ambiguous, so refuse them. 
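+		// E.g. a full "sha256:<64 hex digits>" value is accepted as an image ID here,
+		// while digests using any other algorithm fall through to the error below.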
+ if dgst.Algorithm() != digest.Canonical { + return nil, fmt.Errorf("Invalid docker-daemon: reference %s: only digest algorithm %s accepted", refString, digest.Canonical) + } + return NewReference(dgst, nil) + } + + ref, err := reference.ParseNormalizedNamed(refString) // This also rejects unprefixed digest values + if err != nil { + return nil, err + } + if reference.FamiliarName(ref) == digest.Canonical.String() { + return nil, fmt.Errorf("Invalid docker-daemon: reference %s: The %s repository name is reserved for (non-shortened) digest references", refString, digest.Canonical) + } + return NewReference("", ref) +} + +// NewReference returns a docker-daemon reference for either the supplied image ID (config digest) or the supplied reference (which must satisfy !reference.IsNameOnly) +func NewReference(id digest.Digest, ref reference.Named) (types.ImageReference, error) { + if id != "" && ref != nil { + return nil, errors.New("docker-daemon: reference must not have an image ID and a reference string specified at the same time") + } + if ref != nil { + if reference.IsNameOnly(ref) { + return nil, fmt.Errorf("docker-daemon: reference %s has neither a tag nor a digest", reference.FamiliarString(ref)) + } + // A github.com/distribution/reference value can have a tag and a digest at the same time! + // Most versions of docker/reference do not handle that (ignoring the tag), so reject such input. + // This MAY be accepted in the future. + // (Even if it were supported, the semantics of policy namespaces are unclear - should we drop + // the tag or the digest first?) + _, isTagged := ref.(reference.NamedTagged) + _, isDigested := ref.(reference.Canonical) + if isTagged && isDigested { + return nil, fmt.Errorf("docker-daemon: references with both a tag and digest are currently not supported") + } + } + return daemonReference{ + id: id, + ref: ref, + }, nil +} + +func (ref daemonReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix; +// instead, see transports.ImageName(). +func (ref daemonReference) StringWithinTransport() string { + switch { + case ref.id != "": + return ref.id.String() + case ref.ref != nil: + return reference.FamiliarString(ref.ref) + default: // Coverage: Should never happen, NewReference above should refuse such values. + panic("Internal inconsistency: daemonReference has empty id and nil ref") + } +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, +// not e.g. after redirect or alias processing), or nil if unknown/not applicable. +func (ref daemonReference) DockerReference() reference.Named { + return ref.ref // May be nil +} + +// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. +// This MUST reflect user intent, not e.g. 
after processing of third-party redirects or aliases;
+// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
+// (i.e. various references with exactly the same semantics should return the same configuration identity)
+// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
+// not required/guaranteed that it will be a valid input to Transport().ParseReference().
+// Returns "" if configuration identities for these references are not supported.
+func (ref daemonReference) PolicyConfigurationIdentity() string {
+	// We must allow referring to images in the daemon by image ID, otherwise untagged images would not be accessible.
+	// But the existence of image IDs means that we can’t truly namespace the input well:
+	// a single image can be namespaced either using the name or the ID depending on how it is named.
+	//
+	// That’s fairly unexpected, but we have to cope somehow.
+	//
+	// So, use the ordinary docker/policyconfiguration namespacing for named images.
+	// Image IDs all fall into the root namespace.
+	// Users can set up the root namespace to be either untrusted or rejected,
+	// and set up specific trust for named namespaces. This allows verifying image
+	// identity when a name is known, and unnamed images would be untrusted or rejected.
+	switch {
+	case ref.id != "":
+		return "" // This still allows using the default "" scope to define a global policy for ID-identified images.
+	case ref.ref != nil:
+		res, err := policyconfiguration.DockerReferenceIdentity(ref.ref)
+		if res == "" || err != nil { // Coverage: Should never happen, NewReference above should refuse values which could cause a failure.
+			panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err))
+		}
+		return res
+	default: // Coverage: Should never happen, NewReference above should refuse such values.
+		panic("Internal inconsistency: daemonReference has empty id and nil ref")
+	}
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
+// in order, terminating on first match, and an implicit "" is always checked at the end.
+// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+// and each following element to be a prefix of the element preceding it.
+func (ref daemonReference) PolicyConfigurationNamespaces() []string {
+	// See the explanation in daemonReference.PolicyConfigurationIdentity.
+	switch {
+	case ref.id != "":
+		return []string{}
+	case ref.ref != nil:
+		return policyconfiguration.DockerReferenceNamespaces(ref.ref)
+	default: // Coverage: Should never happen, NewReference above should refuse such values.
+		panic("Internal inconsistency: daemonReference has empty id and nil ref")
+	}
+}
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
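+//
+// A hedged sketch of that verification-oriented path (assuming the vendored
+// internal/image helpers UnparsedInstance and FromUnparsedImage; error handling omitted):
+//
+//	src, _ := ref.NewImageSource(ctx, sys)
+//	unparsed := image.UnparsedInstance(src, nil)
+//	// ...verify signatures/policy on unparsed here...
+//	img, _ := image.FromUnparsedImage(ctx, sys, unparsed)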
+func (ref daemonReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + return image.FromReference(ctx, sys, ref) +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. +func (ref daemonReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(ctx, sys, ref) +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. +func (ref daemonReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + return newImageDestination(ctx, sys, ref) +} + +// DeleteImage deletes the named image from the registry, if supported. +func (ref daemonReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + // Should this just untag the image? Should this stop running containers? + // The semantics is not quite as clear as for remote repositories. + // The user can run (docker rmi) directly anyway, so, for now(?), punt instead of trying to guess what the user meant. + return errors.New("Deleting images not implemented for docker-daemon: images") +} diff --git a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go new file mode 100644 index 0000000000..c6498f6ca5 --- /dev/null +++ b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go @@ -0,0 +1,1200 @@ +package openshift + +import ( + "crypto/tls" + "crypto/x509" + "encoding/base64" + "errors" + "fmt" + "net" + "net/http" + "net/netip" + "net/url" + "os" + "path" + "path/filepath" + "reflect" + "strings" + "time" + + "dario.cat/mergo" + "github.com/containers/storage/pkg/homedir" + "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" + "gopkg.in/yaml.v3" +) + +// restTLSClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/restclient.TLSClientConfig. +// restTLSClientConfig contains settings to enable transport layer security +type restTLSClientConfig struct { + // Server requires TLS client certificate authentication + CertFile string + // Server requires TLS client certificate authentication + KeyFile string + // Trusted root certificates for server + CAFile string + + // CertData holds PEM-encoded bytes (typically read from a client certificate file). + // CertData takes precedence over CertFile + CertData []byte + // KeyData holds PEM-encoded bytes (typically read from a client certificate key file). + // KeyData takes precedence over KeyFile + KeyData []byte + // CAData holds PEM-encoded bytes (typically read from a root certificates bundle). + // CAData takes precedence over CAFile + CAData []byte +} + +// restConfig is a modified copy of k8s.io/kubernetes/pkg/client/restclient.Config. +// Config holds the common attributes that can be passed to a Kubernetes client on +// initialization. +type restConfig struct { + // Host must be a host string, a host:port pair, or a URL to the base of the apiserver. + // If a URL is given then the (optional) Path of that URL represents a prefix that must + // be appended to all request URIs used to access the apiserver. This allows a frontend + // proxy to easily relocate all of the apiserver endpoints. 
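+	// Example values (illustrative only): "https://host.example.com:8443" or "host.example.com".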
+	Host string
+
+	// Server requires Basic authentication
+	Username string
+	Password string
+
+	// Server requires Bearer authentication. This client will not attempt to use
+	// refresh tokens for an OAuth2 flow.
+	// TODO: demonstrate an OAuth2 compatible client.
+	BearerToken string
+
+	// TLSClientConfig contains settings to enable transport layer security
+	TLSClientConfig restTLSClientConfig
+
+	// Server should be accessed without verifying the TLS
+	// certificate. For testing only.
+	Insecure bool
+}
+
+// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfig.
+// ClientConfig is used to make it easy to get an API server client
+type clientConfig interface {
+	// ClientConfig returns a complete client config
+	ClientConfig() (*restConfig, error)
+}
+
+// defaultClientConfig is a modified copy of openshift/origin/pkg/cmd/util/clientcmd.DefaultClientConfig.
+func defaultClientConfig() clientConfig {
+	loadingRules := newOpenShiftClientConfigLoadingRules()
+	// REMOVED: Allowing command-line overriding of loadingRules
+	// REMOVED: clientcmd.ConfigOverrides
+
+	clientConfig := newNonInteractiveDeferredLoadingClientConfig(loadingRules)
+
+	return clientConfig
+}
+
+var recommendedHomeFile = path.Join(homedir.Get(), ".kube/config")
+
+// newOpenShiftClientConfigLoadingRules is a modified copy of openshift/origin/pkg/cmd/cli/config.NewOpenShiftClientConfigLoadingRules.
+// NewOpenShiftClientConfigLoadingRules returns file priority loading rules for OpenShift.
+// 1. --config value
+// 2. if KUBECONFIG env var has a value, use it. Otherwise, ~/.kube/config file
+func newOpenShiftClientConfigLoadingRules() *clientConfigLoadingRules {
+	chain := []string{}
+
+	envVarFile := os.Getenv("KUBECONFIG")
+	if len(envVarFile) != 0 {
+		chain = append(chain, filepath.SplitList(envVarFile)...)
+	} else {
+		chain = append(chain, recommendedHomeFile)
+	}
+
+	return &clientConfigLoadingRules{
+		Precedence: chain,
+		// REMOVED: Migration support; run (oc login) to trigger migration
+	}
+}
+
+// deferredLoadingClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DeferredLoadingClientConfig.
+// DeferredLoadingClientConfig is a ClientConfig interface that is backed by a set of loading rules.
+// It is used in cases where the loading rules may change after you've instantiated them and you want to be sure that
+// the most recent rules are used. This is useful in cases where you bind flags to loading rule parameters before
+// the parse happens, and you want your calling code to be ignorant of how the values are being mutated to avoid
+// passing extraneous information down a call stack.
+type deferredLoadingClientConfig struct {
+	loadingRules *clientConfigLoadingRules
+
+	clientConfig clientConfig
+}
+
+// NewNonInteractiveDeferredLoadingClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.NewNonInteractiveDeferredLoadingClientConfig.
+// NewNonInteractiveDeferredLoadingClientConfig creates a ClientConfig using the passed loading rules.
+func newNonInteractiveDeferredLoadingClientConfig(loadingRules *clientConfigLoadingRules) clientConfig {
+	return &deferredLoadingClientConfig{loadingRules: loadingRules}
+}
+
+func (config *deferredLoadingClientConfig) createClientConfig() (clientConfig, error) {
+	if config.clientConfig == nil {
+		// REMOVED: Support for concurrent use in multiple threads.
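+		// The merged result is computed once and cached in config.clientConfig;
+		// subsequent calls return the cached value without re-reading the kubeconfig files.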
+		mergedConfig, err := config.loadingRules.Load()
+		if err != nil {
+			return nil, err
+		}
+
+		// REMOVED: Interactive fallback support.
+		mergedClientConfig := newNonInteractiveClientConfig(*mergedConfig)
+
+		config.clientConfig = mergedClientConfig
+	}
+
+	return config.clientConfig, nil
+}
+
+// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DeferredLoadingClientConfig.ClientConfig.
+// ClientConfig implements ClientConfig
+func (config *deferredLoadingClientConfig) ClientConfig() (*restConfig, error) {
+	mergedClientConfig, err := config.createClientConfig()
+	if err != nil {
+		return nil, err
+	}
+	mergedConfig, err := mergedClientConfig.ClientConfig()
+	if err != nil {
+		return nil, err
+	}
+	// REMOVED: In-cluster service account configuration use.
+
+	return mergedConfig, nil
+}
+
+var (
+	// DefaultCluster is the cluster config used when no other config is specified
+	// TODO: eventually apiserver should start on 443 and be secure by default
+	defaultCluster = clientcmdCluster{Server: "http://localhost:8080"}
+
+	// EnvVarCluster allows overriding the DefaultCluster using an envvar for the server name
+	envVarCluster = clientcmdCluster{Server: os.Getenv("KUBERNETES_MASTER")}
+)
+
+// directClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.
+// DirectClientConfig is a ClientConfig interface that is backed by a clientcmdapi.Config, options overrides, and an optional fallbackReader for auth information
+type directClientConfig struct {
+	config clientcmdConfig
+}
+
+// newNonInteractiveClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.NewNonInteractiveClientConfig.
+// NewNonInteractiveClientConfig creates a DirectClientConfig using the passed context name and does not have a fallback reader for auth information
+func newNonInteractiveClientConfig(config clientcmdConfig) clientConfig {
+	return &directClientConfig{config}
+}
+
+// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.ClientConfig.
+// ClientConfig implements ClientConfig
+func (config *directClientConfig) ClientConfig() (*restConfig, error) {
+	if err := config.ConfirmUsable(); err != nil {
+		return nil, err
+	}
+
+	configAuthInfo := config.getAuthInfo()
+	configClusterInfo := config.getCluster()
+
+	clientConfig := &restConfig{}
+	clientConfig.Host = configClusterInfo.Server
+	if u, err := url.ParseRequestURI(clientConfig.Host); err == nil && u.Opaque == "" && len(u.Path) > 1 {
+		u.RawQuery = ""
+		u.Fragment = ""
+		clientConfig.Host = u.String()
+	}
+
+	// only try to read the auth information if we are secure
+	if isConfigTransportTLS(*clientConfig) {
+		var err error
+		// REMOVED: Support for interactive fallback.
+		userAuthPartialConfig := getUserIdentificationPartialConfig(configAuthInfo)
+		if err = mergo.MergeWithOverwrite(clientConfig, userAuthPartialConfig); err != nil {
+			return nil, err
+		}
+
+		serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configClusterInfo)
+		if err != nil {
+			return nil, err
+		}
+		if err = mergo.MergeWithOverwrite(clientConfig, serverAuthPartialConfig); err != nil {
+			return nil, err
+		}
+	}
+
+	return clientConfig, nil
+}
+
+// getServerIdentificationPartialConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.getServerIdentificationPartialConfig.
+// clientauth.Info object contains both user identification and server identification.
We want different precedence orders for
+// both, so we have to split the objects and merge them separately.
+// We want this order of precedence for the server identification:
+// 1. configClusterInfo (the final result of command line flags and merged .kubeconfig files)
+// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
+// 3. load the ~/.kubernetes_auth file as a default
+func getServerIdentificationPartialConfig(configClusterInfo clientcmdCluster) (*restConfig, error) {
+	mergedConfig := &restConfig{}
+
+	// configClusterInfo holds the information identifying the server provided by .kubeconfig
+	configClientConfig := &restConfig{}
+	configClientConfig.TLSClientConfig.CAFile = configClusterInfo.CertificateAuthority
+	configClientConfig.TLSClientConfig.CAData = configClusterInfo.CertificateAuthorityData
+	configClientConfig.Insecure = configClusterInfo.InsecureSkipTLSVerify
+	if err := mergo.MergeWithOverwrite(mergedConfig, configClientConfig); err != nil {
+		return nil, err
+	}
+
+	return mergedConfig, nil
+}
+
+// getUserIdentificationPartialConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.getUserIdentificationPartialConfig.
+// clientauth.Info object contains both user identification and server identification. We want different precedence orders for
+// both, so we have to split the objects and merge them separately.
+// We want this order of precedence for user identification:
+// 1. configAuthInfo minus auth-path (the final result of command line flags and merged .kubeconfig files)
+// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
+// 3. if there is not enough information to identify the user, try to load the ~/.kubernetes_auth file
+// 4. if there is not enough information to identify the user, prompt if possible
+func getUserIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo) *restConfig {
+	mergedConfig := &restConfig{}
+
+	// blindly overwrite existing values based on precedence
+	if len(configAuthInfo.Token) > 0 {
+		mergedConfig.BearerToken = configAuthInfo.Token
+	}
+	if len(configAuthInfo.ClientCertificate) > 0 || len(configAuthInfo.ClientCertificateData) > 0 {
+		mergedConfig.TLSClientConfig.CertFile = configAuthInfo.ClientCertificate
+		mergedConfig.TLSClientConfig.CertData = configAuthInfo.ClientCertificateData
+		mergedConfig.TLSClientConfig.KeyFile = configAuthInfo.ClientKey
+		mergedConfig.TLSClientConfig.KeyData = configAuthInfo.ClientKeyData
+	}
+	if len(configAuthInfo.Username) > 0 || len(configAuthInfo.Password) > 0 {
+		mergedConfig.Username = configAuthInfo.Username
+		mergedConfig.Password = configAuthInfo.Password
+	}
+
+	// REMOVED: prompting for missing information.
+	return mergedConfig
+}
+
+// ConfirmUsable is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.ConfirmUsable.
+// ConfirmUsable looks at a particular context and determines if that particular part of the config is usable. There might still be errors in the config,
+// but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible.
+func (config *directClientConfig) ConfirmUsable() error {
+	var validationErrors []error
+	validationErrors = append(validationErrors, validateAuthInfo(config.getAuthInfoName(), config.getAuthInfo())...)
+ validationErrors = append(validationErrors, validateClusterInfo(config.getClusterName(), config.getCluster())...) + // when direct client config is specified, and our only error is that no server is defined, we should + // return a standard "no config" error + if len(validationErrors) == 1 && validationErrors[0] == errEmptyCluster { + return newErrConfigurationInvalid([]error{errEmptyConfig}) + } + return newErrConfigurationInvalid(validationErrors) +} + +// getContextName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getContextName. +func (config *directClientConfig) getContextName() string { + // REMOVED: overrides support + return config.config.CurrentContext +} + +// getAuthInfoName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getAuthInfoName. +func (config *directClientConfig) getAuthInfoName() string { + // REMOVED: overrides support + return config.getContext().AuthInfo +} + +// getClusterName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getClusterName. +func (config *directClientConfig) getClusterName() string { + // REMOVED: overrides support + return config.getContext().Cluster +} + +// getContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getContext. +func (config *directClientConfig) getContext() clientcmdContext { + contexts := config.config.Contexts + contextName := config.getContextName() + + var mergedContext clientcmdContext + if configContext, exists := contexts[contextName]; exists { + if err := mergo.MergeWithOverwrite(&mergedContext, configContext); err != nil { + logrus.Debugf("Can't merge configContext: %v", err) + } + } + // REMOVED: overrides support + + return mergedContext +} + +var ( + errEmptyConfig = errors.New("no configuration has been provided") + // message is for consistency with old behavior + errEmptyCluster = errors.New("cluster has no server defined") +) + +// helper for checking certificate/key/CA +func validateFileIsReadable(name string) error { + answer, err := os.Open(name) + defer func() { + if err := answer.Close(); err != nil { + logrus.Debugf("Error closing %v: %v", name, err) + } + }() + return err +} + +// validateClusterInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.validateClusterInfo. +// validateClusterInfo looks for conflicts and errors in the cluster info +func validateClusterInfo(clusterName string, clusterInfo clientcmdCluster) []error { + var validationErrors []error + + if reflect.DeepEqual(clientcmdCluster{}, clusterInfo) { + return []error{errEmptyCluster} + } + + if len(clusterInfo.Server) == 0 { + if len(clusterName) == 0 { + validationErrors = append(validationErrors, errors.New("default cluster has no server defined")) + } else { + validationErrors = append(validationErrors, fmt.Errorf("no server found for cluster %q", clusterName)) + } + } + // Make sure CA data and CA file aren't both specified + if len(clusterInfo.CertificateAuthority) != 0 && len(clusterInfo.CertificateAuthorityData) != 0 { + validationErrors = append(validationErrors, fmt.Errorf("certificate-authority-data and certificate-authority are both specified for %v. 
certificate-authority-data will override", clusterName)) + } + if len(clusterInfo.CertificateAuthority) != 0 { + err := validateFileIsReadable(clusterInfo.CertificateAuthority) + if err != nil { + validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err)) + } + } + + return validationErrors +} + +// validateAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.validateAuthInfo. +// validateAuthInfo looks for conflicts and errors in the auth info +func validateAuthInfo(authInfoName string, authInfo clientcmdAuthInfo) []error { + var validationErrors []error + + usingAuthPath := false + methods := make([]string, 0, 3) + if len(authInfo.Token) != 0 { + methods = append(methods, "token") + } + if len(authInfo.Username) != 0 || len(authInfo.Password) != 0 { + methods = append(methods, "basicAuth") + } + + if len(authInfo.ClientCertificate) != 0 || len(authInfo.ClientCertificateData) != 0 { + // Make sure cert data and file aren't both specified + if len(authInfo.ClientCertificate) != 0 && len(authInfo.ClientCertificateData) != 0 { + validationErrors = append(validationErrors, fmt.Errorf("client-cert-data and client-cert are both specified for %v. client-cert-data will override", authInfoName)) + } + // Make sure key data and file aren't both specified + if len(authInfo.ClientKey) != 0 && len(authInfo.ClientKeyData) != 0 { + validationErrors = append(validationErrors, fmt.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName)) + } + // Make sure a key is specified + if len(authInfo.ClientKey) == 0 && len(authInfo.ClientKeyData) == 0 { + validationErrors = append(validationErrors, fmt.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method", authInfoName)) + } + + if len(authInfo.ClientCertificate) != 0 { + err := validateFileIsReadable(authInfo.ClientCertificate) + if err != nil { + validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err)) + } + } + if len(authInfo.ClientKey) != 0 { + err := validateFileIsReadable(authInfo.ClientKey) + if err != nil { + validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err)) + } + } + } + + // authPath also provides information for the client to identify the server, so allow multiple auth methods in that case + if (len(methods) > 1) && (!usingAuthPath) { + validationErrors = append(validationErrors, fmt.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods)) + } + + return validationErrors +} + +// getAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getAuthInfo. 
+func (config *directClientConfig) getAuthInfo() clientcmdAuthInfo {
+	authInfos := config.config.AuthInfos
+	authInfoName := config.getAuthInfoName()
+
+	var mergedAuthInfo clientcmdAuthInfo
+	if configAuthInfo, exists := authInfos[authInfoName]; exists {
+		if err := mergo.MergeWithOverwrite(&mergedAuthInfo, configAuthInfo); err != nil {
+			logrus.Debugf("Can't merge configAuthInfo: %v", err)
+		}
+	}
+	// REMOVED: overrides support
+
+	return mergedAuthInfo
+}
+
+// getCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getCluster.
+func (config *directClientConfig) getCluster() clientcmdCluster {
+	clusterInfos := config.config.Clusters
+	clusterInfoName := config.getClusterName()
+
+	var mergedClusterInfo clientcmdCluster
+	if err := mergo.MergeWithOverwrite(&mergedClusterInfo, defaultCluster); err != nil {
+		logrus.Debugf("Can't merge defaultCluster: %v", err)
+	}
+	if err := mergo.MergeWithOverwrite(&mergedClusterInfo, envVarCluster); err != nil {
+		logrus.Debugf("Can't merge envVarCluster: %v", err)
+	}
+	if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists {
+		if err := mergo.MergeWithOverwrite(&mergedClusterInfo, configClusterInfo); err != nil {
+			logrus.Debugf("Can't merge configClusterInfo: %v", err)
+		}
+	}
+	// REMOVED: overrides support
+
+	return mergedClusterInfo
+}
+
+// aggregateErr is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate.
+// This helper implements the error and Errors interfaces. Keeping it private
+// prevents people from making an aggregate of 0 errors, which is not
+// an error, but does satisfy the error interface.
+type aggregateErr []error
+
+// newAggregate is a modified copy of k8s.io/apimachinery/pkg/util/errors.NewAggregate.
+// NewAggregate converts a slice of errors into an Aggregate interface, which
+// is itself an implementation of the error interface. If the slice is empty,
+// this returns nil.
+// It will check whether any element of the input error list is nil, to avoid
+// a nil pointer panic when calling Error().
+func newAggregate(errlist []error) error {
+	if len(errlist) == 0 {
+		return nil
+	}
+	// In case the input error list contains nil entries
+	var errs []error
+	for _, e := range errlist {
+		if e != nil {
+			errs = append(errs, e)
+		}
+	}
+	if len(errs) == 0 {
+		return nil
+	}
+	return aggregateErr(errs)
+}
+
+// Error is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate.Error.
+// Error is part of the error interface.
+func (agg aggregateErr) Error() string {
+	if len(agg) == 0 {
+		// This should never happen, really.
+		return ""
+	}
+	if len(agg) == 1 {
+		return agg[0].Error()
+	}
+	result := fmt.Sprintf("[%s", agg[0].Error())
+	for i := 1; i < len(agg); i++ {
+		result += fmt.Sprintf(", %s", agg[i].Error())
+	}
+	result += "]"
+	return result
+}
+
+// REMOVED: aggregateErr.Errors
+
+// errConfigurationInvalid is a modified? copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.errConfigurationInvalid.
+// errConfigurationInvalid is a set of errors indicating the configuration is invalid.
+type errConfigurationInvalid []error
+
+var _ error = errConfigurationInvalid{}
+
+// REMOVED: utilerrors.Aggregate implementation for errConfigurationInvalid.
+
+// newErrConfigurationInvalid is a modified? copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.newErrConfigurationInvalid.
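+// Given zero errors it returns nil; otherwise Error() below renders the set via
+// newAggregate, e.g. as `invalid configuration: [err1, err2]`.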
+func newErrConfigurationInvalid(errs []error) error {
+	switch len(errs) {
+	case 0:
+		return nil
+	default:
+		return errConfigurationInvalid(errs)
+	}
+}
+
+// Error implements the error interface
+func (e errConfigurationInvalid) Error() string {
+	return fmt.Sprintf("invalid configuration: %v", newAggregate(e).Error())
+}
+
+// clientConfigLoadingRules is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules
+// ClientConfigLoadingRules is an ExplicitPath and string slice of specific locations that are used for merging together a Config
+// Callers can put the chain together however they want, but we'd recommend:
+// EnvVarPathFiles if set (a list of files if set) OR the HomeDirectoryPath
+// ExplicitPath is special: a user specifically requests that a certain file be used, and an error is reported if this file is not present
+type clientConfigLoadingRules struct {
+	Precedence []string
+}
+
+// Load is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.Load
+// Load takes the loading rules and returns a Config object based on the following rules.
+//
+// - if the ExplicitPath is set, return the unmerged explicit file
+// - Otherwise, return a merged config based on the Precedence slice
+//
+// A missing ExplicitPath file produces an error. Empty filenames or other missing files are ignored.
+// Read errors or files with non-deserializable content produce errors.
+// The first file to set a particular map key wins, and that map key's value is never changed.
+// BUT, if you set a struct value that is NOT contained inside of a map, the value WILL be changed.
+// This results in some odd looking logic to merge in one direction, merge in the other, and then merge the two.
+// It also means that if two files specify a "red-user", only values from the first file's red-user are used. Even
+// non-conflicting entries from the second file's "red-user" are discarded.
+// Relative paths inside of the .kubeconfig files are resolved against the .kubeconfig file's parent folder
+// and only absolute file paths are returned.
+func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) {
+	errlist := []error{}
+
+	kubeConfigFiles := []string{}
+
+	// REMOVED: explicit path support
+	kubeConfigFiles = append(kubeConfigFiles, rules.Precedence...)
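+	// With the rules built by newOpenShiftClientConfigLoadingRules above, this is either
+	// every path listed in $KUBECONFIG or the single ~/.kube/config fallback.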
+ + kubeconfigs := []*clientcmdConfig{} + // read and cache the config files so that we only look at them once + for _, filename := range kubeConfigFiles { + if len(filename) == 0 { + // no work to do + continue + } + + config, err := loadFromFile(filename) + if os.IsNotExist(err) { + // skip missing files + continue + } + if err != nil { + errlist = append(errlist, fmt.Errorf("loading config file \"%s\": %w", filename, err)) + continue + } + + kubeconfigs = append(kubeconfigs, config) + } + + // first merge all of our maps + mapConfig := clientcmdNewConfig() + for _, kubeconfig := range kubeconfigs { + if err := mergo.MergeWithOverwrite(mapConfig, kubeconfig); err != nil { + return nil, err + } + } + + // merge all of the struct values in the reverse order so that priority is given correctly + // errors are not added to the list the second time + nonMapConfig := clientcmdNewConfig() + for i := len(kubeconfigs) - 1; i >= 0; i-- { + kubeconfig := kubeconfigs[i] + if err := mergo.MergeWithOverwrite(nonMapConfig, kubeconfig); err != nil { + return nil, err + } + } + + // since values are overwritten, but maps values are not, we can merge the non-map config on top of the map config and + // get the values we expect. + config := clientcmdNewConfig() + if err := mergo.MergeWithOverwrite(config, mapConfig); err != nil { + return nil, err + } + if err := mergo.MergeWithOverwrite(config, nonMapConfig); err != nil { + return nil, err + } + + // REMOVED: Possibility to skip this. + if err := resolveLocalPaths(config); err != nil { + errlist = append(errlist, err) + } + + return config, newAggregate(errlist) +} + +// loadFromFile is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.LoadFromFile +// LoadFromFile takes a filename and deserializes the contents into Config object +func loadFromFile(filename string) (*clientcmdConfig, error) { + kubeconfigBytes, err := os.ReadFile(filename) + if err != nil { + return nil, err + } + config, err := load(kubeconfigBytes) + if err != nil { + return nil, err + } + + // set LocationOfOrigin on every Cluster, User, and Context + for key, obj := range config.AuthInfos { + obj.LocationOfOrigin = filename + config.AuthInfos[key] = obj + } + for key, obj := range config.Clusters { + obj.LocationOfOrigin = filename + config.Clusters[key] = obj + } + for key, obj := range config.Contexts { + obj.LocationOfOrigin = filename + config.Contexts[key] = obj + } + + if config.AuthInfos == nil { + config.AuthInfos = map[string]*clientcmdAuthInfo{} + } + if config.Clusters == nil { + config.Clusters = map[string]*clientcmdCluster{} + } + if config.Contexts == nil { + config.Contexts = map[string]*clientcmdContext{} + } + + return config, nil +} + +// load is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.Load +// Load takes a byte slice and deserializes the contents into Config object. +// Encapsulates deserialization without assuming the source is a file. +func load(data []byte) (*clientcmdConfig, error) { + config := clientcmdNewConfig() + // if there's no data in a file, return the default object instead of failing (DecodeInto reject empty input) + if len(data) == 0 { + return config, nil + } + // Note: This does absolutely no kind/version checking or conversions. + if err := yaml.Unmarshal(data, config); err != nil { + return nil, err + } + return config, nil +} + +// resolveLocalPaths is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.resolveLocalPaths. 
+// ResolveLocalPaths resolves all relative paths in the config object with respect to the stanza's LocationOfOrigin +// this cannot be done directly inside of LoadFromFile because doing so there would make it impossible to load a file without +// modification of its contents. +func resolveLocalPaths(config *clientcmdConfig) error { + for _, cluster := range config.Clusters { + if len(cluster.LocationOfOrigin) == 0 { + continue + } + base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin)) + if err != nil { + return fmt.Errorf("Could not determine the absolute path of config file %s: %w", cluster.LocationOfOrigin, err) + } + + if err := resolvePaths(getClusterFileReferences(cluster), base); err != nil { + return err + } + } + for _, authInfo := range config.AuthInfos { + if len(authInfo.LocationOfOrigin) == 0 { + continue + } + base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin)) + if err != nil { + return fmt.Errorf("Could not determine the absolute path of config file %s: %w", authInfo.LocationOfOrigin, err) + } + + if err := resolvePaths(getAuthInfoFileReferences(authInfo), base); err != nil { + return err + } + } + + return nil +} + +// getClusterFileReferences is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.GetClusterFileReferences. +func getClusterFileReferences(cluster *clientcmdCluster) []*string { + return []*string{&cluster.CertificateAuthority} +} + +// getAuthInfoFileReferences is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.GetAuthInfoFileReferences. +func getAuthInfoFileReferences(authInfo *clientcmdAuthInfo) []*string { + return []*string{&authInfo.ClientCertificate, &authInfo.ClientKey} +} + +// resolvePaths is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.resolvePaths. +// ResolvePaths updates the given refs to be absolute paths, relative to the given base directory +func resolvePaths(refs []*string, base string) error { + for _, ref := range refs { + // Don't resolve empty paths + if len(*ref) > 0 { + // Don't resolve absolute paths + if !filepath.IsAbs(*ref) { + *ref = filepath.Join(base, *ref) + } + } + } + return nil +} + +// restClientFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.RESTClientFor. +// RESTClientFor returns a RESTClient that satisfies the requested attributes on a client Config +// object. Note that a RESTClient may require fields that are optional when initializing a Client. +// A RESTClient created by this method is generic - it expects to operate on an API that follows +// the Kubernetes conventions, but may not be the Kubernetes API. +func restClientFor(config *restConfig) (*url.URL, *http.Client, error) { + // REMOVED: Configurable GroupVersion, Codec + // REMOVED: Configurable versionedAPIPath + baseURL, err := defaultServerURLFor(config) + if err != nil { + return nil, nil, err + } + + transport, err := transportFor(config) + if err != nil { + return nil, nil, err + } + + var httpClient *http.Client + if transport != http.DefaultTransport { + httpClient = &http.Client{Transport: transport} + } + + // REMOVED: Configurable QPS, Burst, ContentConfig + // REMOVED: Actually returning a RESTClient object. + return baseURL, httpClient, nil +} + +// defaultServerURL is a modified copy of k8s.io/kubernetes/pkg/client/restclient.DefaultServerURL. 
+// DefaultServerURL converts a host, host:port, or URL string to the default base server API path +// to use with a Client at a given API version following the standard conventions for a +// Kubernetes API. +func defaultServerURL(host string, defaultTLS bool) (*url.URL, error) { + if host == "" { + return nil, errors.New("host must be a URL or a host:port pair") + } + base := host + hostURL, err := url.Parse(base) + if err != nil { + return nil, err + } + if hostURL.Scheme == "" { + scheme := "http://" + if defaultTLS { + scheme = "https://" + } + hostURL, err = url.Parse(scheme + base) + if err != nil { + return nil, err + } + if hostURL.Path != "" && hostURL.Path != "/" { + return nil, fmt.Errorf("host must be a URL or a host:port pair: %q", base) + } + } + + // REMOVED: versionedAPIPath computation. + return hostURL, nil +} + +// defaultServerURLFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.defaultServerURLFor. +// defaultServerUrlFor is shared between IsConfigTransportTLS and RESTClientFor. It +// requires Host and Version to be set prior to being called. +func defaultServerURLFor(config *restConfig) (*url.URL, error) { + // TODO: move the default to secure when the apiserver supports TLS by default + // config.Insecure is taken to mean "I want HTTPS but don't bother checking the certs against a CA." + hasCA := len(config.TLSClientConfig.CAFile) != 0 || len(config.TLSClientConfig.CAData) != 0 + hasCert := len(config.TLSClientConfig.CertFile) != 0 || len(config.TLSClientConfig.CertData) != 0 + defaultTLS := hasCA || hasCert || config.Insecure + host := config.Host + if host == "" { + host = "localhost" + } + + // REMOVED: Configurable APIPath, GroupVersion + return defaultServerURL(host, defaultTLS) +} + +// transportFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.transportFor. +// TransportFor returns an http.RoundTripper that will provide the authentication +// or transport level security defined by the provided Config. Will return the +// default http.DefaultTransport if no special case behavior is needed. +func transportFor(config *restConfig) (http.RoundTripper, error) { + // REMOVED: separation between restclient.Config and transport.Config, Transport, WrapTransport support + return transportNew(config) +} + +// isConfigTransportTLS is a modified copy of k8s.io/kubernetes/pkg/client/restclient.IsConfigTransportTLS. +// IsConfigTransportTLS returns true if and only if the provided +// config will result in a protected connection to the server when it +// is passed to restclient.RESTClientFor(). Use to determine when to +// send credentials over the wire. +// +// Note: the Insecure flag is ignored when testing for this value, so MITM attacks are +// still possible. +func isConfigTransportTLS(config restConfig) bool { + baseURL, err := defaultServerURLFor(&config) + if err != nil { + return false + } + return baseURL.Scheme == "https" +} + +// transportNew is a modified copy of k8s.io/kubernetes/pkg/client/transport.New. +// New returns an http.RoundTripper that will provide the authentication +// or transport level security defined by the provided Config. +func transportNew(config *restConfig) (http.RoundTripper, error) { + // REMOVED: custom config.Transport support. + // Set transport level security + + var ( + rt http.RoundTripper + err error + ) + + rt, err = tlsCacheGet(config) + if err != nil { + return nil, err + } + + // REMOVED: HTTPWrappersForConfig(config, rt) in favor of the caller setting HTTP headers itself based on restConfig. 
Only this inlined check remains. + if len(config.Username) != 0 && len(config.BearerToken) != 0 { + return nil, errors.New("username/password or bearer token may be set, but not both") + } + + return rt, nil +} + +// newProxierWithNoProxyCIDR is a modified copy of k8s.io/apimachinery/pkg/util/net.NewProxierWithNoProxyCIDR. +// NewProxierWithNoProxyCIDR constructs a Proxier function that respects CIDRs in NO_PROXY and delegates if +// no matching CIDRs are found +func newProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error)) func(req *http.Request) (*url.URL, error) { + // we wrap the default method, so we only need to perform our check if the NO_PROXY envvar has a CIDR in it + noProxyEnv := os.Getenv("NO_PROXY") + noProxyRules := strings.Split(noProxyEnv, ",") + + cidrs := []netip.Prefix{} + for _, noProxyRule := range noProxyRules { + prefix, err := netip.ParsePrefix(noProxyRule) + if err == nil { + cidrs = append(cidrs, prefix) + } + } + + if len(cidrs) == 0 { + return delegate + } + + return func(req *http.Request) (*url.URL, error) { + host := req.URL.Host + // for some urls, the Host is already the host, not the host:port + if _, err := netip.ParseAddr(host); err != nil { + var err error + host, _, err = net.SplitHostPort(req.URL.Host) + if err != nil { + return delegate(req) + } + } + + ip, err := netip.ParseAddr(host) + if err != nil { + return delegate(req) + } + + if slices.ContainsFunc(cidrs, func(cidr netip.Prefix) bool { + return cidr.Contains(ip) + }) { + return nil, nil + } + + return delegate(req) + } +} + +// tlsCacheGet is a modified copy of k8s.io/kubernetes/pkg/client/transport.tlsTransportCache.get. +func tlsCacheGet(config *restConfig) (http.RoundTripper, error) { + // REMOVED: any actual caching + + // Get the TLS options for this client config + tlsConfig, err := tlsConfigFor(config) + if err != nil { + return nil, err + } + // The options didn't require a custom TLS config + if tlsConfig == nil { + return http.DefaultTransport, nil + } + + // REMOVED: Call to k8s.io/apimachinery/pkg/util/net.SetTransportDefaults; instead of the generic machinery and conditionals, hard-coded the result here. + t := &http.Transport{ + // http.ProxyFromEnvironment doesn't respect CIDRs and that makes it impossible to exclude things like pod and service IPs from proxy settings + // ProxierWithNoProxyCIDR allows CIDR rules in NO_PROXY + Proxy: newProxierWithNoProxyCIDR(http.ProxyFromEnvironment), + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: tlsConfig, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + } + // Allow clients to disable http2 if needed. + if s := os.Getenv("DISABLE_HTTP2"); len(s) == 0 { + t.ForceAttemptHTTP2 = true + } + return t, nil +} + +// tlsConfigFor is a modified copy of k8s.io/kubernetes/pkg/client/transport.TLSConfigFor. +// TLSConfigFor returns a tls.Config that will provide the transport level security defined +// by the provided Config. Will return nil if no transport level security is requested. 
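newProxierWithNoProxyCIDR above only augments the delegate proxier with CIDR matching against NO_PROXY. The same host check in isolation, a minimal sketch with illustrative addresses:

    package main

    import (
        "fmt"
        "net"
        "net/netip"
    )

    // inNoProxyCIDR reports whether host (optionally host:port) falls inside
    // any of the CIDR rules, mirroring the proxier wrapper above.
    func inNoProxyCIDR(host string, cidrs []netip.Prefix) bool {
        if _, err := netip.ParseAddr(host); err != nil {
            h, _, err := net.SplitHostPort(host)
            if err != nil {
                return false // not host:port either; the wrapper would delegate
            }
            host = h
        }
        ip, err := netip.ParseAddr(host)
        if err != nil {
            return false // hostname, not an IP; the wrapper would delegate
        }
        for _, cidr := range cidrs {
            if cidr.Contains(ip) {
                return true
            }
        }
        return false
    }

    func main() {
        cidrs := []netip.Prefix{netip.MustParsePrefix("10.0.0.0/8")}
        fmt.Println(inNoProxyCIDR("10.1.2.3:443", cidrs)) // true: bypass the proxy
        fmt.Println(inNoProxyCIDR("192.0.2.7", cidrs))    // false: delegate
    }
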
+func tlsConfigFor(c *restConfig) (*tls.Config, error) { + if !(c.HasCA() || c.HasCertAuth() || c.Insecure) { + return nil, nil + } + if c.HasCA() && c.Insecure { + return nil, errors.New("specifying a root certificates file with the insecure flag is not allowed") + } + if err := loadTLSFiles(c); err != nil { + return nil, err + } + + tlsConfig := &tls.Config{ + InsecureSkipVerify: c.Insecure, + } + + if c.HasCA() { + tlsConfig.RootCAs = rootCertPool(c.TLSClientConfig.CAData) + } + + if c.HasCertAuth() { + cert, err := tls.X509KeyPair(c.TLSClientConfig.CertData, c.TLSClientConfig.KeyData) + if err != nil { + return nil, err + } + tlsConfig.Certificates = []tls.Certificate{cert} + } + + return tlsConfig, nil +} + +// loadTLSFiles is a modified copy of k8s.io/kubernetes/pkg/client/transport.loadTLSFiles. +// loadTLSFiles copies the data from the CertFile, KeyFile, and CAFile fields into the CertData, +// KeyData, and CAFile fields, or returns an error. If no error is returned, all three fields are +// either populated or were empty to start. +func loadTLSFiles(c *restConfig) error { + var err error + c.TLSClientConfig.CAData, err = dataFromSliceOrFile(c.TLSClientConfig.CAData, c.TLSClientConfig.CAFile) + if err != nil { + return err + } + + c.TLSClientConfig.CertData, err = dataFromSliceOrFile(c.TLSClientConfig.CertData, c.TLSClientConfig.CertFile) + if err != nil { + return err + } + + c.TLSClientConfig.KeyData, err = dataFromSliceOrFile(c.TLSClientConfig.KeyData, c.TLSClientConfig.KeyFile) + if err != nil { + return err + } + return nil +} + +// dataFromSliceOrFile is a modified copy of k8s.io/kubernetes/pkg/client/transport.dataFromSliceOrFile. +// dataFromSliceOrFile returns data from the slice (if non-empty), or from the file, +// or an error if an error occurred reading the file +func dataFromSliceOrFile(data []byte, file string) ([]byte, error) { + if len(data) > 0 { + return data, nil + } + if len(file) > 0 { + fileData, err := os.ReadFile(file) + if err != nil { + return []byte{}, err + } + return fileData, nil + } + return nil, nil +} + +// rootCertPool is a modified copy of k8s.io/kubernetes/pkg/client/transport.rootCertPool. +// rootCertPool returns nil if caData is empty. When passed along, this will mean "use system CAs". +// When caData is not empty, it will be the ONLY information used in the CertPool. +func rootCertPool(caData []byte) *x509.CertPool { + // What we really want is a copy of x509.systemRootsPool, but that isn't exposed. It's difficult to build (see the go + // code for a look at the platform specific insanity), so we'll use the fact that RootCAs == nil gives us the system values + // It doesn't allow trusting either/or, but hopefully that won't be an issue + if len(caData) == 0 { + return nil + } + + // if we have caData, use it + certPool := x509.NewCertPool() + certPool.AppendCertsFromPEM(caData) + return certPool +} + +// HasCA is a modified copy of k8s.io/kubernetes/pkg/client/transport.Config.HasCA. +// HasCA returns whether the configuration has a certificate authority or not. +func (c *restConfig) HasCA() bool { + return len(c.TLSClientConfig.CAData) > 0 || len(c.TLSClientConfig.CAFile) > 0 +} + +// HasCertAuth is a modified copy of k8s.io/kubernetes/pkg/client/transport.Config.HasCertAuth. +// HasCertAuth returns whether the configuration has certificate authentication or not. 
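rootCertPool above trades the system trust store for a caller-supplied pool: a nil RootCAs means "use system CAs", while provided CA data becomes the only trusted material. A small sketch of consuming that choice in a tls.Config; the PEM path is hypothetical:

    package main

    import (
        "crypto/tls"
        "crypto/x509"
        "fmt"
        "os"
    )

    func main() {
        // Hypothetical CA bundle; with no data, RootCAs stays nil and the
        // system trust store is used, matching rootCertPool above.
        caData, err := os.ReadFile("testdata/ca.pem")
        if err != nil {
            caData = nil
        }
        var pool *x509.CertPool
        if len(caData) > 0 {
            pool = x509.NewCertPool()
            if !pool.AppendCertsFromPEM(caData) {
                fmt.Println("no PEM certificates found in CA data")
            }
        }
        cfg := &tls.Config{RootCAs: pool}
        fmt.Println("using system roots:", cfg.RootCAs == nil)
    }
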
+func (c *restConfig) HasCertAuth() bool { + return len(c.TLSClientConfig.CertData) != 0 || len(c.TLSClientConfig.CertFile) != 0 +} + +// clientcmdConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Config. +// Config holds the information needed to build connect to remote kubernetes clusters as a given user +// IMPORTANT if you add fields to this struct, please update IsConfigEmpty() +type clientcmdConfig struct { + // Clusters is a map of referenceable names to cluster configs + Clusters clustersMap `yaml:"clusters"` + // AuthInfos is a map of referenceable names to user configs + AuthInfos authInfosMap `yaml:"users"` + // Contexts is a map of referenceable names to context configs + Contexts contextsMap `yaml:"contexts"` + // CurrentContext is the name of the context that you would like to use by default + CurrentContext string `yaml:"current-context"` +} + +type clustersMap map[string]*clientcmdCluster + +func (m *clustersMap) UnmarshalYAML(value *yaml.Node) error { + var a []v1NamedCluster + if err := value.Decode(&a); err != nil { + return err + } + for _, e := range a { + cluster := e.Cluster // Allocates a new instance in each iteration + (*m)[e.Name] = &cluster + } + return nil +} + +type authInfosMap map[string]*clientcmdAuthInfo + +func (m *authInfosMap) UnmarshalYAML(value *yaml.Node) error { + var a []v1NamedAuthInfo + if err := value.Decode(&a); err != nil { + return err + } + for _, e := range a { + authInfo := e.AuthInfo // Allocates a new instance in each iteration + (*m)[e.Name] = &authInfo + } + return nil +} + +type contextsMap map[string]*clientcmdContext + +func (m *contextsMap) UnmarshalYAML(value *yaml.Node) error { + var a []v1NamedContext + if err := value.Decode(&a); err != nil { + return err + } + for _, e := range a { + context := e.Context // Allocates a new instance in each iteration + (*m)[e.Name] = &context + } + return nil +} + +// clientcmdNewConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.NewConfig. +// NewConfig is a convenience function that returns a new Config object with non-nil maps +func clientcmdNewConfig() *clientcmdConfig { + return &clientcmdConfig{ + Clusters: make(map[string]*clientcmdCluster), + AuthInfos: make(map[string]*clientcmdAuthInfo), + Contexts: make(map[string]*clientcmdContext), + } +} + +// yamlBinaryAsBase64String is a []byte that can be stored in yaml as a !!str, not a !!binary +type yamlBinaryAsBase64String []byte + +func (bin *yamlBinaryAsBase64String) UnmarshalText(text []byte) error { + res := make([]byte, base64.StdEncoding.DecodedLen(len(text))) + n, err := base64.StdEncoding.Decode(res, text) + if err != nil { + return err + } + *bin = res[:n] + return nil +} + +// clientcmdCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Cluster. +// Cluster contains information about how to communicate with a kubernetes cluster +type clientcmdCluster struct { + // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. + LocationOfOrigin string + // Server is the address of the kubernetes cluster (https://hostname:port). + Server string `yaml:"server"` + // InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure. + InsecureSkipTLSVerify bool `yaml:"insecure-skip-tls-verify,omitempty"` + // CertificateAuthority is the path to a cert file for the certificate authority. 
+	CertificateAuthority string `yaml:"certificate-authority,omitempty"`
+	// CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority
+	CertificateAuthorityData yamlBinaryAsBase64String `yaml:"certificate-authority-data,omitempty"`
+}
+
+// clientcmdAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.AuthInfo.
+// AuthInfo contains information that describes identity information. This is used to tell the kubernetes cluster who you are.
+type clientcmdAuthInfo struct {
+	// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
+	LocationOfOrigin string
+	// ClientCertificate is the path to a client cert file for TLS.
+	ClientCertificate string `yaml:"client-certificate,omitempty"`
+	// ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate
+	ClientCertificateData yamlBinaryAsBase64String `yaml:"client-certificate-data,omitempty"`
+	// ClientKey is the path to a client key file for TLS.
+	ClientKey string `yaml:"client-key,omitempty"`
+	// ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey
+	ClientKeyData yamlBinaryAsBase64String `yaml:"client-key-data,omitempty"`
+	// Token is the bearer token for authentication to the kubernetes cluster.
+	Token string `yaml:"token,omitempty"`
+	// Username is the username for basic authentication to the kubernetes cluster.
+	Username string `yaml:"username,omitempty"`
+	// Password is the password for basic authentication to the kubernetes cluster.
+	Password string `yaml:"password,omitempty"`
+}
+
+// clientcmdContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Context.
+// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with)
+type clientcmdContext struct {
+	// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
+	LocationOfOrigin string
+	// Cluster is the name of the cluster for this context
+	Cluster string `yaml:"cluster"`
+	// AuthInfo is the name of the authInfo for this context
+	AuthInfo string `yaml:"user"`
+	// Namespace is the default namespace to use on unspecified requests
+	Namespace string `yaml:"namespace,omitempty"`
+}
+
+// v1NamedCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedCluster.
+// NamedCluster relates nicknames to cluster information
+type v1NamedCluster struct {
+	// Name is the nickname for this Cluster
+	Name string `yaml:"name"`
+	// Cluster holds the cluster information
+	Cluster clientcmdCluster `yaml:"cluster"`
+}
+
+// v1NamedContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedContext.
+// NamedContext relates nicknames to context information
+type v1NamedContext struct {
+	// Name is the nickname for this Context
+	Name string `yaml:"name"`
+	// Context holds the context information
+	Context clientcmdContext `yaml:"context"`
+}
+
+// v1NamedAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedAuthInfo.
+// NamedAuthInfo relates nicknames to auth information +type v1NamedAuthInfo struct { + // Name is the nickname for this AuthInfo + Name string `yaml:"name"` + // AuthInfo holds the auth information + AuthInfo clientcmdAuthInfo `yaml:"user"` +} diff --git a/vendor/github.com/containers/image/v5/openshift/openshift.go b/vendor/github.com/containers/image/v5/openshift/openshift.go new file mode 100644 index 0000000000..2c69afbe94 --- /dev/null +++ b/vendor/github.com/containers/image/v5/openshift/openshift.go @@ -0,0 +1,226 @@ +package openshift + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/version" + "github.com/sirupsen/logrus" +) + +// openshiftClient is configuration for dealing with a single image stream, for reading or writing. +type openshiftClient struct { + ref openshiftReference + baseURL *url.URL + // Values from Kubernetes configuration + httpClient *http.Client + bearerToken string // "" if not used + username string // "" if not used + password string // if username != "" +} + +// newOpenshiftClient creates a new openshiftClient for the specified reference. +func newOpenshiftClient(ref openshiftReference) (*openshiftClient, error) { + // We have already done this parsing in ParseReference, but thrown away + // httpClient. So, parse again. + // (We could also rework/split restClientFor to "get base URL" to be done + // in ParseReference, and "get httpClient" to be done here. But until/unless + // we support non-default clusters, this is good enough.) + + // Overall, this is modelled on openshift/origin/pkg/cmd/util/clientcmd.New().ClientConfig() and openshift/origin/pkg/client. + cmdConfig := defaultClientConfig() + logrus.Debugf("cmdConfig: %#v", cmdConfig) + restConfig, err := cmdConfig.ClientConfig() + if err != nil { + return nil, err + } + // REMOVED: SetOpenShiftDefaults (values are not overridable in config files, so hard-coded these defaults.) + logrus.Debugf("restConfig: %#v", restConfig) + baseURL, httpClient, err := restClientFor(restConfig) + if err != nil { + return nil, err + } + logrus.Debugf("URL: %#v", *baseURL) + + if httpClient == nil { + httpClient = http.DefaultClient + } + + return &openshiftClient{ + ref: ref, + baseURL: baseURL, + httpClient: httpClient, + bearerToken: restConfig.BearerToken, + username: restConfig.Username, + password: restConfig.Password, + }, nil +} + +func (c *openshiftClient) close() { + c.httpClient.CloseIdleConnections() +} + +// doRequest performs a correctly authenticated request to a specified path, and returns response body or an error object. 
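newOpenshiftClient above carries either a bearer token or basic-auth credentials out of the resolved restConfig, and doRequest below applies whichever is set on each request. A standalone sketch of that header selection; the endpoint and credentials are placeholders:

    package main

    import (
        "fmt"
        "net/http"
    )

    func main() {
        req, err := http.NewRequest(http.MethodGet, "https://openshift.example:8443/oapi/v1", nil)
        if err != nil {
            panic(err)
        }
        bearerToken, username, password := "", "alice", "secret" // placeholder credentials
        // The bearer token wins; otherwise fall back to basic auth, as in doRequest below.
        if len(bearerToken) != 0 {
            req.Header.Set("Authorization", "Bearer "+bearerToken)
        } else if len(username) != 0 {
            req.SetBasicAuth(username, password)
        }
        req.Header.Set("Accept", "application/json, */*")
        fmt.Println(req.Header.Get("Authorization"))
    }
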
+func (c *openshiftClient) doRequest(ctx context.Context, method, path string, requestBody []byte) ([]byte, error) { + requestURL := *c.baseURL + requestURL.Path = path + var requestBodyReader io.Reader + if requestBody != nil { + logrus.Debugf("Will send body: %s", requestBody) + requestBodyReader = bytes.NewReader(requestBody) + } + req, err := http.NewRequestWithContext(ctx, method, requestURL.String(), requestBodyReader) + if err != nil { + return nil, err + } + + if len(c.bearerToken) != 0 { + req.Header.Set("Authorization", "Bearer "+c.bearerToken) + } else if len(c.username) != 0 { + req.SetBasicAuth(c.username, c.password) + } + req.Header.Set("Accept", "application/json, */*") + req.Header.Set("User-Agent", fmt.Sprintf("skopeo/%s", version.Version)) + if requestBody != nil { + req.Header.Set("Content-Type", "application/json") + } + + logrus.Debugf("%s %s", method, requestURL.Redacted()) + res, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + body, err := iolimits.ReadAtMost(res.Body, iolimits.MaxOpenShiftStatusBody) + if err != nil { + return nil, err + } + logrus.Debugf("Got body: %s", body) + // FIXME: Just throwing this useful information away only to try to guess later... + logrus.Debugf("Got content-type: %s", res.Header.Get("Content-Type")) + + var status status + statusValid := false + if err := json.Unmarshal(body, &status); err == nil && len(status.Status) > 0 { + statusValid = true + } + + switch { + case res.StatusCode == http.StatusSwitchingProtocols: // FIXME?! No idea why this weird case exists in k8s.io/kubernetes/pkg/client/restclient. + if statusValid && status.Status != "Success" { + return nil, errors.New(status.Message) + } + case res.StatusCode >= http.StatusOK && res.StatusCode <= http.StatusPartialContent: + // OK. + default: + if statusValid { + return nil, errors.New(status.Message) + } + return nil, fmt.Errorf("HTTP error: status code: %d (%s), body: %s", res.StatusCode, http.StatusText(res.StatusCode), string(body)) + } + + return body, nil +} + +// getImage loads the specified image object. +func (c *openshiftClient) getImage(ctx context.Context, imageStreamImageName string) (*image, error) { + // FIXME: validate components per validation.IsValidPathSegmentName? + path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreamimages/%s@%s", c.ref.namespace, c.ref.stream, imageStreamImageName) + body, err := c.doRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, err + } + // Note: This does absolutely no kind/version checking or conversions. + var isi imageStreamImage + if err := json.Unmarshal(body, &isi); err != nil { + return nil, err + } + return &isi.Image, nil +} + +// convertDockerImageReference takes an image API DockerImageReference value and returns a reference we can actually use; +// currently OpenShift stores the cluster-internal service IPs here, which are unusable from the outside. +func (c *openshiftClient) convertDockerImageReference(ref string) (string, error) { + _, repo, gotRepo := strings.Cut(ref, "/") + if !gotRepo { + return "", fmt.Errorf("Invalid format of docker reference %s: missing '/'", ref) + } + return reference.Domain(c.ref.dockerReference) + "/" + repo, nil +} + +// These structs are subsets of github.com/openshift/origin/pkg/image/api/v1 and its dependencies. 
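Just above, convertDockerImageReference keeps the repository path but swaps in the externally reachable registry host. The same cut-and-rejoin in isolation; the hosts are illustrative:

    package main

    import (
        "fmt"
        "strings"
    )

    // rewriteHost replaces the host part of host/repo[:tag] with newHost,
    // mirroring convertDockerImageReference above.
    func rewriteHost(ref, newHost string) (string, error) {
        _, repo, ok := strings.Cut(ref, "/")
        if !ok {
            return "", fmt.Errorf("invalid docker reference %s: missing '/'", ref)
        }
        return newHost + "/" + repo, nil
    }

    func main() {
        // The cluster-internal service IP is unusable from the outside.
        out, err := rewriteHost("172.30.0.1:5000/myns/mystream:latest", "registry.example.com:5000")
        if err != nil {
            panic(err)
        }
        fmt.Println(out) // registry.example.com:5000/myns/mystream:latest
    }
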
+type imageStream struct { + Status imageStreamStatus `json:"status,omitempty"` +} +type imageStreamStatus struct { + DockerImageRepository string `json:"dockerImageRepository"` + Tags []namedTagEventList `json:"tags,omitempty"` +} +type namedTagEventList struct { + Tag string `json:"tag"` + Items []tagEvent `json:"items"` +} +type tagEvent struct { + DockerImageReference string `json:"dockerImageReference"` + Image string `json:"image"` +} +type imageStreamImage struct { + Image image `json:"image"` +} +type image struct { + objectMeta `json:"metadata,omitempty"` + DockerImageReference string `json:"dockerImageReference,omitempty"` + // DockerImageMetadata runtime.RawExtension `json:"dockerImageMetadata,omitempty"` + DockerImageMetadataVersion string `json:"dockerImageMetadataVersion,omitempty"` + DockerImageManifest string `json:"dockerImageManifest,omitempty"` + // DockerImageLayers []ImageLayer `json:"dockerImageLayers"` + Signatures []imageSignature `json:"signatures,omitempty"` +} + +const imageSignatureTypeAtomic string = "atomic" + +type imageSignature struct { + typeMeta `json:",inline"` + objectMeta `json:"metadata,omitempty"` + Type string `json:"type"` + Content []byte `json:"content"` + // Conditions []SignatureCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + // ImageIdentity string `json:"imageIdentity,omitempty"` + // SignedClaims map[string]string `json:"signedClaims,omitempty"` + // Created *unversioned.Time `json:"created,omitempty"` + // IssuedBy SignatureIssuer `json:"issuedBy,omitempty"` + // IssuedTo SignatureSubject `json:"issuedTo,omitempty"` +} +type typeMeta struct { + Kind string `json:"kind,omitempty"` + APIVersion string `json:"apiVersion,omitempty"` +} +type objectMeta struct { + Name string `json:"name,omitempty"` + GenerateName string `json:"generateName,omitempty"` + Namespace string `json:"namespace,omitempty"` + SelfLink string `json:"selfLink,omitempty"` + ResourceVersion string `json:"resourceVersion,omitempty"` + Generation int64 `json:"generation,omitempty"` + DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + Annotations map[string]string `json:"annotations,omitempty"` +} + +// A subset of k8s.io/kubernetes/pkg/api/unversioned/Status +type status struct { + Status string `json:"status,omitempty"` + Message string `json:"message,omitempty"` + // Reason StatusReason `json:"reason,omitempty"` + // Details *StatusDetails `json:"details,omitempty"` + Code int32 `json:"code,omitempty"` +} diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_dest.go b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go new file mode 100644 index 0000000000..50a5339e1b --- /dev/null +++ b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go @@ -0,0 +1,248 @@ +package openshift + +import ( + "bytes" + "context" + "crypto/rand" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + + "github.com/containers/image/v5/docker" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/blobinfocache" + "github.com/containers/image/v5/internal/imagedestination" + "github.com/containers/image/v5/internal/imagedestination/impl" + "github.com/containers/image/v5/internal/imagedestination/stubs" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/set" + 
"github.com/containers/image/v5/internal/signature" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + "golang.org/x/exp/slices" +) + +type openshiftImageDestination struct { + impl.Compat + stubs.AlwaysSupportsSignatures + + client *openshiftClient + docker private.ImageDestination // The docker/distribution API endpoint + // State + imageStreamImageName string // "" if not yet known +} + +// newImageDestination creates a new ImageDestination for the specified reference. +func newImageDestination(ctx context.Context, sys *types.SystemContext, ref openshiftReference) (private.ImageDestination, error) { + client, err := newOpenshiftClient(ref) + if err != nil { + return nil, err + } + + // FIXME: Should this always use a digest, not a tag? Uploading to Docker by tag requires the tag _inside_ the manifest to match, + // i.e. a single signed image cannot be available under multiple tags. But with types.ImageDestination, we don't know + // the manifest digest at this point. + dockerRefString := fmt.Sprintf("//%s/%s/%s:%s", reference.Domain(client.ref.dockerReference), client.ref.namespace, client.ref.stream, client.ref.dockerReference.Tag()) + dockerRef, err := docker.ParseReference(dockerRefString) + if err != nil { + return nil, err + } + docker, err := dockerRef.NewImageDestination(ctx, sys) + if err != nil { + return nil, err + } + + d := &openshiftImageDestination{ + client: client, + docker: imagedestination.FromPublic(docker), + } + d.Compat = impl.AddCompat(d) + return d, nil +} + +// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, +// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. +func (d *openshiftImageDestination) Reference() types.ImageReference { + return d.client.ref +} + +// Close removes resources associated with an initialized ImageDestination, if any. +func (d *openshiftImageDestination) Close() error { + err := d.docker.Close() + d.client.close() + return err +} + +func (d *openshiftImageDestination) SupportedManifestMIMETypes() []string { + return d.docker.SupportedManifestMIMETypes() +} + +func (d *openshiftImageDestination) DesiredLayerCompression() types.LayerCompression { + return types.Compress +} + +// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually +// uploaded to the image destination, true otherwise. +func (d *openshiftImageDestination) AcceptsForeignLayerURLs() bool { + return true +} + +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise. +func (d *openshiftImageDestination) MustMatchRuntimeOS() bool { + return false +} + +// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), +// and would prefer to receive an unmodified manifest instead of one modified for the destination. +// Does not make a difference if Reference().DockerReference() is nil. +func (d *openshiftImageDestination) IgnoresEmbeddedDockerReference() bool { + return d.docker.IgnoresEmbeddedDockerReference() +} + +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (d *openshiftImageDestination) HasThreadSafePutBlob() bool { + return false +} + +// SupportsPutBlobPartial returns true if PutBlobPartial is supported. 
+func (d *openshiftImageDestination) SupportsPutBlobPartial() bool { + return d.docker.SupportsPutBlobPartial() +} + +// PutBlobWithOptions writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. +// inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far. +func (d *openshiftImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) { + return d.docker.PutBlobWithOptions(ctx, stream, inputInfo, options) +} + +// PutBlobPartial attempts to create a blob using the data that is already present +// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks. +// It is available only if SupportsPutBlobPartial(). +// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller +// should fall back to PutBlobWithOptions. +func (d *openshiftImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) { + return d.docker.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache) +} + +// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). +// info.Digest must not be empty. +// If the blob has been successfully reused, returns (true, info, nil). +// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. +func (d *openshiftImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { + return d.docker.TryReusingBlobWithOptions(ctx, info, options) +} + +// PutManifest writes manifest to the destination. +// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. +// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), +// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. +func (d *openshiftImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error { + if instanceDigest == nil { + manifestDigest, err := manifest.Digest(m) + if err != nil { + return err + } + d.imageStreamImageName = manifestDigest.String() + } + return d.docker.PutManifest(ctx, m, instanceDigest) +} + +// PutSignaturesWithFormat writes a set of signatures to the destination. 
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for +// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. +// MUST be called after PutManifest (signatures may reference manifest contents). +func (d *openshiftImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error { + var imageStreamImageName string + if instanceDigest == nil { + if d.imageStreamImageName == "" { + return errors.New("Internal error: Unknown manifest digest, can't add signatures") + } + imageStreamImageName = d.imageStreamImageName + } else { + imageStreamImageName = instanceDigest.String() + } + + // Because image signatures are a shared resource in Atomic Registry, the default upload + // always adds signatures. Eventually we should also allow removing signatures. + + if len(signatures) == 0 { + return nil // No need to even read the old state. + } + + image, err := d.client.getImage(ctx, imageStreamImageName) + if err != nil { + return err + } + existingSigNames := set.New[string]() + for _, sig := range image.Signatures { + existingSigNames.Add(sig.objectMeta.Name) + } + + for _, newSigWithFormat := range signatures { + newSigSimple, ok := newSigWithFormat.(signature.SimpleSigning) + if !ok { + return signature.UnsupportedFormatError(newSigWithFormat) + } + newSig := newSigSimple.UntrustedSignature() + + if slices.ContainsFunc(image.Signatures, func(existingSig imageSignature) bool { + return existingSig.Type == imageSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) + }) { + continue + } + + // The API expect us to invent a new unique name. This is racy, but hopefully good enough. + var signatureName string + for { + randBytes := make([]byte, 16) + n, err := rand.Read(randBytes) + if err != nil || n != 16 { + return fmt.Errorf("generating random signature len %d: %w", n, err) + } + signatureName = fmt.Sprintf("%s@%032x", imageStreamImageName, randBytes) + if !existingSigNames.Contains(signatureName) { + break + } + } + // Note: This does absolutely no kind/version checking or conversions. + sig := imageSignature{ + typeMeta: typeMeta{ + Kind: "ImageSignature", + APIVersion: "v1", + }, + objectMeta: objectMeta{Name: signatureName}, + Type: imageSignatureTypeAtomic, + Content: newSig, + } + body, err := json.Marshal(sig) + if err != nil { + return err + } + _, err = d.client.doRequest(ctx, http.MethodPost, "/oapi/v1/imagesignatures", body) + if err != nil { + return err + } + } + + return nil +} + +// Commit marks the process of storing the image as successful and asks for the image to be persisted. +// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list +// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the +// original manifest list digest, if desired. +// WARNING: This does not have any transactional semantics: +// - Uploaded data MAY be visible to others before Commit() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. 
rollback is allowed but not guaranteed) +func (d *openshiftImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { + return d.docker.Commit(ctx, unparsedToplevel) +} diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_src.go b/vendor/github.com/containers/image/v5/openshift/openshift_src.go new file mode 100644 index 0000000000..0ac0127ee7 --- /dev/null +++ b/vendor/github.com/containers/image/v5/openshift/openshift_src.go @@ -0,0 +1,174 @@ +package openshift + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + + "github.com/containers/image/v5/docker" + "github.com/containers/image/v5/internal/imagesource/impl" + "github.com/containers/image/v5/internal/imagesource/stubs" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/signature" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" +) + +type openshiftImageSource struct { + impl.Compat + impl.DoesNotAffectLayerInfosForCopy + // This is slightly suboptimal. We could forward GetBlobAt(), but we need to call ensureImageIsResolved in SupportsGetBlobAt(), + // and that method doesn’t provide a context for timing out. That could actually be fixed (SupportsGetBlobAt is private and we + // can change it), but this is a deprecated transport anyway, so for now we just punt. + stubs.NoGetBlobAtInitialize + + client *openshiftClient + // Values specific to this image + sys *types.SystemContext + // State + docker types.ImageSource // The docker/distribution API endpoint, or nil if not resolved yet + imageStreamImageName string // Resolved image identifier, or "" if not known yet +} + +// newImageSource creates a new ImageSource for the specified reference. +// The caller must call .Close() on the returned ImageSource. +func newImageSource(sys *types.SystemContext, ref openshiftReference) (private.ImageSource, error) { + client, err := newOpenshiftClient(ref) + if err != nil { + return nil, err + } + + s := &openshiftImageSource{ + NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref), + + client: client, + sys: sys, + } + s.Compat = impl.AddCompat(s) + return s, nil +} + +// Reference returns the reference used to set up this source, _as specified by the user_ +// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. +func (s *openshiftImageSource) Reference() types.ImageReference { + return s.client.ref +} + +// Close removes resources associated with an initialized ImageSource, if any. +func (s *openshiftImageSource) Close() error { + var err error + if s.docker != nil { + err = s.docker.Close() + s.docker = nil + } + + s.client.close() + + return err +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). 
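ensureImageIsResolved further below picks a tag event for the requested tag from the image stream status. The selection logic in isolation; the stream data is fabricated and Items are assumed to be ordered newest-first, matching the use of index 0 in the code below:

    package main

    import "fmt"

    type tagEvent struct {
        DockerImageReference string
        Image                string
    }

    type namedTagEventList struct {
        Tag   string
        Items []tagEvent // assumed newest-first
    }

    // latestEvent returns the newest event for wantTag, mirroring the loop in
    // ensureImageIsResolved below.
    func latestEvent(tags []namedTagEventList, wantTag string) *tagEvent {
        for _, tag := range tags {
            if tag.Tag != wantTag {
                continue
            }
            if len(tag.Items) > 0 {
                return &tag.Items[0]
            }
        }
        return nil
    }

    func main() {
        tags := []namedTagEventList{{
            Tag: "latest",
            Items: []tagEvent{
                {DockerImageReference: "172.30.0.1:5000/ns/stream@sha256:new", Image: "sha256:new"},
                {DockerImageReference: "172.30.0.1:5000/ns/stream@sha256:old", Image: "sha256:old"},
            },
        }}
        if te := latestEvent(tags, "latest"); te != nil {
            fmt.Println(te.Image) // sha256:new
        }
    }
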
+func (s *openshiftImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if err := s.ensureImageIsResolved(ctx); err != nil { + return nil, "", err + } + return s.docker.GetManifest(ctx, instanceDigest) +} + +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *openshiftImageSource) HasThreadSafeGetBlob() bool { + return false +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. +func (s *openshiftImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + if err := s.ensureImageIsResolved(ctx); err != nil { + return nil, 0, err + } + return s.docker.GetBlob(ctx, info, cache) +} + +// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (s *openshiftImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) { + var imageStreamImageName string + if instanceDigest == nil { + if err := s.ensureImageIsResolved(ctx); err != nil { + return nil, err + } + imageStreamImageName = s.imageStreamImageName + } else { + imageStreamImageName = instanceDigest.String() + } + image, err := s.client.getImage(ctx, imageStreamImageName) + if err != nil { + return nil, err + } + var sigs []signature.Signature + for _, sig := range image.Signatures { + if sig.Type == imageSignatureTypeAtomic { + sigs = append(sigs, signature.SimpleSigningFromBlob(sig.Content)) + } + } + return sigs, nil +} + +// ensureImageIsResolved sets up s.docker and s.imageStreamImageName +func (s *openshiftImageSource) ensureImageIsResolved(ctx context.Context) error { + if s.docker != nil { + return nil + } + + // FIXME: validate components per validation.IsValidPathSegmentName? + path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreams/%s", s.client.ref.namespace, s.client.ref.stream) + body, err := s.client.doRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return err + } + // Note: This does absolutely no kind/version checking or conversions. 
+ var is imageStream + if err := json.Unmarshal(body, &is); err != nil { + return err + } + var te *tagEvent + for _, tag := range is.Status.Tags { + if tag.Tag != s.client.ref.dockerReference.Tag() { + continue + } + if len(tag.Items) > 0 { + te = &tag.Items[0] + break + } + } + if te == nil { + return errors.New("No matching tag found") + } + logrus.Debugf("tag event %#v", te) + dockerRefString, err := s.client.convertDockerImageReference(te.DockerImageReference) + if err != nil { + return err + } + logrus.Debugf("Resolved reference %#v", dockerRefString) + dockerRef, err := docker.ParseReference("//" + dockerRefString) + if err != nil { + return err + } + d, err := dockerRef.NewImageSource(ctx, s.sys) + if err != nil { + return err + } + s.docker = d + s.imageStreamImageName = te.Image + return nil +} diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_transport.go b/vendor/github.com/containers/image/v5/openshift/openshift_transport.go new file mode 100644 index 0000000000..0ba435d560 --- /dev/null +++ b/vendor/github.com/containers/image/v5/openshift/openshift_transport.go @@ -0,0 +1,153 @@ +package openshift + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/containers/image/v5/docker/policyconfiguration" + "github.com/containers/image/v5/docker/reference" + genericImage "github.com/containers/image/v5/internal/image" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/regexp" +) + +func init() { + transports.Register(Transport) +} + +// Transport is an ImageTransport for OpenShift registry-hosted images. +var Transport = openshiftTransport{} + +type openshiftTransport struct{} + +func (t openshiftTransport) Name() string { + return "atomic" +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func (t openshiftTransport) ParseReference(reference string) (types.ImageReference, error) { + return ParseReference(reference) +} + +// Note that imageNameRegexp is namespace/stream:tag, this +// is HOSTNAME/namespace/stream:tag or parent prefixes. +// Keep this in sync with imageNameRegexp! +var scopeRegexp = regexp.Delayed("^[^/]*(/[^:/]*(/[^:/]*(:[^:/]*)?)?)?$") + +// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys +// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). +// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. +// scope passed to this function will not be "", that value is always allowed. +func (t openshiftTransport) ValidatePolicyConfigurationScope(scope string) error { + if scopeRegexp.FindStringIndex(scope) == nil { + return fmt.Errorf("Invalid scope name %s", scope) + } + return nil +} + +// openshiftReference is an ImageReference for OpenShift images. +type openshiftReference struct { + dockerReference reference.NamedTagged + namespace string // Computed from dockerReference in advance. + stream string // Computed from dockerReference in advance. +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OpenShift ImageReference. 
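ParseReference and NewReference below split a registry reference into hostname, namespace/stream, and tag. A sketch of the underlying decomposition with the reference package, assuming its canonical github.com/containers/image/v5 import path; the reference string is an example:

    package main

    import (
        "fmt"

        "github.com/containers/image/v5/docker/reference"
    )

    func main() {
        r, err := reference.ParseNormalizedNamed("registry.example.com/myns/mystream:latest")
        if err != nil {
            panic(err)
        }
        tagged, ok := r.(reference.NamedTagged)
        if !ok {
            panic("reference has no tag") // NewReference below requires a tagged name
        }
        // Domain/Path/Tag are the pieces NewReference below splits further.
        fmt.Println(reference.Domain(tagged)) // registry.example.com
        fmt.Println(reference.Path(tagged))   // myns/mystream
        fmt.Println(tagged.Tag())             // latest
    }
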
+func ParseReference(ref string) (types.ImageReference, error) { + r, err := reference.ParseNormalizedNamed(ref) + if err != nil { + return nil, fmt.Errorf("failed to parse image reference %q: %w", ref, err) + } + tagged, ok := r.(reference.NamedTagged) + if !ok { + return nil, fmt.Errorf("invalid image reference %s, expected format: 'hostname/namespace/stream:tag'", ref) + } + return NewReference(tagged) +} + +// NewReference returns an OpenShift reference for a reference.NamedTagged +func NewReference(dockerRef reference.NamedTagged) (types.ImageReference, error) { + r := strings.SplitN(reference.Path(dockerRef), "/", 3) + if len(r) != 2 { + return nil, fmt.Errorf("invalid image reference: %s, expected format: 'hostname/namespace/stream:tag'", + reference.FamiliarString(dockerRef)) + } + return openshiftReference{ + namespace: r[0], + stream: r[1], + dockerReference: dockerRef, + }, nil +} + +func (ref openshiftReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. +func (ref openshiftReference) StringWithinTransport() string { + return reference.FamiliarString(ref.dockerReference) +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, +// not e.g. after redirect or alias processing), or nil if unknown/not applicable. +func (ref openshiftReference) DockerReference() reference.Named { + return ref.dockerReference +} + +// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. +// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; +// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical +// (i.e. various references with exactly the same semantics should return the same configuration identity) +// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but +// not required/guaranteed that it will be a valid input to Transport().ParseReference(). +// Returns "" if configuration identities for these references are not supported. +func (ref openshiftReference) PolicyConfigurationIdentity() string { + res, err := policyconfiguration.DockerReferenceIdentity(ref.dockerReference) + if res == "" || err != nil { // Coverage: Should never happen, NewReference constructs a valid tagged reference. + panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err)) + } + return res +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. 
+// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+// and each following element to be a prefix of the element preceding it.
+func (ref openshiftReference) PolicyConfigurationNamespaces() []string {
+	return policyconfiguration.DockerReferenceNamespaces(ref.dockerReference)
+}
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref openshiftReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+	return genericImage.FromReference(ctx, sys, ref)
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
+func (ref openshiftReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+	return newImageSource(sys, ref)
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref openshiftReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+	return newImageDestination(ctx, sys, ref)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref openshiftReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+	return errors.New("Deleting images not implemented for atomic: images")
+}
diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go
new file mode 100644
index 0000000000..d00a0cdf86
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go
@@ -0,0 +1,517 @@
+//go:build containers_image_ostree
+// +build containers_image_ostree
+
+package ostree
+
+import (
+	"bytes"
+	"context"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"runtime"
+	"strconv"
+	"strings"
+	"syscall"
+	"time"
+	"unsafe"
+
+	"github.com/containers/image/v5/internal/imagedestination/impl"
+	"github.com/containers/image/v5/internal/imagedestination/stubs"
+	"github.com/containers/image/v5/internal/private"
+	"github.com/containers/image/v5/internal/putblobdigest"
+	"github.com/containers/image/v5/internal/signature"
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/types"
+	"github.com/containers/storage/pkg/archive"
+	"github.com/klauspost/pgzip"
+	"github.com/opencontainers/go-digest"
+	selinux "github.com/opencontainers/selinux/go-selinux"
+	"github.com/ostreedev/ostree-go/pkg/otbuiltin"
+	"github.com/vbatts/tar-split/tar/asm"
+	"github.com/vbatts/tar-split/tar/storage"
+)
+
+// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1 libselinux
+// #include <glib.h>
+// #include <glib-object.h>
+// #include <gio/gio.h>
+// #include <stdlib.h>
+// #include <ostree.h>
+// #include <gio/ginputstream.h>
+// #include <selinux/selinux.h>
+// #include <selinux/label.h>
+import "C"
+
+type blobToImport struct {
+	Size     int64
+	Digest   digest.Digest
+	BlobPath string
+}
+
+type descriptor struct {
+	Size   int64         `json:"size"`
+	Digest digest.Digest `json:"digest"`
+}
+
+type fsLayersSchema1 struct {
+	BlobSum digest.Digest `json:"blobSum"`
+}
+
+type manifestSchema struct {
+	LayersDescriptors []descriptor      `json:"layers"`
+	FSLayers          []fsLayersSchema1 `json:"fsLayers"`
+}
+
+type ostreeImageDestination struct {
+	impl.Compat
+	impl.PropertyMethodsInitialize
+	stubs.NoPutBlobPartialInitialize
+	stubs.AlwaysSupportsSignatures
+
+	ref           ostreeReference
+	manifest      string
+	schema        manifestSchema
+	tmpDirPath    string
+	blobs         map[string]*blobToImport
+	digest        digest.Digest
+	signaturesLen int
+	repo          *C.struct_OstreeRepo
+}
+
+// newImageDestination returns an ImageDestination for writing to an existing ostree.
+func newImageDestination(ref ostreeReference, tmpDirPath string) (private.ImageDestination, error) {
+	tmpDirPath = filepath.Join(tmpDirPath, ref.branchName)
+	if err := ensureDirectoryExists(tmpDirPath); err != nil {
+		return nil, err
+	}
+	d := &ostreeImageDestination{
+		PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+			SupportedManifestMIMETypes:     []string{manifest.DockerV2Schema2MediaType},
+			DesiredLayerCompression:        types.PreserveOriginal,
+			AcceptsForeignLayerURLs:        false,
+			MustMatchRuntimeOS:             true,
+			IgnoresEmbeddedDockerReference: false, // N/A, DockerReference() returns nil.
+			HasThreadSafePutBlob:           false,
+		}),
+		NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref),
+
+		ref:           ref,
+		manifest:      "",
+		schema:        manifestSchema{},
+		tmpDirPath:    tmpDirPath,
+		blobs:         map[string]*blobToImport{},
+		digest:        "",
+		signaturesLen: 0,
+		repo:          nil,
+	}
+	d.Compat = impl.AddCompat(d)
+	return d, nil
+}
+
+// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (d *ostreeImageDestination) Reference() types.ImageReference {
+	return d.ref
+}
+
+// Close removes resources associated with an initialized ImageDestination, if any.
+func (d *ostreeImageDestination) Close() error {
+	if d.repo != nil {
+		C.g_object_unref(C.gpointer(d.repo))
+	}
+	return os.RemoveAll(d.tmpDirPath)
+}
+
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
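+// Implementation note (from the code below): the stream is staged as a temporary file
+// under d.tmpDirPath while its digest is computed, and recorded in d.blobs keyed by the
+// digest hex; nothing is written to the OSTree repository itself until Commit() imports
+// the staged blobs.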
+func (d *ostreeImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) { + tmpDir, err := os.MkdirTemp(d.tmpDirPath, "blob") + if err != nil { + return private.UploadedBlob{}, err + } + + blobPath := filepath.Join(tmpDir, "content") + blobFile, err := os.Create(blobPath) + if err != nil { + return private.UploadedBlob{}, err + } + defer blobFile.Close() + + digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo) + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). + size, err := io.Copy(blobFile, stream) + if err != nil { + return private.UploadedBlob{}, err + } + blobDigest := digester.Digest() + if inputInfo.Size != -1 && size != inputInfo.Size { + return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size) + } + if err := blobFile.Sync(); err != nil { + return private.UploadedBlob{}, err + } + + hash := blobDigest.Hex() + d.blobs[hash] = &blobToImport{Size: size, Digest: blobDigest, BlobPath: blobPath} + return private.UploadedBlob{Digest: blobDigest, Size: size}, nil +} + +func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, usermode bool) error { + entries, err := os.ReadDir(dir) + if err != nil { + return err + } + + for _, entry := range entries { + fullpath := filepath.Join(dir, entry.Name()) + if entry.Type()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 { + if err := os.Remove(fullpath); err != nil { + return err + } + continue + } + + info, err := entry.Info() + if err != nil { + return err + } + if selinuxHnd != nil { + relPath, err := filepath.Rel(root, fullpath) + if err != nil { + return err + } + // Handle /exports/hostfs as a special case. Files under this directory are copied to the host, + // thus we benefit from maintaining the same SELinux label they would have on the host as we could + // use hard links instead of copying the files. 
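+			// For example, "exports/hostfs/etc/resolv.conf" is looked up as "/etc/resolv.conf",
+			// while a path outside exports/hostfs, such as "usr/bin/env", is looked up as "/usr/bin/env".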
+ relPath = fmt.Sprintf("/%s", strings.TrimPrefix(relPath, "exports/hostfs/")) + + relPathC := C.CString(relPath) + defer C.free(unsafe.Pointer(relPathC)) + var context *C.char + + res, err := C.selabel_lookup_raw(selinuxHnd, &context, relPathC, C.int(info.Mode()&os.ModePerm)) + if int(res) < 0 && err != syscall.ENOENT { + return fmt.Errorf("cannot selabel_lookup_raw %s: %w", relPath, err) + } + if int(res) == 0 { + defer C.freecon(context) + fullpathC := C.CString(fullpath) + defer C.free(unsafe.Pointer(fullpathC)) + res, err = C.lsetfilecon_raw(fullpathC, context) + if int(res) < 0 { + return fmt.Errorf("cannot setfilecon_raw %s to %s: %w", fullpath, C.GoString(context), err) + } + } + } + + if entry.IsDir() { + if usermode { + if err := os.Chmod(fullpath, info.Mode()|0700); err != nil { + return err + } + } + err = fixFiles(selinuxHnd, root, fullpath, usermode) + if err != nil { + return err + } + } else if usermode && (entry.Type().IsRegular()) { + if err := os.Chmod(fullpath, info.Mode()|0600); err != nil { + return err + } + } + } + + return nil +} + +func (d *ostreeImageDestination) ostreeCommit(repo *otbuiltin.Repo, branch string, root string, metadata []string) error { + opts := otbuiltin.NewCommitOptions() + opts.AddMetadataString = metadata + opts.Timestamp = time.Now() + // OCI layers have no parent OSTree commit + opts.Parent = "0000000000000000000000000000000000000000000000000000000000000000" + _, err := repo.Commit(root, branch, opts) + return err +} + +func generateTarSplitMetadata(output *bytes.Buffer, file string) (digest.Digest, int64, error) { + mfz := pgzip.NewWriter(output) + defer mfz.Close() + metaPacker := storage.NewJSONPacker(mfz) + + stream, err := os.OpenFile(file, os.O_RDONLY, 0) + if err != nil { + return "", -1, err + } + defer stream.Close() + + gzReader, err := archive.DecompressStream(stream) + if err != nil { + return "", -1, err + } + defer gzReader.Close() + + its, err := asm.NewInputTarStream(gzReader, metaPacker, nil) + if err != nil { + return "", -1, err + } + + digester := digest.Canonical.Digester() + + written, err := io.Copy(digester.Hash(), its) + if err != nil { + return "", -1, err + } + + return digester.Digest(), written, nil +} + +func (d *ostreeImageDestination) importBlob(selinuxHnd *C.struct_selabel_handle, repo *otbuiltin.Repo, blob *blobToImport) error { + // TODO: This can take quite some time, and should ideally be cancellable using a context.Context. 
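+	// What follows, in order: generate tar-split metadata for the compressed blob, unpack it
+	// under a scratch directory (archive.UntarPath as root, or tar with --no-same-owner and
+	// --no-same-permissions otherwise), fix up special files, permissions and SELinux labels,
+	// and commit the tree to the ociimage/<digest> branch together with the docker.* and
+	// tarsplit.output metadata that the source side reads back.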
+ + ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex()) + destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Hex(), "root") + if err := ensureDirectoryExists(destinationPath); err != nil { + return err + } + defer func() { + os.Remove(blob.BlobPath) + os.RemoveAll(destinationPath) + }() + + var tarSplitOutput bytes.Buffer + uncompressedDigest, uncompressedSize, err := generateTarSplitMetadata(&tarSplitOutput, blob.BlobPath) + if err != nil { + return err + } + + if os.Getuid() == 0 { + if err := archive.UntarPath(blob.BlobPath, destinationPath); err != nil { + return err + } + if err := fixFiles(selinuxHnd, destinationPath, destinationPath, false); err != nil { + return err + } + } else { + os.MkdirAll(destinationPath, 0755) + if err := exec.Command("tar", "-C", destinationPath, "--no-same-owner", "--no-same-permissions", "--delay-directory-restore", "-xf", blob.BlobPath).Run(); err != nil { + return err + } + + if err := fixFiles(selinuxHnd, destinationPath, destinationPath, true); err != nil { + return err + } + } + return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size), + fmt.Sprintf("docker.uncompressed_size=%d", uncompressedSize), + fmt.Sprintf("docker.uncompressed_digest=%s", uncompressedDigest.String()), + fmt.Sprintf("tarsplit.output=%s", base64.StdEncoding.EncodeToString(tarSplitOutput.Bytes()))}) + +} + +func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobToImport) error { + ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex()) + destinationPath := filepath.Dir(blob.BlobPath) + + return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)}) +} + +// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). +// info.Digest must not be empty. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may +// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be +// reflected in the manifest that will be written. +// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. 
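+// For this transport, reuse is detected by probing the ociimage/<digest-hex> branch for the
+// docker.uncompressed_digest, docker.uncompressed_size and docker.size commit metadata
+// written by importBlob; when all three are present, the layer already exists in the repository.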
+func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
+	if !impl.OriginalBlobMatchesRequiredCompression(options) {
+		return false, private.ReusedBlob{}, nil
+	}
+	if d.repo == nil {
+		repo, err := openRepo(d.ref.repo)
+		if err != nil {
+			return false, private.ReusedBlob{}, err
+		}
+		d.repo = repo
+	}
+	branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex())
+
+	found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest")
+	if err != nil || !found {
+		return found, private.ReusedBlob{}, err
+	}
+
+	found, data, err = readMetadata(d.repo, branch, "docker.uncompressed_size")
+	if err != nil || !found {
+		return found, private.ReusedBlob{}, err
+	}
+
+	found, data, err = readMetadata(d.repo, branch, "docker.size")
+	if err != nil || !found {
+		return found, private.ReusedBlob{}, err
+	}
+
+	size, err := strconv.ParseInt(data, 10, 64)
+	if err != nil {
+		return false, private.ReusedBlob{}, err
+	}
+
+	return true, private.ReusedBlob{Digest: info.Digest, Size: size}, nil
+}
+
+// PutManifest writes manifest to the destination.
+// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
+// there can be no secondary manifests.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
+// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+func (d *ostreeImageDestination) PutManifest(ctx context.Context, manifestBlob []byte, instanceDigest *digest.Digest) error {
+	if instanceDigest != nil {
+		return errors.New(`Manifest lists are not supported by "ostree:"`)
+	}
+
+	d.manifest = string(manifestBlob)
+
+	if err := json.Unmarshal(manifestBlob, &d.schema); err != nil {
+		return err
+	}
+
+	manifestPath := filepath.Join(d.tmpDirPath, d.ref.manifestPath())
+	if err := ensureParentDirectoryExists(manifestPath); err != nil {
+		return err
+	}
+
+	digest, err := manifest.Digest(manifestBlob)
+	if err != nil {
+		return err
+	}
+	d.digest = digest
+
+	return os.WriteFile(manifestPath, manifestBlob, 0644)
+}
+
+// PutSignaturesWithFormat writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
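+// Signatures are staged as one file per signature (signature-1, signature-2, ...) next to the
+// manifest in the temporary directory; Commit() later records their count in the "signatures"
+// commit metadata key, which GetSignaturesWithFormat uses to read them back.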
+func (d *ostreeImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
+	if instanceDigest != nil {
+		return errors.New(`Manifest lists are not supported by "ostree:"`)
+	}
+
+	path := filepath.Join(d.tmpDirPath, d.ref.signaturePath(0))
+	if err := ensureParentDirectoryExists(path); err != nil {
+		return err
+	}
+
+	for i, sig := range signatures {
+		signaturePath := filepath.Join(d.tmpDirPath, d.ref.signaturePath(i))
+		blob, err := signature.Blob(sig)
+		if err != nil {
+			return err
+		}
+		if err := os.WriteFile(signaturePath, blob, 0644); err != nil {
+			return err
+		}
+	}
+	d.signaturesLen = len(signatures)
+	return nil
+}
+
+func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) error {
+	runtime.LockOSThread()
+	defer runtime.UnlockOSThread()
+
+	repo, err := otbuiltin.OpenRepo(d.ref.repo)
+	if err != nil {
+		return err
+	}
+
+	_, err = repo.PrepareTransaction()
+	if err != nil {
+		return err
+	}
+
+	var selinuxHnd *C.struct_selabel_handle
+
+	if os.Getuid() == 0 && selinux.GetEnabled() {
+		selinuxHnd, err = C.selabel_open(C.SELABEL_CTX_FILE, nil, 0)
+		if selinuxHnd == nil {
+			return fmt.Errorf("cannot open the SELinux DB: %w", err)
+		}
+
+		defer C.selabel_close(selinuxHnd)
+	}
+
+	checkLayer := func(hash string) error {
+		blob := d.blobs[hash]
+		// if the blob is not present in d.blobs then it is already stored in OSTree,
+		// and we don't need to import it.
+		if blob == nil {
+			return nil
+		}
+		err := d.importBlob(selinuxHnd, repo, blob)
+		if err != nil {
+			return err
+		}
+
+		delete(d.blobs, hash)
+		return nil
+	}
+	for _, layer := range d.schema.LayersDescriptors {
+		hash := layer.Digest.Hex()
+		if err = checkLayer(hash); err != nil {
+			return err
+		}
+	}
+	for _, layer := range d.schema.FSLayers {
+		hash := layer.BlobSum.Hex()
+		if err = checkLayer(hash); err != nil {
+			return err
+		}
+	}
+
+	// Import the other blobs that are not layers
+	for _, blob := range d.blobs {
+		err := d.importConfig(repo, blob)
+		if err != nil {
+			return err
+		}
+	}
+
+	manifestPath := filepath.Join(d.tmpDirPath, "manifest")
+
+	metadata := []string{fmt.Sprintf("docker.manifest=%s", string(d.manifest)),
+		fmt.Sprintf("signatures=%d", d.signaturesLen),
+		fmt.Sprintf("docker.digest=%s", string(d.digest))}
+	if err := d.ostreeCommit(repo, fmt.Sprintf("ociimage/%s", d.ref.branchName), manifestPath, metadata); err != nil {
+		return err
+	}
+
+	_, err = repo.CommitTransaction()
+	return err
+}
+
+func ensureDirectoryExists(path string) error {
+	if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
+		if err := os.MkdirAll(path, 0755); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func ensureParentDirectoryExists(path string) error {
+	return ensureDirectoryExists(filepath.Dir(path))
+}
diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_src.go b/vendor/github.com/containers/image/v5/ostree/ostree_src.go
new file mode 100644
index 0000000000..9983acc0a6
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/ostree/ostree_src.go
@@ -0,0 +1,450 @@
+//go:build containers_image_ostree
+// +build containers_image_ostree
+
+package ostree
+
+import (
+	"bytes"
+	"context"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+	"unsafe"
+
+	"github.com/containers/image/v5/internal/imagesource/impl"
+	"github.com/containers/image/v5/internal/imagesource/stubs"
+	"github.com/containers/image/v5/internal/private"
+	"github.com/containers/image/v5/internal/signature"
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/types"
+	"github.com/containers/storage/pkg/ioutils"
+	"github.com/klauspost/pgzip"
+	digest "github.com/opencontainers/go-digest"
+	glib "github.com/ostreedev/ostree-go/pkg/glibobject"
+	"github.com/vbatts/tar-split/tar/asm"
+	"github.com/vbatts/tar-split/tar/storage"
+)
+
+// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1
+// #include <glib.h>
+// #include <glib-object.h>
+// #include <gio/gio.h>
+// #include <stdlib.h>
+// #include <ostree.h>
+// #include <gio/ginputstream.h>
+import "C"
+
+type ostreeImageSource struct {
+	impl.Compat
+	impl.PropertyMethodsInitialize
+	stubs.NoGetBlobAtInitialize
+
+	ref    ostreeReference
+	tmpDir string
+	repo   *C.struct_OstreeRepo
+	// get the compressed layer by its uncompressed checksum
+	compressed map[digest.Digest]digest.Digest
+}
+
+// newImageSource returns an ImageSource for reading from an existing OSTree repository.
+func newImageSource(tmpDir string, ref ostreeReference) (private.ImageSource, error) {
+	s := &ostreeImageSource{
+		PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+			HasThreadSafeGetBlob: false,
+		}),
+		NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
+
+		ref:        ref,
+		tmpDir:     tmpDir,
+		compressed: nil,
+	}
+	s.Compat = impl.AddCompat(s)
+	return s, nil
+}
+
+// Reference returns the reference used to set up this source.
+func (s *ostreeImageSource) Reference() types.ImageReference {
+	return s.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+func (s *ostreeImageSource) Close() error {
+	if s.repo != nil {
+		C.g_object_unref(C.gpointer(s.repo))
+	}
+	return nil
+}
+
+func (s *ostreeImageSource) getBlobUncompressedSize(blob string, isCompressed bool) (int64, error) {
+	var metadataKey string
+	if isCompressed {
+		metadataKey = "docker.uncompressed_size"
+	} else {
+		metadataKey = "docker.size"
+	}
+	b := fmt.Sprintf("ociimage/%s", blob)
+	found, data, err := readMetadata(s.repo, b, metadataKey)
+	if err != nil || !found {
+		return 0, err
+	}
+	return strconv.ParseInt(data, 10, 64)
+}
+
+func (s *ostreeImageSource) getLenSignatures() (int64, error) {
+	b := fmt.Sprintf("ociimage/%s", s.ref.branchName)
+	found, data, err := readMetadata(s.repo, b, "signatures")
+	if err != nil {
+		return -1, err
+	}
+	if !found {
+		// if 'signatures' is not present, just return 0 signatures.
+		return 0, nil
+	}
+	return strconv.ParseInt(data, 10, 64)
+}
+
+func (s *ostreeImageSource) getTarSplitData(blob string) ([]byte, error) {
+	b := fmt.Sprintf("ociimage/%s", blob)
+	found, out, err := readMetadata(s.repo, b, "tarsplit.output")
+	if err != nil || !found {
+		return nil, err
+	}
+	return base64.StdEncoding.DecodeString(out)
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
+// as the primary manifest can not be a list, so there can be no non-default instances.
+func (s *ostreeImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
+	if instanceDigest != nil {
+		return nil, "", errors.New(`Manifest lists are not supported by "ostree:"`)
+	}
+	if s.repo == nil {
+		repo, err := openRepo(s.ref.repo)
+		if err != nil {
+			return nil, "", err
+		}
+		s.repo = repo
+	}
+
+	b := fmt.Sprintf("ociimage/%s", s.ref.branchName)
+	found, out, err := readMetadata(s.repo, b, "docker.manifest")
+	if err != nil {
+		return nil, "", err
+	}
+	if !found {
+		return nil, "", errors.New("manifest not found")
+	}
+	m := []byte(out)
+	return m, manifest.GuessMIMEType(m), nil
+}
+
+func (s *ostreeImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
+	return nil, "", errors.New("manifest lists are not supported by this transport")
+}
+
+func openRepo(path string) (*C.struct_OstreeRepo, error) {
+	var cerr *C.GError
+	cpath := C.CString(path)
+	defer C.free(unsafe.Pointer(cpath))
+	pathc := C.g_file_new_for_path(cpath)
+	defer C.g_object_unref(C.gpointer(pathc))
+	repo := C.ostree_repo_new(pathc)
+	r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(repo, nil, &cerr)))
+	if !r {
+		C.g_object_unref(C.gpointer(repo))
+		return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
+	}
+	return repo, nil
+}
+
+type ostreePathFileGetter struct {
+	repo       *C.struct_OstreeRepo
+	parentRoot *C.GFile
+}
+
+type ostreeReader struct {
+	stream *C.GFileInputStream
+}
+
+func (o ostreeReader) Close() error {
+	C.g_object_unref(C.gpointer(o.stream))
+	return nil
+}
+func (o ostreeReader) Read(p []byte) (int, error) {
+	var cerr *C.GError
+	instanceCast := C.g_type_check_instance_cast((*C.GTypeInstance)(unsafe.Pointer(o.stream)), C.g_input_stream_get_type())
+	stream := (*C.GInputStream)(unsafe.Pointer(instanceCast))
+
+	b := C.g_input_stream_read_bytes(stream, (C.gsize)(cap(p)), nil, &cerr)
+	if b == nil {
+		return 0, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
+	}
+	defer C.g_bytes_unref(b)
+
+	count := int(C.g_bytes_get_size(b))
+	if count == 0 {
+		return 0, io.EOF
+	}
+	data := (*[1 << 30]byte)(unsafe.Pointer(C.g_bytes_get_data(b, nil)))[:count:count]
+	copy(p, data)
+	return count, nil
+}
+
+func readMetadata(repo *C.struct_OstreeRepo, commit, key string) (bool, string, error) {
+	var cerr *C.GError
+	var ref *C.char
+	// Free via a closure so that the value ref holds at return time, not the nil it
+	// holds here (deferred call arguments are evaluated immediately), is what gets freed.
+	defer func() { C.free(unsafe.Pointer(ref)) }()
+
+	cCommit := C.CString(commit)
+	defer C.free(unsafe.Pointer(cCommit))
+
+	if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(repo, cCommit, C.gboolean(1), &ref, &cerr))) {
+		return false, "", glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
+	}
+
+	if ref == nil {
+		return false, "", nil
+	}
+
+	var variant *C.GVariant
+	if !glib.GoBool(glib.GBoolean(C.ostree_repo_load_variant(repo, C.OSTREE_OBJECT_TYPE_COMMIT, ref, &variant, &cerr))) {
+		return false, "", glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
+	}
+	defer C.g_variant_unref(variant)
+	if variant != nil {
+		cKey := C.CString(key)
+		defer C.free(unsafe.Pointer(cKey))
+
+		metadata := C.g_variant_get_child_value(variant, 0)
+		defer C.g_variant_unref(metadata)
+
+		data := C.g_variant_lookup_value(metadata, (*C.gchar)(cKey), nil)
+		if data != nil {
+			defer C.g_variant_unref(data)
+			ptr := (*C.char)(C.g_variant_get_string(data, nil))
+			val := C.GoString(ptr)
+			return true, val, nil
+		}
+	}
+	return false, "", nil
+}
+
+func newOSTreePathFileGetter(repo *C.struct_OstreeRepo, commit string) (*ostreePathFileGetter, error) {
+	var cerr *C.GError
+	var parentRoot *C.GFile
+	cCommit := C.CString(commit)
+	defer C.free(unsafe.Pointer(cCommit))
+	if !glib.GoBool(glib.GBoolean(C.ostree_repo_read_commit(repo, cCommit, &parentRoot, nil, nil, &cerr))) {
+		return &ostreePathFileGetter{}, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
+	}
+
+	C.g_object_ref(C.gpointer(repo))
+
+	return &ostreePathFileGetter{repo: repo, parentRoot: parentRoot}, nil
+}
+
+func (o ostreePathFileGetter) Get(filename string) (io.ReadCloser, error) {
+	var file *C.GFile
+	if strings.HasPrefix(filename, "./") {
+		filename = filename[2:]
+	}
+	cfilename := C.CString(filename)
+	defer C.free(unsafe.Pointer(cfilename))
+
+	file = (*C.GFile)(C.g_file_resolve_relative_path(o.parentRoot, cfilename))
+
+	var cerr *C.GError
+	stream := C.g_file_read(file, nil, &cerr)
+	if stream == nil {
+		return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
+	}
+
+	return &ostreeReader{stream: stream}, nil
+}
+
+func (o ostreePathFileGetter) Close() {
+	C.g_object_unref(C.gpointer(o.repo))
+	C.g_object_unref(C.gpointer(o.parentRoot))
+}
+
+func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser, error) {
+	getter, err := newOSTreePathFileGetter(s.repo, commit)
+	if err != nil {
+		return nil, err
+	}
+	defer getter.Close()
+
+	return getter.Get(path)
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+
+	blob := info.Digest.Hex()
+
+	// Ensure s.compressed is initialized. It is built by LayerInfosForCopy.
+	if s.compressed == nil {
+		_, err := s.LayerInfosForCopy(ctx, nil)
+		if err != nil {
+			return nil, -1, err
+		}
+
+	}
+	compressedBlob, isCompressed := s.compressed[info.Digest]
+	if isCompressed {
+		blob = compressedBlob.Hex()
+	}
+	branch := fmt.Sprintf("ociimage/%s", blob)
+
+	if s.repo == nil {
+		repo, err := openRepo(s.ref.repo)
+		if err != nil {
+			return nil, 0, err
+		}
+		s.repo = repo
+	}
+
+	layerSize, err := s.getBlobUncompressedSize(blob, isCompressed)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	tarsplit, err := s.getTarSplitData(blob)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	// if tarsplit is nil we are looking at the manifest; return the file in /content directly.
+	if tarsplit == nil {
+		file, err := s.readSingleFile(branch, "/content")
+		if err != nil {
+			return nil, 0, err
+		}
+		return file, layerSize, nil
+	}
+
+	mf := bytes.NewReader(tarsplit)
+	mfz, err := pgzip.NewReader(mf)
+	if err != nil {
+		return nil, 0, err
+	}
+	metaUnpacker := storage.NewJSONUnpacker(mfz)
+
+	getter, err := newOSTreePathFileGetter(s.repo, branch)
+	if err != nil {
+		mfz.Close()
+		return nil, 0, err
+	}
+
+	ots := asm.NewOutputTarStream(getter, metaUnpacker)
+
+	rc := ioutils.NewReadCloserWrapper(ots, func() error {
+		getter.Close()
+		mfz.Close()
+		return ots.Close()
+	})
+	return rc, layerSize, nil
+}
+
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *ostreeImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+	if instanceDigest != nil {
+		return nil, errors.New(`Manifest lists are not supported by "ostree:"`)
+	}
+	lenSignatures, err := s.getLenSignatures()
+	if err != nil {
+		return nil, err
+	}
+	branch := fmt.Sprintf("ociimage/%s", s.ref.branchName)
+
+	if s.repo == nil {
+		repo, err := openRepo(s.ref.repo)
+		if err != nil {
+			return nil, err
+		}
+		s.repo = repo
+	}
+
+	signatures := []signature.Signature{}
+	for i := int64(1); i <= lenSignatures; i++ {
+		path := fmt.Sprintf("/signature-%d", i)
+		sigReader, err := s.readSingleFile(branch, path)
+		if err != nil {
+			return nil, err
+		}
+		defer sigReader.Close()
+
+		sigBlob, err := io.ReadAll(sigReader)
+		if err != nil {
+			return nil, err
+		}
+		sig, err := signature.FromBlob(sigBlob)
+		if err != nil {
+			return nil, fmt.Errorf("parsing signature %q: %w", path, err)
+		}
+		signatures = append(signatures, sig)
+	}
+	return signatures, nil
+}
+
+// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
+// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
+// to read the image's layers.
+// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
+// as the primary manifest can not be a list, so there can be no secondary manifests.
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (s *ostreeImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
+	if instanceDigest != nil {
+		return nil, errors.New(`Manifest lists are not supported by "ostree:"`)
+	}
+
+	updatedBlobInfos := []types.BlobInfo{}
+	manifestBlob, manifestType, err := s.GetManifest(ctx, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	man, err := manifest.FromBlob(manifestBlob, manifestType)
+	if err != nil {
+		return nil, err
+	}
+
+	s.compressed = make(map[digest.Digest]digest.Digest)
+
+	layerBlobs := man.LayerInfos()
+
+	for _, layerBlob := range layerBlobs {
+		branch := fmt.Sprintf("ociimage/%s", layerBlob.Digest.Hex())
+		found, uncompressedDigestStr, err := readMetadata(s.repo, branch, "docker.uncompressed_digest")
+		if err != nil || !found {
+			return nil, err
+		}
+
+		found, uncompressedSizeStr, err := readMetadata(s.repo, branch, "docker.uncompressed_size")
+		if err != nil || !found {
+			return nil, err
+		}
+
+		uncompressedSize, err := strconv.ParseInt(uncompressedSizeStr, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+		uncompressedDigest := digest.Digest(uncompressedDigestStr)
+		blobInfo := types.BlobInfo{
+			Digest:    uncompressedDigest,
+			Size:      uncompressedSize,
+			MediaType: layerBlob.MediaType,
+		}
+		s.compressed[uncompressedDigest] = layerBlob.Digest
+		updatedBlobInfos = append(updatedBlobInfos, blobInfo)
+	}
+	return updatedBlobInfos, nil
+}
diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_transport.go b/vendor/github.com/containers/image/v5/ostree/ostree_transport.go
new file mode 100644
index 0000000000..d83f85b90f
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/ostree/ostree_transport.go
@@ -0,0 +1,242 @@
+//go:build containers_image_ostree
+// +build containers_image_ostree
+
+package ostree
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/containers/image/v5/directory/explicitfilepath"
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/internal/image"
+	"github.com/containers/image/v5/transports"
+	"github.com/containers/image/v5/types"
+	"github.com/containers/storage/pkg/regexp"
+)
+
+const defaultOSTreeRepo = "/ostree/repo"
+
+// Transport is an ImageTransport for ostree paths.
+var Transport = ostreeTransport{}
+
+type ostreeTransport struct{}
+
+func (t ostreeTransport) Name() string {
+	return "ostree"
+}
+
+func init() {
+	transports.Register(Transport)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
+func (t ostreeTransport) ValidatePolicyConfigurationScope(scope string) error {
+	sep := strings.Index(scope, ":")
+	if sep < 0 {
+		return fmt.Errorf("Invalid ostree: scope %s: Must include a repo", scope)
+	}
+	repo := scope[:sep]
+
+	if !strings.HasPrefix(repo, "/") {
+		return fmt.Errorf("Invalid ostree: scope %s: repository must be an absolute path", scope)
+	}
+	cleaned := filepath.Clean(repo)
+	if cleaned != repo {
+		return fmt.Errorf(`Invalid ostree: scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
+	}
+
+	// FIXME? In the namespaces within a repo,
+	// we could be verifying the various character set and length restrictions
+	// from docker/distribution/reference.regexp.go, but other than that there
+	// are few semantically invalid strings.
+	return nil
+}
+
+// ostreeReference is an ImageReference for ostree paths.
+type ostreeReference struct {
+	image      string
+	branchName string
+	repo       string
+}
+
+type ostreeImageCloser struct {
+	types.ImageCloser
+	size int64
+}
+
+func (t ostreeTransport) ParseReference(ref string) (types.ImageReference, error) {
+	var repo = ""
+	image, repoPart, gotRepoPart := strings.Cut(ref, "@/")
+	if !gotRepoPart {
+		repo = defaultOSTreeRepo
+	} else {
+		repo = "/" + repoPart
+	}
+
+	return NewReference(image, repo)
+}
+
+// NewReference returns an OSTree reference for a specified repo and image.
+func NewReference(image string, repo string) (types.ImageReference, error) {
+	// image is not _really_ in a containers/image/docker/reference format;
+	// as far as the libOSTree ociimage/* namespace is concerned, it is more or
+	// less an arbitrary string with an implied tag.
+	// Parse the image using reference.ParseNormalizedNamed so that we can
+	// check whether the image has a tag specified and we can add ":latest" if needed
+	ostreeImage, err := reference.ParseNormalizedNamed(image)
+	if err != nil {
+		return nil, err
+	}
+
+	if reference.IsNameOnly(ostreeImage) {
+		image = image + ":latest"
+	}
+
+	resolved, err := explicitfilepath.ResolvePathToFullyExplicit(repo)
+	if err != nil {
+		// With os.IsNotExist(err), the parent directory of repo does not exist either;
+		// that should ordinarily not happen, but it would be a bit weird to reject
+		// references which do not specify a repo just because the implicit defaultOSTreeRepo
+		// does not exist.
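+		// In that case we fall back to the unresolved default path below; any other
+		// resolution failure, or a missing explicitly-requested repo, is still an error.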
+ if os.IsNotExist(err) && repo == defaultOSTreeRepo { + resolved = repo + } else { + return nil, err + } + } + // This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces + // from being ambiguous with values of PolicyConfigurationIdentity. + if strings.Contains(resolved, ":") { + return nil, fmt.Errorf("Invalid OSTree reference %s@%s: path %s contains a colon", image, repo, resolved) + } + + return ostreeReference{ + image: image, + branchName: encodeOStreeRef(image), + repo: resolved, + }, nil +} + +func (ref ostreeReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. +func (ref ostreeReference) StringWithinTransport() string { + return fmt.Sprintf("%s@%s", ref.image, ref.repo) +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, +// not e.g. after redirect or alias processing), or nil if unknown/not applicable. +func (ref ostreeReference) DockerReference() reference.Named { + return nil +} + +func (ref ostreeReference) PolicyConfigurationIdentity() string { + return fmt.Sprintf("%s:%s", ref.repo, ref.image) +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. +// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), +// and each following element to be a prefix of the element preceding it. +func (ref ostreeReference) PolicyConfigurationNamespaces() []string { + repo, _, gotTag := strings.Cut(ref.image, ":") + if !gotTag { // Coverage: Should never happen, NewReference above ensures ref.image has a :tag. + panic(fmt.Sprintf("Internal inconsistency: ref.image value %q does not have a :tag", ref.image)) + } + name := repo + res := []string{} + for { + res = append(res, fmt.Sprintf("%s:%s", ref.repo, name)) + + lastSlash := strings.LastIndex(name, "/") + if lastSlash == -1 { + break + } + name = name[:lastSlash] + } + return res +} + +func (s *ostreeImageCloser) Size() (int64, error) { + return s.size, nil +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +func (ref ostreeReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + return image.FromReference(ctx, sys, ref) +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. 
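+// A hypothetical caller might look like this (sketch only; error handling elided):
+//
+//	ref, _ := ostree.Transport.ParseReference("busybox:latest@/ostree/repo")
+//	src, _ := ref.NewImageSource(ctx, nil) // nil SystemContext: scratch space in os.TempDir()
+//	defer src.Close()
+//
+// The temporary working directory may be overridden via types.SystemContext.OSTreeTmpDirPath.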
+func (ref ostreeReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+	var tmpDir string
+	if sys == nil || sys.OSTreeTmpDirPath == "" {
+		tmpDir = os.TempDir()
+	} else {
+		tmpDir = sys.OSTreeTmpDirPath
+	}
+	return newImageSource(tmpDir, ref)
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref ostreeReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+	var tmpDir string
+	if sys == nil || sys.OSTreeTmpDirPath == "" {
+		tmpDir = os.TempDir()
+	} else {
+		tmpDir = sys.OSTreeTmpDirPath
+	}
+	return newImageDestination(ref, tmpDir)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref ostreeReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+	return errors.New("Deleting images not implemented for ostree: images")
+}
+
+var ostreeRefRegexp = regexp.Delayed(`^[A-Za-z0-9.-]$`)
+
+func encodeOStreeRef(in string) string {
+	var buffer bytes.Buffer
+	for i := range in {
+		sub := in[i : i+1]
+		if ostreeRefRegexp.MatchString(sub) {
+			buffer.WriteString(sub)
+		} else {
+			buffer.WriteString(fmt.Sprintf("_%02X", sub[0]))
+		}
+
+	}
+	return buffer.String()
+}
+
+// manifestPath returns a path for the manifest within an ostree using our conventions.
+func (ref ostreeReference) manifestPath() string {
+	return filepath.Join("manifest", "manifest.json")
+}
+
+// signaturePath returns a path for a signature within an ostree using our conventions.
+func (ref ostreeReference) signaturePath(index int) string {
+	return filepath.Join("manifest", fmt.Sprintf("signature-%d", index+1))
+}
diff --git a/vendor/github.com/containers/image/v5/sif/load.go b/vendor/github.com/containers/image/v5/sif/load.go
new file mode 100644
index 0000000000..70758ad439
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/sif/load.go
@@ -0,0 +1,210 @@
+package sif
+
+import (
+	"bufio"
+	"context"
+	"fmt"
+	"io"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+
+	"github.com/sirupsen/logrus"
+	"github.com/sylabs/sif/v2/pkg/sif"
+)
+
+// injectedScriptTargetPath is the path injectedScript should be written to in the created image.
+const injectedScriptTargetPath = "/podman/runscript"
+
+// parseDefFile parses a SIF definition file from reader,
+// and returns non-trivial contents of the %environment and %runscript sections.
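+// A hypothetical definition file such as:
+//
+//	%environment
+//	    export FOO=bar
+//	%runscript
+//	    echo "$@"
+//
+// yields ([]string{"export FOO=bar"}, []string{`echo "$@"`}, nil).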
+func parseDefFile(reader io.Reader) ([]string, []string, error) { + type parserState int + const ( + parsingOther parserState = iota + parsingEnvironment + parsingRunscript + ) + + environment := []string{} + runscript := []string{} + + state := parsingOther + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + s := strings.TrimSpace(scanner.Text()) + switch { + case s == `%environment`: + state = parsingEnvironment + case s == `%runscript`: + state = parsingRunscript + case strings.HasPrefix(s, "%"): + state = parsingOther + case state == parsingEnvironment: + if s != "" && !strings.HasPrefix(s, "#") { + environment = append(environment, s) + } + case state == parsingRunscript: + runscript = append(runscript, s) + default: // parsingOther: ignore the line + } + } + if err := scanner.Err(); err != nil { + return nil, nil, fmt.Errorf("reading lines from SIF definition file object: %w", err) + } + return environment, runscript, nil +} + +// generateInjectedScript generates a shell script based on +// SIF definition file %environment and %runscript data, and returns it. +func generateInjectedScript(environment []string, runscript []string) []byte { + script := fmt.Sprintf("#!/bin/bash\n"+ + "%s\n"+ + "%s\n", strings.Join(environment, "\n"), strings.Join(runscript, "\n")) + return []byte(script) +} + +// processDefFile finds sif.DataDeffile in sifImage, if any, +// and returns: +// - the command to run +// - contents of a script to inject as injectedScriptTargetPath, or nil +func processDefFile(sifImage *sif.FileImage) (string, []byte, error) { + var environment, runscript []string + + desc, err := sifImage.GetDescriptor(sif.WithDataType(sif.DataDeffile)) + if err == nil { + environment, runscript, err = parseDefFile(desc.GetReader()) + if err != nil { + return "", nil, err + } + } + + var command string + var injectedScript []byte + if len(environment) == 0 && len(runscript) == 0 { + command = "bash" + injectedScript = nil + } else { + injectedScript = generateInjectedScript(environment, runscript) + command = injectedScriptTargetPath + } + + return command, injectedScript, nil +} + +func writeInjectedScript(extractedRootPath string, injectedScript []byte) error { + if injectedScript == nil { + return nil + } + filePath := filepath.Join(extractedRootPath, injectedScriptTargetPath) + parentDirPath := filepath.Dir(filePath) + if err := os.MkdirAll(parentDirPath, 0755); err != nil { + return fmt.Errorf("creating %s: %w", parentDirPath, err) + } + if err := os.WriteFile(filePath, injectedScript, 0755); err != nil { + return fmt.Errorf("writing %s to %s: %w", injectedScriptTargetPath, filePath, err) + } + return nil +} + +// createTarFromSIFInputs creates a tar file at tarPath, using a squashfs image at squashFSPath. +// It can also use extractedRootPath and scriptPath, which are allocated for its exclusive use, +// if necessary. +func createTarFromSIFInputs(ctx context.Context, tarPath, squashFSPath string, injectedScript []byte, extractedRootPath, scriptPath string) error { + // It's safe for the Remove calls to happen even before we create the files, because tempDir is exclusive + // for our use. + defer os.RemoveAll(extractedRootPath) + + // Almost everything in extractedRootPath comes from squashFSPath. 
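+	// Both unsquashfs and tar run from one generated shell script under a single fakeroot
+	// invocation, so the ownership recorded while extracting is still visible when the
+	// tree is repacked into the tarball.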
+ conversionCommand := fmt.Sprintf("unsquashfs -d %s -f %s && tar --acls --xattrs -C %s -cpf %s ./", + extractedRootPath, squashFSPath, extractedRootPath, tarPath) + script := "#!/bin/sh\n" + conversionCommand + "\n" + if err := os.WriteFile(scriptPath, []byte(script), 0755); err != nil { + return err + } + defer os.Remove(scriptPath) + + // On top of squashFSPath, we only add injectedScript, if necessary. + if err := writeInjectedScript(extractedRootPath, injectedScript); err != nil { + return err + } + + logrus.Debugf("Converting squashfs to tar, command: %s ...", conversionCommand) + cmd := exec.CommandContext(ctx, "fakeroot", "--", scriptPath) + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("converting image: %w, output: %s", err, string(output)) + } + logrus.Debugf("... finished converting squashfs to tar") + return nil +} + +// convertSIFToElements processes sifImage and creates/returns +// the relevant elements for constructing an OCI-like image: +// - A path to a tar file containing a root filesystem, +// - A command to run. +// The returned tar file path is inside tempDir, which can be assumed to be empty +// at start, and is exclusively used by the current process (i.e. it is safe +// to use hard-coded relative paths within it). +func convertSIFToElements(ctx context.Context, sifImage *sif.FileImage, tempDir string) (string, []string, error) { + // We could allocate unique names for all of these using os.{CreateTemp,MkdirTemp}, but tempDir is exclusive, + // so we can just hard-code a set of unique values here. + // We create and/or manage cleanup of these two paths. + squashFSPath := filepath.Join(tempDir, "rootfs.squashfs") + tarPath := filepath.Join(tempDir, "rootfs.tar") + // We only allocate these paths, the user is responsible for cleaning them up. + extractedRootPath := filepath.Join(tempDir, "rootfs") + scriptPath := filepath.Join(tempDir, "script") + + succeeded := false + // It's safe for the Remove calls to happen even before we create the files, because tempDir is exclusive + // for our use. + // Ideally we would remove squashFSPath immediately after creating extractedRootPath, but we need + // to run both creation and consumption of extractedRootPath in the same fakeroot context. + // So, overall, this process requires at least 2 compressed copies (SIF and squashFSPath) and 2 + // uncompressed copies (extractedRootPath and tarPath) of the data, all using up space at the same time. + // That's rather unsatisfactory, ideally we would be streaming the data directly from a squashfs parser + // reading from the SIF file to a tarball, for 1 compressed and 1 uncompressed copy. + defer os.Remove(squashFSPath) + defer func() { + if !succeeded { + os.Remove(tarPath) + } + }() + + command, injectedScript, err := processDefFile(sifImage) + if err != nil { + return "", nil, err + } + + rootFS, err := sifImage.GetDescriptor(sif.WithPartitionType(sif.PartPrimSys)) + if err != nil { + return "", nil, fmt.Errorf("looking up rootfs from SIF file: %w", err) + } + // TODO: We'd prefer not to make a full copy of the file here; unsquashfs ≥ 4.4 + // has an -o option that allows extracting a squashfs from the SIF file directly, + // but that version is not currently available in RHEL 8. 
+	logrus.Debugf("Creating a temporary squashfs image %s ...", squashFSPath)
+	if err := func() error { // A scope for defer
+		f, err := os.Create(squashFSPath)
+		if err != nil {
+			return err
+		}
+		defer f.Close()
+		// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+		if _, err := io.CopyN(f, rootFS.GetReader(), rootFS.Size()); err != nil {
+			return err
+		}
+		return nil
+	}(); err != nil {
+		return "", nil, err
+	}
+	logrus.Debugf("... finished creating a temporary squashfs image")
+
+	if err := createTarFromSIFInputs(ctx, tarPath, squashFSPath, injectedScript, extractedRootPath, scriptPath); err != nil {
+		return "", nil, err
+	}
+	succeeded = true
+	return tarPath, []string{command}, nil
+}
diff --git a/vendor/github.com/containers/image/v5/sif/src.go b/vendor/github.com/containers/image/v5/sif/src.go
new file mode 100644
index 0000000000..261cfbe771
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/sif/src.go
@@ -0,0 +1,206 @@
+package sif
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+
+	"github.com/containers/image/v5/internal/imagesource/impl"
+	"github.com/containers/image/v5/internal/imagesource/stubs"
+	"github.com/containers/image/v5/internal/private"
+	"github.com/containers/image/v5/internal/tmpdir"
+	"github.com/containers/image/v5/types"
+	"github.com/opencontainers/go-digest"
+	imgspecs "github.com/opencontainers/image-spec/specs-go"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/sirupsen/logrus"
+	"github.com/sylabs/sif/v2/pkg/sif"
+)
+
+type sifImageSource struct {
+	impl.Compat
+	impl.PropertyMethodsInitialize
+	impl.NoSignatures
+	impl.DoesNotAffectLayerInfosForCopy
+	stubs.NoGetBlobAtInitialize
+
+	ref          sifReference
+	workDir      string
+	layerDigest  digest.Digest
+	layerSize    int64
+	layerFile    string
+	config       []byte
+	configDigest digest.Digest
+	manifest     []byte
+}
+
+// getBlobInfo returns the digest and size of the provided file.
+func getBlobInfo(path string) (digest.Digest, int64, error) {
+	f, err := os.Open(path)
+	if err != nil {
+		return "", -1, fmt.Errorf("opening %q for reading: %w", path, err)
+	}
+	defer f.Close()
+
+	// TODO: Instead of writing the tar file to disk, and reading
+	// it here again, stream the tar file to a pipe and
+	// compute the digest while writing it to disk.
+	logrus.Debugf("Computing a digest of the SIF conversion output...")
+	digester := digest.Canonical.Digester()
+	// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+	size, err := io.Copy(digester.Hash(), f)
+	if err != nil {
+		return "", -1, fmt.Errorf("reading %q: %w", path, err)
+	}
+	digest := digester.Digest()
+	logrus.Debugf("... finished computing the digest of the SIF conversion output")
+
+	return digest, size, nil
+}
+
+// newImageSource returns an ImageSource for reading from a SIF file.
+// newImageSource extracts SIF objects and saves them in a temp directory.
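+// The conversion happens once, up front: the root filesystem becomes a single tar layer on
+// disk, and a matching OCI config and manifest are synthesized in memory, so later
+// GetManifest and GetBlob calls never need to reopen the SIF file.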
+func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifReference) (private.ImageSource, error) { + sifImg, err := sif.LoadContainerFromPath(ref.file, sif.OptLoadWithFlag(os.O_RDONLY)) + if err != nil { + return nil, fmt.Errorf("loading SIF file: %w", err) + } + defer func() { + _ = sifImg.UnloadContainer() + }() + + workDir, err := tmpdir.MkDirBigFileTemp(sys, "sif") + if err != nil { + return nil, fmt.Errorf("creating temp directory: %w", err) + } + succeeded := false + defer func() { + if !succeeded { + os.RemoveAll(workDir) + } + }() + + layerPath, commandLine, err := convertSIFToElements(ctx, sifImg, workDir) + if err != nil { + return nil, fmt.Errorf("converting rootfs from SquashFS to Tarball: %w", err) + } + + layerDigest, layerSize, err := getBlobInfo(layerPath) + if err != nil { + return nil, fmt.Errorf("gathering blob information: %w", err) + } + + created := sifImg.ModifiedAt() + config := imgspecv1.Image{ + Created: &created, + Platform: imgspecv1.Platform{ + Architecture: sifImg.PrimaryArch(), + OS: "linux", + }, + Config: imgspecv1.ImageConfig{ + Cmd: commandLine, + }, + RootFS: imgspecv1.RootFS{ + Type: "layers", + DiffIDs: []digest.Digest{layerDigest}, + }, + History: []imgspecv1.History{ + { + Created: &created, + CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", layerDigest.Hex(), os.PathSeparator), + Comment: "imported from SIF, uuid: " + sifImg.ID(), + }, + { + Created: &created, + CreatedBy: "/bin/sh -c #(nop) CMD [\"bash\"]", + EmptyLayer: true, + }, + }, + } + configBytes, err := json.Marshal(&config) + if err != nil { + return nil, fmt.Errorf("generating configuration blob for %q: %w", ref.resolvedFile, err) + } + configDigest := digest.Canonical.FromBytes(configBytes) + + manifest := imgspecv1.Manifest{ + Versioned: imgspecs.Versioned{SchemaVersion: 2}, + MediaType: imgspecv1.MediaTypeImageManifest, + Config: imgspecv1.Descriptor{ + Digest: configDigest, + Size: int64(len(configBytes)), + MediaType: imgspecv1.MediaTypeImageConfig, + }, + Layers: []imgspecv1.Descriptor{{ + Digest: layerDigest, + Size: layerSize, + MediaType: imgspecv1.MediaTypeImageLayer, + }}, + } + manifestBytes, err := json.Marshal(&manifest) + if err != nil { + return nil, fmt.Errorf("generating manifest for %q: %w", ref.resolvedFile, err) + } + + succeeded = true + s := &sifImageSource{ + PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{ + HasThreadSafeGetBlob: true, + }), + NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref), + + ref: ref, + workDir: workDir, + layerDigest: layerDigest, + layerSize: layerSize, + layerFile: layerPath, + config: configBytes, + configDigest: configDigest, + manifest: manifestBytes, + } + s.Compat = impl.AddCompat(s) + return s, nil +} + +// Reference returns the reference used to set up this source. +func (s *sifImageSource) Reference() types.ImageReference { + return s.ref +} + +// Close removes resources associated with an initialized ImageSource, if any. +func (s *sifImageSource) Close() error { + return os.RemoveAll(s.workDir) +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. 
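+// Only two digests can be served: the synthesized config (from memory) and the single
+// converted layer (streamed from the tar file under s.workDir); any other digest is an error.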
+func (s *sifImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + switch info.Digest { + case s.configDigest: + return io.NopCloser(bytes.NewReader(s.config)), int64(len(s.config)), nil + case s.layerDigest: + reader, err := os.Open(s.layerFile) + if err != nil { + return nil, -1, fmt.Errorf("opening %q: %w", s.layerFile, err) + } + return reader, s.layerSize, nil + default: + return nil, -1, fmt.Errorf("no blob with digest %q found", info.Digest.String()) + } +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *sifImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + return nil, "", errors.New("manifest lists are not supported by the sif transport") + } + return s.manifest, imgspecv1.MediaTypeImageManifest, nil +} diff --git a/vendor/github.com/containers/image/v5/sif/transport.go b/vendor/github.com/containers/image/v5/sif/transport.go new file mode 100644 index 0000000000..4c09010714 --- /dev/null +++ b/vendor/github.com/containers/image/v5/sif/transport.go @@ -0,0 +1,160 @@ +package sif + +import ( + "context" + "errors" + "fmt" + "path/filepath" + "strings" + + "github.com/containers/image/v5/directory/explicitfilepath" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/image" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" +) + +func init() { + transports.Register(Transport) +} + +// Transport is an ImageTransport for SIF images. +var Transport = sifTransport{} + +type sifTransport struct{} + +func (t sifTransport) Name() string { + return "sif" +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func (t sifTransport) ParseReference(reference string) (types.ImageReference, error) { + return NewReference(reference) +} + +// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys +// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). +// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. +// scope passed to this function will not be "", that value is always allowed. +func (t sifTransport) ValidatePolicyConfigurationScope(scope string) error { + if !strings.HasPrefix(scope, "/") { + return fmt.Errorf("Invalid scope %s: Must be an absolute path", scope) + } + // Refuse also "/", otherwise "/" and "" would have the same semantics, + // and "" could be unexpectedly shadowed by the "/" entry. + if scope == "/" { + return errors.New(`Invalid scope "/": Use the generic default scope ""`) + } + cleaned := filepath.Clean(scope) + if cleaned != scope { + return fmt.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned) + } + return nil +} + +// sifReference is an ImageReference for SIF images. 
+type sifReference struct { + // Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time! + // Either of the paths may point to a different, or no, inode over time. resolvedFile may contain symbolic links, and so on. + + // Generally we follow the intent of the user, and use the "file" member for filesystem operations (e.g. the user can use a relative path to avoid + // being exposed to symlinks and renames in the parent directories to the working directory). + // (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.) + file string // As specified by the user. May be relative, contain symlinks, etc. + resolvedFile string // Absolute file path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces. +} + +// There is no sif.ParseReference because it is rather pointless. +// Callers who need a transport-independent interface will go through +// sifTransport.ParseReference; callers who intentionally deal with SIF files +// can use sif.NewReference. + +// NewReference returns an image file reference for a specified path. +func NewReference(file string) (types.ImageReference, error) { + // We do not expose an API supplying the resolvedFile; we could, but recomputing it + // is generally cheap enough that we prefer being confident about the properties of resolvedFile. + resolved, err := explicitfilepath.ResolvePathToFullyExplicit(file) + if err != nil { + return nil, err + } + return sifReference{file: file, resolvedFile: resolved}, nil +} + +func (ref sifReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix; +// instead, see transports.ImageName(). +func (ref sifReference) StringWithinTransport() string { + return ref.file +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, +// not e.g. after redirect or alias processing), or nil if unknown/not applicable. +func (ref sifReference) DockerReference() reference.Named { + return nil +} + +// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. +// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; +// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical +// (i.e. various references with exactly the same semantics should return the same configuration identity) +// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but +// not required/guaranteed that it will be a valid input to Transport().ParseReference(). +// Returns "" if configuration identities for these references are not supported. 
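+// For example (hypothetical): for resolvedFile "/home/user/alpine.sif" the identity is
+// "/home/user/alpine.sif", and PolicyConfigurationNamespaces() below returns
+// {"/home/user", "/home"}.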
+func (ref sifReference) PolicyConfigurationIdentity() string { + return ref.resolvedFile +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. +// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), +// and each following element to be a prefix of the element preceding it. +func (ref sifReference) PolicyConfigurationNamespaces() []string { + res := []string{} + path := ref.resolvedFile + for { + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 || lastSlash == 0 { + break + } + path = path[:lastSlash] + res = append(res, path) + } + // Note that we do not include "/"; it is redundant with the default "" global default, + // and rejected by sifTransport.ValidatePolicyConfigurationScope above. + return res +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref sifReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + return image.FromReference(ctx, sys, ref) +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. +func (ref sifReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(ctx, sys, ref) +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. +func (ref sifReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + return nil, errors.New(`"sif:" locations can only be read from, not written to`) +} + +// DeleteImage deletes the named image from the registry, if supported. 
+func (ref sifReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + return errors.New("Deleting images not implemented for sif: images") +} diff --git a/vendor/github.com/containers/image/v5/storage/storage_dest.go b/vendor/github.com/containers/image/v5/storage/storage_dest.go new file mode 100644 index 0000000000..07e1d5e1f9 --- /dev/null +++ b/vendor/github.com/containers/image/v5/storage/storage_dest.go @@ -0,0 +1,958 @@ +//go:build !containers_image_storage_stub +// +build !containers_image_storage_stub + +package storage + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "sync" + "sync/atomic" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/blobinfocache" + "github.com/containers/image/v5/internal/imagedestination/impl" + "github.com/containers/image/v5/internal/imagedestination/stubs" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/putblobdigest" + "github.com/containers/image/v5/internal/set" + "github.com/containers/image/v5/internal/signature" + "github.com/containers/image/v5/internal/tmpdir" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/blobinfocache/none" + "github.com/containers/image/v5/types" + "github.com/containers/storage" + graphdriver "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/chunked" + "github.com/containers/storage/pkg/ioutils" + digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" +) + +var ( + // ErrBlobDigestMismatch could potentially be returned when PutBlob() is given a blob + // with a digest-based name that doesn't match its contents. + // Deprecated: PutBlob() doesn't do this any more (it just accepts the caller’s value), + // and there is no known user of this error. + ErrBlobDigestMismatch = errors.New("blob digest mismatch") + // ErrBlobSizeMismatch is returned when PutBlob() is given a blob + // with an expected size that doesn't match the reader. + ErrBlobSizeMismatch = errors.New("blob size mismatch") +) + +type storageImageDestination struct { + impl.Compat + impl.PropertyMethodsInitialize + stubs.ImplementsPutBlobPartial + stubs.AlwaysSupportsSignatures + + imageRef storageReference + directory string // Temporary directory where we store blobs until Commit() time + nextTempFileID atomic.Int32 // A counter that we use for computing filenames to assign to blobs + manifest []byte // Manifest contents, temporary + manifestDigest digest.Digest // Valid if len(manifest) != 0 + signatures []byte // Signature contents, temporary + signatureses map[digest.Digest][]byte // Instance signature contents, temporary + SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice + SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // Sizes of each manifest's signature slice + + // A storage destination may be used concurrently. Accesses are + // serialized via a mutex. Please refer to the individual comments + // below for details. + lock sync.Mutex + // Mapping from layer (by index) to the associated ID in the storage. 
+ // It's protected *implicitly* since `commitLayer()`, at any given + // time, can only be executed by *one* goroutine. Please refer to + // `queueOrCommit()` for further details on how the single-caller + // guarantee is implemented. + indexToStorageID map[int]*string + // All accesses to below data are protected by `lock` which is made + // *explicit* in the code. + blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs + fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes + filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them + currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed) + indexToAddedLayerInfo map[int]addedLayerInfo // Mapping from layer (by index) to blob to add to the image + blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer + diffOutputs map[digest.Digest]*graphdriver.DriverWithDifferOutput // Mapping from digest to differ output +} + +// addedLayerInfo records data about a layer to use in this image. +type addedLayerInfo struct { + digest digest.Digest + emptyLayer bool // The layer is an “empty”/“throwaway” one, and may or may not be physically represented in various transport / storage systems. false if the manifest type does not have the concept. +} + +// newImageDestination sets us up to write a new image, caching blobs in a temporary directory until +// it's time to Commit() the image +func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*storageImageDestination, error) { + directory, err := tmpdir.MkDirBigFileTemp(sys, "storage") + if err != nil { + return nil, fmt.Errorf("creating a temporary directory: %w", err) + } + dest := &storageImageDestination{ + PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{ + SupportedManifestMIMETypes: []string{ + imgspecv1.MediaTypeImageManifest, + manifest.DockerV2Schema2MediaType, + manifest.DockerV2Schema1SignedMediaType, + manifest.DockerV2Schema1MediaType, + }, + // We ultimately have to decompress layers to populate trees on disk + // and need to explicitly ask for it here, so that the layers' MIME + // types can be set accordingly. + DesiredLayerCompression: types.PreserveOriginal, + AcceptsForeignLayerURLs: false, + MustMatchRuntimeOS: true, + IgnoresEmbeddedDockerReference: true, // Yes, we want the unmodified manifest + HasThreadSafePutBlob: true, + }), + + imageRef: imageRef, + directory: directory, + signatureses: make(map[digest.Digest][]byte), + blobDiffIDs: make(map[digest.Digest]digest.Digest), + blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer), + fileSizes: make(map[digest.Digest]int64), + filenames: make(map[digest.Digest]string), + SignatureSizes: []int{}, + SignaturesSizes: make(map[digest.Digest][]int), + indexToStorageID: make(map[int]*string), + indexToAddedLayerInfo: make(map[int]addedLayerInfo), + diffOutputs: make(map[digest.Digest]*graphdriver.DriverWithDifferOutput), + } + dest.Compat = impl.AddCompat(dest) + return dest, nil +} + +// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, +// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. 
+func (s *storageImageDestination) Reference() types.ImageReference { + return s.imageRef +} + +// Close cleans up the temporary directory and additional layer store handlers. +func (s *storageImageDestination) Close() error { + for _, al := range s.blobAdditionalLayer { + al.Release() + } + for _, v := range s.diffOutputs { + if v.Target != "" { + _ = s.imageRef.transport.store.CleanupStagingDirectory(v.Target) + } + } + return os.RemoveAll(s.directory) +} + +func (s *storageImageDestination) computeNextBlobCacheFile() string { + return filepath.Join(s.directory, fmt.Sprintf("%d", s.nextTempFileID.Add(1))) +} + +// PutBlobWithOptions writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. +// inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. +func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) { + info, err := s.putBlobToPendingFile(stream, blobinfo, &options) + if err != nil { + return info, err + } + + if options.IsConfig || options.LayerIndex == nil { + return info, nil + } + + return info, s.queueOrCommit(*options.LayerIndex, addedLayerInfo{ + digest: info.Digest, + emptyLayer: options.EmptyLayer, + }) +} + +// putBlobToPendingFile implements ImageDestination.PutBlobWithOptions, storing stream into an on-disk file. +// The caller must arrange the blob to be eventually committed using s.commitLayer(). +func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinfo types.BlobInfo, options *private.PutBlobOptions) (private.UploadedBlob, error) { + // Stores a layer or data blob in our temporary directory, checking that any information + // in the blobinfo matches the incoming data. + if blobinfo.Digest != "" { + if err := blobinfo.Digest.Validate(); err != nil { + return private.UploadedBlob{}, fmt.Errorf("invalid digest %#v: %w", blobinfo.Digest.String(), err) + } + } + + // Set up to digest the blob if necessary, and count its size while saving it to a file. + filename := s.computeNextBlobCacheFile() + file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600) + if err != nil { + return private.UploadedBlob{}, fmt.Errorf("creating temporary file %q: %w", filename, err) + } + defer file.Close() + counter := ioutils.NewWriteCounter(file) + stream = io.TeeReader(stream, counter) + digester, stream := putblobdigest.DigestIfUnknown(stream, blobinfo) + decompressed, err := archive.DecompressStream(stream) + if err != nil { + return private.UploadedBlob{}, fmt.Errorf("setting up to decompress blob: %w", err) + } + + diffID := digest.Canonical.Digester() + // Copy the data to the file. + // TODO: This can take quite some time, and should ideally be cancellable using context.Context. 
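+ // Descriptive note: at this point the input is plumbed as
+ // stream -> TeeReader(counter -> temporary file) -> blob digester -> DecompressStream,
+ // so the single io.Copy below simultaneously writes the (possibly compressed) bytes
+ // to disk, counts them, computes the blob digest if it was not already known, and
+ // computes the uncompressed DiffID.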
+ _, err = io.Copy(diffID.Hash(), decompressed) + decompressed.Close() + if err != nil { + return private.UploadedBlob{}, fmt.Errorf("storing blob to file %q: %w", filename, err) + } + + // Determine blob properties, and fail if information that we were given about the blob + // is known to be incorrect. + blobDigest := digester.Digest() + blobSize := blobinfo.Size + if blobSize < 0 { + blobSize = counter.Count + } else if blobinfo.Size != counter.Count { + return private.UploadedBlob{}, ErrBlobSizeMismatch + } + + // Record information about the blob. + s.lock.Lock() + s.blobDiffIDs[blobDigest] = diffID.Digest() + s.fileSizes[blobDigest] = counter.Count + s.filenames[blobDigest] = filename + s.lock.Unlock() + // This is safe because we have just computed diffID, and blobDigest was either computed + // by us, or validated by the caller (usually copy.digestingReader). + options.Cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest()) + return private.UploadedBlob{ + Digest: blobDigest, + Size: blobSize, + }, nil +} + +type zstdFetcher struct { + chunkAccessor private.BlobChunkAccessor + ctx context.Context + blobInfo types.BlobInfo +} + +// GetBlobAt converts from chunked.GetBlobAt to BlobChunkAccessor.GetBlobAt. +func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { + newChunks := make([]private.ImageSourceChunk, 0, len(chunks)) + for _, v := range chunks { + i := private.ImageSourceChunk{ + Offset: v.Offset, + Length: v.Length, + } + newChunks = append(newChunks, i) + } + rc, errs, err := f.chunkAccessor.GetBlobAt(f.ctx, f.blobInfo, newChunks) + if _, ok := err.(private.BadPartialRequestError); ok { + err = chunked.ErrBadRequest{} + } + return rc, errs, err + +} + +// PutBlobPartial attempts to create a blob using the data that is already present +// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks. +// It is available only if SupportsPutBlobPartial(). +// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller +// should fall back to PutBlobWithOptions. +func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) { + fetcher := zstdFetcher{ + chunkAccessor: chunkAccessor, + ctx: ctx, + blobInfo: srcInfo, + } + + differ, err := chunked.GetDiffer(ctx, s.imageRef.transport.store, srcInfo.Size, srcInfo.Annotations, &fetcher) + if err != nil { + return private.UploadedBlob{}, err + } + + out, err := s.imageRef.transport.store.ApplyDiffWithDiffer("", nil, differ) + if err != nil { + return private.UploadedBlob{}, err + } + + blobDigest := srcInfo.Digest + + s.lock.Lock() + s.blobDiffIDs[blobDigest] = blobDigest + s.fileSizes[blobDigest] = 0 + s.filenames[blobDigest] = "" + s.diffOutputs[blobDigest] = out + s.lock.Unlock() + + return private.UploadedBlob{ + Digest: blobDigest, + Size: srcInfo.Size, + }, nil +} + +// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). +// info.Digest must not be empty. +// If the blob has been successfully reused, returns (true, info, nil). 
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. +func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { + if !impl.OriginalBlobMatchesRequiredCompression(options) { + return false, private.ReusedBlob{}, nil + } + reused, info, err := s.tryReusingBlobAsPending(blobinfo.Digest, blobinfo.Size, &options) + if err != nil || !reused || options.LayerIndex == nil { + return reused, info, err + } + + return reused, info, s.queueOrCommit(*options.LayerIndex, addedLayerInfo{ + digest: info.Digest, + emptyLayer: options.EmptyLayer, + }) +} + +// tryReusingBlobAsPending implements TryReusingBlobWithOptions for (digest, size or -1), filling s.blobDiffIDs and other metadata. +// The caller must arrange the blob to be eventually committed using s.commitLayer(). +func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest, size int64, options *private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { + // lock the entire method as it executes fairly quickly + s.lock.Lock() + defer s.lock.Unlock() + + if options.SrcRef != nil { + // Check if we have the layer in the underlying additional layer store. + aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(digest, options.SrcRef.String()) + if err != nil && !errors.Is(err, storage.ErrLayerUnknown) { + return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, digest, err) + } else if err == nil { + // Record the uncompressed value so that we can use it to calculate layer IDs. + s.blobDiffIDs[digest] = aLayer.UncompressedDigest() + s.blobAdditionalLayer[digest] = aLayer + return true, private.ReusedBlob{ + Digest: digest, + Size: aLayer.CompressedSize(), + }, nil + } + } + + if digest == "" { + return false, private.ReusedBlob{}, errors.New(`Can not check for a blob with unknown digest`) + } + if err := digest.Validate(); err != nil { + return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err) + } + + // Check if we've already cached it in a file. + if size, ok := s.fileSizes[digest]; ok { + return true, private.ReusedBlob{ + Digest: digest, + Size: size, + }, nil + } + + // Check if we have a wasn't-compressed layer in storage that's based on that blob. + layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(digest) + if err != nil && !errors.Is(err, storage.ErrLayerUnknown) { + return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, digest, err) + } + if len(layers) > 0 { + // Save this for completeness. + s.blobDiffIDs[digest] = layers[0].UncompressedDigest + return true, private.ReusedBlob{ + Digest: digest, + Size: layers[0].UncompressedSize, + }, nil + } + + // Check if we have a was-compressed layer in storage that's based on that blob. + layers, err = s.imageRef.transport.store.LayersByCompressedDigest(digest) + if err != nil && !errors.Is(err, storage.ErrLayerUnknown) { + return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q: %w`, digest, err) + } + if len(layers) > 0 { + // Record the uncompressed value so that we can use it to calculate layer IDs. 
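+ // (Here the match was found by the compressed digest, so the reused blob keeps the
+ // caller's digest but reports the layer's CompressedSize; the DiffID recorded below
+ // is what commitLayer() later uses to compute chained layer IDs.)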
+ s.blobDiffIDs[digest] = layers[0].UncompressedDigest + return true, private.ReusedBlob{ + Digest: digest, + Size: layers[0].CompressedSize, + }, nil + } + + // Does the blob correspond to a known DiffID which we already have available? + // Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the + // uncompressed layer, and that can happen only if options.CanSubstitute, or if the incoming manifest already specifies the size. + if options.CanSubstitute || size != -1 { + if uncompressedDigest := options.Cache.UncompressedDigest(digest); uncompressedDigest != "" && uncompressedDigest != digest { + layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest) + if err != nil && !errors.Is(err, storage.ErrLayerUnknown) { + return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err) + } + if len(layers) > 0 { + if size != -1 { + s.blobDiffIDs[digest] = layers[0].UncompressedDigest + return true, private.ReusedBlob{ + Digest: digest, + Size: size, + }, nil + } + if !options.CanSubstitute { + return false, private.ReusedBlob{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blob with digest %s", digest) + } + s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest + return true, private.ReusedBlob{ + Digest: uncompressedDigest, + Size: layers[0].UncompressedSize, + }, nil + } + } + } + + // Nope, we don't have it. + return false, private.ReusedBlob{}, nil +} + +// computeID computes a recommended image ID based on information we have so far. If +// the manifest is not of a type that we recognize, we return an empty value, indicating +// that since we don't have a recommendation, a random ID should be used if one needs +// to be allocated. +func (s *storageImageDestination) computeID(m manifest.Manifest) string { + // Build the diffID list. We need the decompressed sums that we've been calculating to + // fill in the DiffIDs. It's expected (but not enforced by us) that the number of + // diffIDs corresponds to the number of non-EmptyLayer entries in the history. + var diffIDs []digest.Digest + switch m := m.(type) { + case *manifest.Schema1: + // Build a list of the diffIDs we've generated for the non-throwaway FS layers, + // in reverse of the order in which they were originally listed. + for i, compat := range m.ExtractedV1Compatibility { + if compat.ThrowAway { + continue + } + blobSum := m.FSLayers[i].BlobSum + diffID, ok := s.blobDiffIDs[blobSum] + if !ok { + logrus.Infof("error looking up diffID for layer %q", blobSum.String()) + return "" + } + diffIDs = append([]digest.Digest{diffID}, diffIDs...) + } + case *manifest.Schema2, *manifest.OCI1: + // We know the ID calculation for these formats doesn't actually use the diffIDs, + // so we don't need to populate the diffID list. + default: + return "" + } + id, err := m.ImageID(diffIDs) + if err != nil { + return "" + } + return id +} + +// getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig +// information out of it for Inspect(). 
+func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, error) {
+ if info.Digest == "" {
+ return nil, errors.New(`no digest supplied when reading blob`)
+ }
+ if err := info.Digest.Validate(); err != nil {
+ return nil, fmt.Errorf("invalid digest supplied when reading blob: %w", err)
+ }
+ // Assume it's a file, since we're only calling this from a place that expects to read files.
+ if filename, ok := s.filenames[info.Digest]; ok {
+ contents, err2 := os.ReadFile(filename)
+ if err2 != nil {
+ return nil, fmt.Errorf(`reading blob from file %q: %w`, filename, err2)
+ }
+ return contents, nil
+ }
+ // If it's not a file, it's a bug, because we're not expecting to be asked for a layer.
+ return nil, errors.New("blob not found")
+}
+
+// queueOrCommit queues the specified layer to be committed to the storage.
+// If no other goroutine is already committing layers, the layer and all
+// subsequent layers (if already queued) will be committed to the storage.
+func (s *storageImageDestination) queueOrCommit(index int, info addedLayerInfo) error {
+ // NOTE: whenever the code below is touched, make sure that all code
+ // paths unlock the lock, and unlock it exactly once.
+ //
+ // Conceptually, the code is divided into two stages:
+ //
+ // 1) Queue in work by marking the layer as ready to be committed.
+ // If at least one previous/parent layer with a lower index has
+ // not yet been committed, return early.
+ //
+ // 2) Process the queued-in work by committing the "ready" layers
+ // in sequence. Make sure that more items can be queued-in
+ // during the comparatively I/O expensive task of committing a
+ // layer.
+ //
+ // The conceptual benefit of this design is that the caller can continue
+ // pulling layers after an early return. At any given time, only one
+ // caller is the "worker" routine committing layers. All other routines
+ // can continue pulling and queuing in layers.
+ s.lock.Lock()
+ s.indexToAddedLayerInfo[index] = info
+
+ // We're still waiting for at least one previous/parent layer to be
+ // committed, so there's nothing to do.
+ if index != s.currentIndex {
+ s.lock.Unlock()
+ return nil
+ }
+
+ for {
+ info, ok := s.indexToAddedLayerInfo[index]
+ if !ok {
+ break
+ }
+ s.lock.Unlock()
+ // Note: commitLayer locks on-demand.
+ if err := s.commitLayer(index, info, -1); err != nil {
+ return err
+ }
+ s.lock.Lock()
+ index++
+ }
+
+ // Set the index at the very end to make sure that only one routine
+ // enters stage 2).
+ s.currentIndex = index
+ s.lock.Unlock()
+ return nil
+}
+
+// commitLayer commits the specified layer with the given index to the storage.
+// size can usually be -1; it can be provided if the layer is not known to be already present in blobDiffIDs.
+//
+// Note that the previous layer is expected to already be committed.
+//
+// Caution: this function must be called without holding `s.lock`. Callers
+// must guarantee that, at any given time, at most one goroutine may execute
+// `commitLayer()`.
+func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, size int64) error {
+ // Already committed? Return early.
+ if _, alreadyCommitted := s.indexToStorageID[index]; alreadyCommitted {
+ return nil
+ }
+
+ // Start with an empty string or the previous layer ID. Note that
+ // `s.indexToStorageID` can only be accessed by *one* goroutine at any
+ // given time. Hence, we don't need to lock accesses.
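+ // Illustrative note: layer IDs are chained. A base layer gets its DiffID hex as
+ // its ID; each subsequent layer gets
+ // digest.Canonical.FromBytes([]byte(parentID + "+" + diffID.Hex())).Hex()
+ // (see the id computation below), so identical layer stacks deduplicate to the
+ // same storage layers no matter which image pulled them first.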
+ var lastLayer string + if prev := s.indexToStorageID[index-1]; prev != nil { + lastLayer = *prev + } + + // Carry over the previous ID for empty non-base layers. + if info.emptyLayer { + s.indexToStorageID[index] = &lastLayer + return nil + } + + // Check if there's already a layer with the ID that we'd give to the result of applying + // this layer blob to its parent, if it has one, or the blob's hex value otherwise. + s.lock.Lock() + diffID, haveDiffID := s.blobDiffIDs[info.digest] + s.lock.Unlock() + if !haveDiffID { + // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(), + // or to even check if we had it. + // Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller + // that relies on using a blob digest that has never been seen by the store had better call + // TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only + // so far we are going to accommodate that (if we should be doing that at all). + logrus.Debugf("looking for diffID for blob %+v", info.digest) + // Use tryReusingBlobAsPending, not the top-level TryReusingBlobWithOptions, to prevent recursion via queueOrCommit. + has, _, err := s.tryReusingBlobAsPending(info.digest, size, &private.TryReusingBlobOptions{ + Cache: none.NoCache, + CanSubstitute: false, + }) + if err != nil { + return fmt.Errorf("checking for a layer based on blob %q: %w", info.digest.String(), err) + } + if !has { + return fmt.Errorf("error determining uncompressed digest for blob %q", info.digest.String()) + } + diffID, haveDiffID = s.blobDiffIDs[info.digest] + if !haveDiffID { + return fmt.Errorf("we have blob %q, but don't know its uncompressed digest", info.digest.String()) + } + } + id := diffID.Hex() + if lastLayer != "" { + id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex() + } + if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil { + // There's already a layer that should have the right contents, just reuse it. + lastLayer = layer.ID + s.indexToStorageID[index] = &lastLayer + return nil + } + + s.lock.Lock() + diffOutput, ok := s.diffOutputs[info.digest] + s.lock.Unlock() + if ok { + layer, err := s.imageRef.transport.store.CreateLayer(id, lastLayer, nil, "", false, nil) + if err != nil { + return err + } + + // FIXME: what to do with the uncompressed digest? + diffOutput.UncompressedDigest = info.digest + + if err := s.imageRef.transport.store.ApplyDiffFromStagingDirectory(layer.ID, diffOutput.Target, diffOutput, nil); err != nil { + _ = s.imageRef.transport.store.Delete(layer.ID) + return err + } + + s.indexToStorageID[index] = &layer.ID + return nil + } + + s.lock.Lock() + al, ok := s.blobAdditionalLayer[info.digest] + s.lock.Unlock() + if ok { + layer, err := al.PutAs(id, lastLayer, nil) + if err != nil && !errors.Is(err, storage.ErrDuplicateID) { + return fmt.Errorf("failed to put layer from digest and labels: %w", err) + } + lastLayer = layer.ID + s.indexToStorageID[index] = &lastLayer + return nil + } + + // Check if we previously cached a file with that blob's contents. If we didn't, + // then we need to read the desired contents from a layer. + s.lock.Lock() + filename, ok := s.filenames[info.digest] + s.lock.Unlock() + if !ok { + // Try to find the layer with contents matching that blobsum. 
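+ // (Lookup order: a layer recorded under the uncompressed DiffID is preferred,
+ // with a fallback to one recorded under the compressed blob digest.)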
+ layer := "" + layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(diffID) + if err2 == nil && len(layers) > 0 { + layer = layers[0].ID + } else { + layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(info.digest) + if err2 == nil && len(layers) > 0 { + layer = layers[0].ID + } + } + if layer == "" { + return fmt.Errorf("locating layer for blob %q: %w", info.digest, err2) + } + // Read the layer's contents. + noCompression := archive.Uncompressed + diffOptions := &storage.DiffOptions{ + Compression: &noCompression, + } + diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions) + if err2 != nil { + return fmt.Errorf("reading layer %q for blob %q: %w", layer, info.digest, err2) + } + // Copy the layer diff to a file. Diff() takes a lock that it holds + // until the ReadCloser that it returns is closed, and PutLayer() wants + // the same lock, so the diff can't just be directly streamed from one + // to the other. + filename = s.computeNextBlobCacheFile() + file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600) + if err != nil { + diff.Close() + return fmt.Errorf("creating temporary file %q: %w", filename, err) + } + // Copy the data to the file. + // TODO: This can take quite some time, and should ideally be cancellable using + // ctx.Done(). + _, err = io.Copy(file, diff) + diff.Close() + file.Close() + if err != nil { + return fmt.Errorf("storing blob to file %q: %w", filename, err) + } + // Make sure that we can find this file later, should we need the layer's + // contents again. + s.lock.Lock() + s.filenames[info.digest] = filename + s.lock.Unlock() + } + // Read the cached blob and use it as a diff. + file, err := os.Open(filename) + if err != nil { + return fmt.Errorf("opening file %q: %w", filename, err) + } + defer file.Close() + // Build the new layer using the diff, regardless of where it came from. + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). + layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, &storage.LayerOptions{ + OriginalDigest: info.digest, + UncompressedDigest: diffID, + }, file) + if err != nil && !errors.Is(err, storage.ErrDuplicateID) { + return fmt.Errorf("adding layer with blob %q: %w", info.digest, err) + } + + s.indexToStorageID[index] = &layer.ID + return nil +} + +// Commit marks the process of storing the image as successful and asks for the image to be persisted. +// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list +// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the +// original manifest list digest, if desired. +// WARNING: This does not have any transactional semantics: +// - Uploaded data MAY be visible to others before Commit() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. 
+// rollback is allowed but not guaranteed)
+func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
+ if len(s.manifest) == 0 {
+ return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()")
+ }
+ toplevelManifest, _, err := unparsedToplevel.Manifest(ctx)
+ if err != nil {
+ return fmt.Errorf("retrieving top-level manifest: %w", err)
+ }
+ // If the name we're saving to includes a digest, then check that the
+ // manifests that we're about to save all either match the one from the
+ // unparsedToplevel, or match the digest in the name that we're using.
+ if s.imageRef.named != nil {
+ if digested, ok := s.imageRef.named.(reference.Digested); ok {
+ matches, err := manifest.MatchesDigest(s.manifest, digested.Digest())
+ if err != nil {
+ return err
+ }
+ if !matches {
+ matches, err = manifest.MatchesDigest(toplevelManifest, digested.Digest())
+ if err != nil {
+ return err
+ }
+ }
+ if !matches {
+ return fmt.Errorf("Manifest to be saved does not match expected digest %s", digested.Digest())
+ }
+ }
+ }
+ // Find the list of layer blobs.
+ man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest))
+ if err != nil {
+ return fmt.Errorf("parsing manifest: %w", err)
+ }
+ layerBlobs := man.LayerInfos()
+
+ // Extract, commit, or find the layers.
+ for i, blob := range layerBlobs {
+ if err := s.commitLayer(i, addedLayerInfo{
+ digest: blob.Digest,
+ emptyLayer: blob.EmptyLayer,
+ }, blob.Size); err != nil {
+ return err
+ }
+ }
+ var lastLayer string
+ if len(layerBlobs) > 0 { // Can happen when using caches
+ prev := s.indexToStorageID[len(layerBlobs)-1]
+ if prev == nil {
+ return fmt.Errorf("Internal error: storageImageDestination.Commit(): previous layer %d hasn't been committed (lastLayer == nil)", len(layerBlobs)-1)
+ }
+ lastLayer = *prev
+ }
+
+ // If one of those blobs was a configuration blob, then we can try to dig out the date when the image
+ // was originally created, in case we're just copying it. If not, no harm done.
+ options := &storage.ImageOptions{}
+ if inspect, err := man.Inspect(s.getConfigBlob); err == nil && inspect.Created != nil {
+ logrus.Debugf("setting image creation date to %s", inspect.Created)
+ options.CreationDate = *inspect.Created
+ }
+
+ // Set up to save the non-layer blobs as data items. Since we only share layers, they should all be in files, so
+ // we just need to screen out the ones that are actually layers to get the list of non-layers.
+ dataBlobs := set.New[digest.Digest]()
+ for blob := range s.filenames {
+ dataBlobs.Add(blob)
+ }
+ for _, layerBlob := range layerBlobs {
+ dataBlobs.Delete(layerBlob.Digest)
+ }
+ for _, blob := range dataBlobs.Values() {
+ v, err := os.ReadFile(s.filenames[blob])
+ if err != nil {
+ return fmt.Errorf("copying non-layer blob %q to image: %w", blob, err)
+ }
+ options.BigData = append(options.BigData, storage.ImageBigDataOption{
+ Key: blob.String(),
+ Data: v,
+ Digest: digest.Canonical.FromBytes(v),
+ })
+ }
+ // Set up to save the unparsedToplevel's manifest if it differs from
+ // the per-platform one, which is saved below.
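+ // (For example, when a multi-arch image is pulled by its manifest-list digest,
+ // toplevelManifest is the list and s.manifest is the chosen per-platform
+ // instance; both are recorded so the image stays addressable by either digest.)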
+ if len(toplevelManifest) != 0 && !bytes.Equal(toplevelManifest, s.manifest) { + manifestDigest, err := manifest.Digest(toplevelManifest) + if err != nil { + return fmt.Errorf("digesting top-level manifest: %w", err) + } + options.BigData = append(options.BigData, storage.ImageBigDataOption{ + Key: manifestBigDataKey(manifestDigest), + Data: toplevelManifest, + Digest: manifestDigest, + }) + } + // Set up to save the image's manifest. Allow looking it up by digest by using the key convention defined by the Store. + // Record the manifest twice: using a digest-specific key to allow references to that specific digest instance, + // and using storage.ImageDigestBigDataKey for future users that don’t specify any digest and for compatibility with older readers. + options.BigData = append(options.BigData, storage.ImageBigDataOption{ + Key: manifestBigDataKey(s.manifestDigest), + Data: s.manifest, + Digest: s.manifestDigest, + }) + options.BigData = append(options.BigData, storage.ImageBigDataOption{ + Key: storage.ImageDigestBigDataKey, + Data: s.manifest, + Digest: s.manifestDigest, + }) + // Set up to save the signatures, if we have any. + if len(s.signatures) > 0 { + options.BigData = append(options.BigData, storage.ImageBigDataOption{ + Key: "signatures", + Data: s.signatures, + Digest: digest.Canonical.FromBytes(s.signatures), + }) + } + for instanceDigest, signatures := range s.signatureses { + options.BigData = append(options.BigData, storage.ImageBigDataOption{ + Key: signatureBigDataKey(instanceDigest), + Data: signatures, + Digest: digest.Canonical.FromBytes(signatures), + }) + } + + // Set up to save our metadata. + metadata, err := json.Marshal(s) + if err != nil { + return fmt.Errorf("encoding metadata for image: %w", err) + } + if len(metadata) != 0 { + options.Metadata = string(metadata) + } + + // Create the image record, pointing to the most-recently added layer. + intendedID := s.imageRef.id + if intendedID == "" { + intendedID = s.computeID(man) + } + oldNames := []string{} + img, err := s.imageRef.transport.store.CreateImage(intendedID, nil, lastLayer, "", options) + if err != nil { + if !errors.Is(err, storage.ErrDuplicateID) { + logrus.Debugf("error creating image: %q", err) + return fmt.Errorf("creating image %q: %w", intendedID, err) + } + img, err = s.imageRef.transport.store.Image(intendedID) + if err != nil { + return fmt.Errorf("reading image %q: %w", intendedID, err) + } + if img.TopLayer != lastLayer { + logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", intendedID) + return fmt.Errorf("image with ID %q already exists, but uses a different top layer: %w", intendedID, storage.ErrDuplicateID) + } + logrus.Debugf("reusing image ID %q", img.ID) + oldNames = append(oldNames, img.Names...) 
+ // set the data items and metadata on the already-present image + // FIXME: this _replaces_ any "signatures" blobs and their + // sizes (tracked in the metadata) which might have already + // been present with new values, when ideally we'd find a way + // to merge them since they all apply to the same image + for _, data := range options.BigData { + if err := s.imageRef.transport.store.SetImageBigData(img.ID, data.Key, data.Data, manifest.Digest); err != nil { + logrus.Debugf("error saving big data %q for image %q: %v", data.Key, img.ID, err) + return fmt.Errorf("saving big data %q for image %q: %w", data.Key, img.ID, err) + } + } + if options.Metadata != "" { + if err := s.imageRef.transport.store.SetMetadata(img.ID, options.Metadata); err != nil { + logrus.Debugf("error saving metadata for image %q: %v", img.ID, err) + return fmt.Errorf("saving metadata for image %q: %w", img.ID, err) + } + logrus.Debugf("saved image metadata %q", options.Metadata) + } + } else { + logrus.Debugf("created new image ID %q with metadata %q", img.ID, options.Metadata) + } + + // Clean up the unfinished image on any error. + // (Is this the right thing to do if the image has existed before?) + commitSucceeded := false + defer func() { + if !commitSucceeded { + logrus.Errorf("Updating image %q (old names %v) failed, deleting it", img.ID, oldNames) + if _, err := s.imageRef.transport.store.DeleteImage(img.ID, true); err != nil { + logrus.Errorf("Error deleting incomplete image %q: %v", img.ID, err) + } + } + }() + + // Add the reference's name on the image. We don't need to worry about avoiding duplicate + // values because AddNames() will deduplicate the list that we pass to it. + if name := s.imageRef.DockerReference(); name != nil { + if err := s.imageRef.transport.store.AddNames(img.ID, []string{name.String()}); err != nil { + return fmt.Errorf("adding names %v to image %q: %w", name, img.ID, err) + } + logrus.Debugf("added name %q to image %q", name, img.ID) + } + + commitSucceeded = true + return nil +} + +// PutManifest writes the manifest to the destination. +func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob []byte, instanceDigest *digest.Digest) error { + digest, err := manifest.Digest(manifestBlob) + if err != nil { + return err + } + s.manifest = slices.Clone(manifestBlob) + s.manifestDigest = digest + return nil +} + +// PutSignaturesWithFormat writes a set of signatures to the destination. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for +// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. +// MUST be called after PutManifest (signatures may reference manifest contents). +func (s *storageImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error { + sizes := []int{} + sigblob := []byte{} + for _, sigWithFormat := range signatures { + sig, err := signature.Blob(sigWithFormat) + if err != nil { + return err + } + sizes = append(sizes, len(sig)) + sigblob = append(sigblob, sig...) 
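+ // (All signatures end up concatenated in one blob; the sizes recorded above
+ // are what allows a reader to split the blob back into individual signatures.)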
+ }
+ if instanceDigest == nil {
+ s.signatures = sigblob
+ s.SignatureSizes = sizes
+ if len(s.manifest) > 0 {
+ manifestDigest := s.manifestDigest
+ instanceDigest = &manifestDigest
+ }
+ }
+ if instanceDigest != nil {
+ s.signatureses[*instanceDigest] = sigblob
+ s.SignaturesSizes[*instanceDigest] = sizes
+ }
+ return nil
+}
diff --git a/vendor/github.com/containers/image/v5/storage/storage_image.go b/vendor/github.com/containers/image/v5/storage/storage_image.go
new file mode 100644
index 0000000000..ac09f3dbbc
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/storage/storage_image.go
@@ -0,0 +1,59 @@
+//go:build !containers_image_storage_stub
+// +build !containers_image_storage_stub
+
+package storage
+
+import (
+ "context"
+
+ "github.com/containers/image/v5/internal/image"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ digest "github.com/opencontainers/go-digest"
+)
+
+var (
+ // ErrNoSuchImage is returned when we attempt to access an image which
+ // doesn't exist in the storage area.
+ ErrNoSuchImage = storage.ErrNotAnImage
+)
+
+type storageImageCloser struct {
+ types.ImageCloser
+ size int64
+}
+
+// manifestBigDataKey returns a key suitable for recording a manifest with the specified digest using storage.Store.ImageBigData and related functions.
+// If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably;
+// for compatibility, if a manifest is not available under this key, check also storage.ImageDigestBigDataKey.
+func manifestBigDataKey(digest digest.Digest) string {
+ return storage.ImageDigestManifestBigDataNamePrefix + "-" + digest.String()
+}
+
+// signatureBigDataKey returns a key suitable for recording the signatures associated with the manifest with the specified digest using storage.Store.ImageBigData and related functions.
+// If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably.
+func signatureBigDataKey(digest digest.Digest) string {
+ return "signature-" + digest.Encoded()
+}
+
+// Size() returns the previously-computed size of the image, with no error.
+func (s *storageImageCloser) Size() (int64, error) {
+ return s.size, nil
+}
+
+// newImage creates an image that also knows its size
+func newImage(ctx context.Context, sys *types.SystemContext, s storageReference) (types.ImageCloser, error) {
+ src, err := newImageSource(sys, s)
+ if err != nil {
+ return nil, err
+ }
+ img, err := image.FromSource(ctx, sys, src)
+ if err != nil {
+ return nil, err
+ }
+ size, err := src.getSize()
+ if err != nil {
+ return nil, err
+ }
+ return &storageImageCloser{ImageCloser: img, size: size}, nil
+}
diff --git a/vendor/github.com/containers/image/v5/storage/storage_reference.go b/vendor/github.com/containers/image/v5/storage/storage_reference.go
new file mode 100644
index 0000000000..a55e34054a
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/storage/storage_reference.go
@@ -0,0 +1,316 @@
+//go:build !containers_image_storage_stub
+// +build !containers_image_storage_stub
+
+package storage
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/exp/slices"
+)
+
+// A storageReference holds an arbitrary name and/or an ID, which is a 32-byte
+// value hex-encoded into a 64-character string, and a reference to a Store
+// where an image is, or would be, kept.
+// Either "named" or "id" must be set.
+type storageReference struct {
+ transport storageTransport
+ named reference.Named // may include a tag and/or a digest
+ id string
+}
+
+func newReference(transport storageTransport, named reference.Named, id string) (*storageReference, error) {
+ if named == nil && id == "" {
+ return nil, ErrInvalidReference
+ }
+ if named != nil && reference.IsNameOnly(named) {
+ return nil, fmt.Errorf("reference %s has neither a tag nor a digest: %w", named.String(), ErrInvalidReference)
+ }
+ if id != "" {
+ if err := validateImageID(id); err != nil {
+ return nil, fmt.Errorf("invalid ID value %q: %v: %w", id, err, ErrInvalidReference)
+ }
+ }
+ // We take a copy of the transport, which contains a pointer to the
+ // store that it used for resolving this reference, so that the
+ // transport that we'll return from Transport() won't be affected by
+ // further calls to the original transport's SetStore() method.
+ return &storageReference{
+ transport: transport,
+ named: named,
+ id: id,
+ }, nil
+}
+
+// imageMatchesRepo returns true iff image.Names contains an element with the same repo as ref
+func imageMatchesRepo(image *storage.Image, ref reference.Named) bool {
+ repo := ref.Name()
+ return slices.ContainsFunc(image.Names, func(name string) bool {
+ if named, err := reference.ParseNormalizedNamed(name); err == nil && named.Name() == repo {
+ return true
+ }
+ return false
+ })
+}
+
+// multiArchImageMatchesSystemContext returns true if the passed-in image contains a
+// multi-arch manifest matching the passed-in digest, and is the per-platform
+// image instance that matches sys.
+//
+// See the comment in storageReference.resolveImage explaining why
+// this check is necessary.
+func multiArchImageMatchesSystemContext(store storage.Store, img *storage.Image, manifestDigest digest.Digest, sys *types.SystemContext) bool {
+ // Load the manifest that matches the specified digest.
+ // We don't need to care about storage.ImageDigestBigDataKey because
+ // manifest lists are only stored in storage by c/image versions
+ // that know about manifestBigDataKey, and only using that key.
+ key := manifestBigDataKey(manifestDigest)
+ manifestBytes, err := store.ImageBigData(img.ID, key)
+ if err != nil {
+ return false
+ }
+ // The manifest is either a list, or not a list. If it's a list, find
+ // the digest of the instance that matches the current system, and check
+ // whether that instance's manifest is stored in the image record.
+ manifestType := manifest.GuessMIMEType(manifestBytes)
+ if !manifest.MIMETypeIsMultiImage(manifestType) {
+ // manifestDigest directly specifies a per-platform image, so we aren't
+ // choosing among different variants.
+ return false
+ }
+ list, err := manifest.ListFromBlob(manifestBytes, manifestType)
+ if err != nil {
+ return false
+ }
+ chosenInstance, err := list.ChooseInstance(sys)
+ if err != nil {
+ return false
+ }
+ key = manifestBigDataKey(chosenInstance)
+ _, err = store.ImageBigData(img.ID, key)
+ return err == nil // true if img.ID is based on chosenInstance.
+}
+
+// Resolve the reference's name to an image ID in the store, if there's already
+// one present with the same name or ID, and return the image.
+//
+// Returns an error matching ErrNoSuchImage if an image matching ref was not found.
+func (s *storageReference) resolveImage(sys *types.SystemContext) (*storage.Image, error) {
+ var loadedImage *storage.Image
+ if s.id == "" && s.named != nil {
+ // Look for an image that has the expanded reference name as an explicit Name value.
+ image, err := s.transport.store.Image(s.named.String())
+ if image != nil && err == nil {
+ loadedImage = image
+ s.id = image.ID
+ }
+ }
+ if s.id == "" && s.named != nil {
+ if digested, ok := s.named.(reference.Digested); ok {
+ // Look for an image with the specified digest that has the same name,
+ // though possibly with a different tag or digest, as a Name value, so
+ // that the canonical reference can be implicitly resolved to the image.
+ //
+ // Typically there should be at most one such image, because the same
+ // manifest digest implies the same config, and we choose the storage ID
+ // based on the config (deduplicating images), except:
+ // - the user can explicitly specify an ID when creating the image.
+ // In this case we don't have a preference among the alternatives.
+ // - when pulling an image from a multi-platform manifest list, we also
+ // store the manifest list in the image; this allows referencing a
+ // per-platform image using the manifest list digest, but that also
+ // means that we can have multiple genuinely different images in the
+ // storage matching the same manifest list digest (if pulled using different
+ // SystemContext.{OS,Architecture,Variant}Choice to the same storage).
+ // In this case we prefer the image matching the current SystemContext.
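+ // (Concretely, and purely as an illustration: pulling example.com/app@sha256:<list digest>
+ // for both amd64 and arm64 into one store yields two image records sharing that
+ // digest; the loop below then picks the record whose stored per-platform
+ // manifest matches sys.)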
+ images, err := s.transport.store.ImagesByDigest(digested.Digest()) + if err == nil && len(images) > 0 { + for _, image := range images { + if imageMatchesRepo(image, s.named) { + if loadedImage == nil || multiArchImageMatchesSystemContext(s.transport.store, image, digested.Digest(), sys) { + loadedImage = image + s.id = image.ID + } + } + } + } + } + } + if s.id == "" { + logrus.Debugf("reference %q does not resolve to an image ID", s.StringWithinTransport()) + return nil, fmt.Errorf("reference %q does not resolve to an image ID: %w", s.StringWithinTransport(), ErrNoSuchImage) + } + if loadedImage == nil { + img, err := s.transport.store.Image(s.id) + if err != nil { + return nil, fmt.Errorf("reading image %q: %w", s.id, err) + } + loadedImage = img + } + if s.named != nil { + if !imageMatchesRepo(loadedImage, s.named) { + logrus.Errorf("no image matching reference %q found", s.StringWithinTransport()) + return nil, ErrNoSuchImage + } + } + // Default to having the image digest that we hand back match the most recently + // added manifest... + if digest, ok := loadedImage.BigDataDigests[storage.ImageDigestBigDataKey]; ok { + loadedImage.Digest = digest + } + // ... unless the named reference says otherwise, and it matches one of the digests + // in the image. For those cases, set the Digest field to that value, for the + // sake of older consumers that don't know there's a whole list in there now. + if s.named != nil { + if digested, ok := s.named.(reference.Digested); ok { + digest := digested.Digest() + if slices.Contains(loadedImage.Digests, digest) { + loadedImage.Digest = digest + } + } + } + return loadedImage, nil +} + +// Return a Transport object that defaults to using the same store that we used +// to build this reference object. +func (s storageReference) Transport() types.ImageTransport { + return &storageTransport{ + store: s.transport.store, + defaultUIDMap: s.transport.defaultUIDMap, + defaultGIDMap: s.transport.defaultGIDMap, + } +} + +// Return a name with a tag or digest, if we have either, else return it bare. +func (s storageReference) DockerReference() reference.Named { + return s.named +} + +// Return a name with a tag, prefixed with the graph root and driver name, to +// disambiguate between images which may be present in multiple stores and +// share only their names. +func (s storageReference) StringWithinTransport() string { + optionsList := "" + options := s.transport.store.GraphOptions() + if len(options) > 0 { + optionsList = ":" + strings.Join(options, ",") + } + res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]" + if s.named != nil { + res += s.named.String() + } + if s.id != "" { + res += "@" + s.id + } + return res +} + +func (s storageReference) PolicyConfigurationIdentity() string { + res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]" + if s.named != nil { + res += s.named.String() + } + if s.id != "" { + res += "@" + s.id + } + return res +} + +// Also accept policy that's tied to the combination of the graph root and +// driver name, to apply to all images stored in the Store, and to just the +// graph root, in case we're using multiple drivers in the same directory for +// some reason. 
+func (s storageReference) PolicyConfigurationNamespaces() []string {
+ storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]"
+ driverlessStoreSpec := "[" + s.transport.store.GraphRoot() + "]"
+ namespaces := []string{}
+ if s.named != nil {
+ if s.id != "" {
+ // The reference without the ID is also a valid namespace.
+ namespaces = append(namespaces, storeSpec+s.named.String())
+ }
+ tagged, isTagged := s.named.(reference.Tagged)
+ _, isDigested := s.named.(reference.Digested)
+ if isTagged && isDigested { // s.named is "name:tag@digest"; add a "name:tag" parent namespace.
+ namespaces = append(namespaces, storeSpec+s.named.Name()+":"+tagged.Tag())
+ }
+ components := strings.Split(s.named.Name(), "/")
+ for len(components) > 0 {
+ namespaces = append(namespaces, storeSpec+strings.Join(components, "/"))
+ components = components[:len(components)-1]
+ }
+ }
+ namespaces = append(namespaces, storeSpec)
+ namespaces = append(namespaces, driverlessStoreSpec)
+ return namespaces
+}
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (s storageReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+ return newImage(ctx, sys, s)
+}
+
+func (s storageReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+ img, err := s.resolveImage(sys)
+ if err != nil {
+ return err
+ }
+ layers, err := s.transport.store.DeleteImage(img.ID, true)
+ if err == nil {
+ logrus.Debugf("deleted image %q", img.ID)
+ for _, layer := range layers {
+ logrus.Debugf("deleted layer %q", layer)
+ }
+ }
+ return err
+}
+
+func (s storageReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+ return newImageSource(sys, s)
+}
+
+func (s storageReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+ return newImageDestination(sys, s)
+}
+
+// ResolveReference finds the underlying storage image for a storage.Transport reference.
+// It returns that image, and an updated reference which can be used to refer back to the _same_
+// image again.
+//
+// This matters if the input reference contains a tagged name; the destination of the tag can
+// move in local storage. The updated reference returned by this function contains the resolved
+// image ID, so later uses of that updated reference will either continue to refer to the same
+// image, or fail.
+//
+// Note that it _is_ possible for the later uses to fail, either because the image was removed
+// completely, or because the name used in the reference was untagged (even if the underlying image
+// ID still exists in local storage).
+//
+// Returns an error matching ErrNoSuchImage if an image matching ref was not found.
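+//
+// A minimal usage sketch (hypothetical caller, error handling elided):
+//
+//	ref, _ := Transport.ParseReference("busybox:latest")
+//	resolved, img, _ := ResolveReference(ref)
+//	// resolved now embeds img.ID, so it keeps naming this exact image
+//	// even if the "busybox:latest" tag is later moved to another image.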
+func ResolveReference(ref types.ImageReference) (types.ImageReference, *storage.Image, error) { + sref, ok := ref.(*storageReference) + if !ok { + return nil, nil, fmt.Errorf("trying to resolve a non-%s: reference %q", Transport.Name(), + transports.ImageName(ref)) + } + clone := *sref // A shallow copy we can update + img, err := clone.resolveImage(nil) + if err != nil { + return nil, nil, err + } + return clone, img, nil +} diff --git a/vendor/github.com/containers/image/v5/storage/storage_src.go b/vendor/github.com/containers/image/v5/storage/storage_src.go new file mode 100644 index 0000000000..f1ce0861e0 --- /dev/null +++ b/vendor/github.com/containers/image/v5/storage/storage_src.go @@ -0,0 +1,403 @@ +//go:build !containers_image_storage_stub +// +build !containers_image_storage_stub + +package storage + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "sync" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/image" + "github.com/containers/image/v5/internal/imagesource/impl" + "github.com/containers/image/v5/internal/imagesource/stubs" + "github.com/containers/image/v5/internal/signature" + "github.com/containers/image/v5/internal/tmpdir" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/containers/storage" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/ioutils" + digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" +) + +type storageImageSource struct { + impl.Compat + impl.PropertyMethodsInitialize + stubs.NoGetBlobAtInitialize + + imageRef storageReference + image *storage.Image + systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files + layerPosition map[digest.Digest]int // Where we are in reading a blob's layers + cachedManifest []byte // A cached copy of the manifest, if already known, or nil + getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions + SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice + SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // List of sizes of each signature slice +} + +// newImageSource sets up an image for reading. +func newImageSource(sys *types.SystemContext, imageRef storageReference) (*storageImageSource, error) { + // First, locate the image. + img, err := imageRef.resolveImage(sys) + if err != nil { + return nil, err + } + + // Build the reader object. + image := &storageImageSource{ + PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{ + HasThreadSafeGetBlob: true, + }), + NoGetBlobAtInitialize: stubs.NoGetBlobAt(imageRef), + + imageRef: imageRef, + systemContext: sys, + image: img, + layerPosition: make(map[digest.Digest]int), + SignatureSizes: []int{}, + SignaturesSizes: make(map[digest.Digest][]int), + } + image.Compat = impl.AddCompat(image) + if img.Metadata != "" { + if err := json.Unmarshal([]byte(img.Metadata), image); err != nil { + return nil, fmt.Errorf("decoding metadata for source image: %w", err) + } + } + return image, nil +} + +// Reference returns the image reference that we used to find this image. 
+func (s *storageImageSource) Reference() types.ImageReference {
+	return s.imageRef
+}
+
+// Close cleans up any resources we tied up while reading the image.
+func (s *storageImageSource) Close() error {
+	return nil
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) {
+	// We need a valid digest value.
+	digest := info.Digest
+	err = digest.Validate()
+	if err != nil {
+		return nil, 0, err
+	}
+
+	if digest == image.GzippedEmptyLayerDigest {
+		return io.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil
+	}
+
+	// Check if the blob corresponds to a diff that was used to initialize any layers. Our
+	// callers should try to retrieve layers using their uncompressed digests, so no need to
+	// check if they're using one of the compressed digests, which we can't reproduce anyway.
+	layers, _ := s.imageRef.transport.store.LayersByUncompressedDigest(digest)
+
+	// If it's not a layer, then it must be a data item.
+	if len(layers) == 0 {
+		b, err := s.imageRef.transport.store.ImageBigData(s.image.ID, digest.String())
+		if err != nil {
+			return nil, 0, err
+		}
+		r := bytes.NewReader(b)
+		logrus.Debugf("exporting opaque data as blob %q", digest.String())
+		return io.NopCloser(r), int64(r.Len()), nil
+	}
+
+	// NOTE: the blob is first written to a temporary file and subsequently
+	// closed. The intention is to keep the time we own the storage lock
+	// as short as possible to allow other processes to access the storage.
+	rc, n, _, err = s.getBlobAndLayerID(digest, layers)
+	if err != nil {
+		return nil, 0, err
+	}
+	defer rc.Close()
+
+	tmpFile, err := tmpdir.CreateBigFileTemp(s.systemContext, "")
+	if err != nil {
+		return nil, 0, err
+	}
+	success := false
+	tmpFileRemovePending := true
+	defer func() {
+		if !success {
+			tmpFile.Close()
+			if tmpFileRemovePending {
+				os.Remove(tmpFile.Name())
+			}
+		}
+	}()
+	// On Unix and modern Windows (2022 at least) we can eagerly unlink the file to ensure it's automatically
+	// cleaned up on process termination (or if the caller forgets to invoke Close())
+	// On older versions of Windows we will have to fallback to relying on the caller to invoke Close()
+	if err := os.Remove(tmpFile.Name()); err != nil {
+		tmpFileRemovePending = false
+	}
+
+	if _, err := io.Copy(tmpFile, rc); err != nil {
+		return nil, 0, err
+	}
+	if _, err := tmpFile.Seek(0, io.SeekStart); err != nil {
+		return nil, 0, err
+	}
+
+	success = true
+
+	if tmpFileRemovePending {
+		return ioutils.NewReadCloserWrapper(tmpFile, func() error {
+			tmpFile.Close()
+			return os.Remove(tmpFile.Name())
+		}), n, nil
+	}
+
+	return tmpFile, n, nil
+}
+
+// getBlobAndLayerID reads the data blob or filesystem layer which matches the digest and size, if given.
+func (s *storageImageSource) getBlobAndLayerID(digest digest.Digest, layers []storage.Layer) (rc io.ReadCloser, n int64, layerID string, err error) {
+	var layer storage.Layer
+	var diffOptions *storage.DiffOptions
+
+	// Step through the list of matching layers.
Tests may want to verify that if we have multiple layers + // which claim to have the same contents, that we actually do have multiple layers, otherwise we could + // just go ahead and use the first one every time. + s.getBlobMutex.Lock() + i := s.layerPosition[digest] + s.layerPosition[digest] = i + 1 + s.getBlobMutex.Unlock() + if len(layers) > 0 { + layer = layers[i%len(layers)] + } + // Force the storage layer to not try to match any compression that was used when the layer was first + // handed to it. + noCompression := archive.Uncompressed + diffOptions = &storage.DiffOptions{ + Compression: &noCompression, + } + if layer.UncompressedSize < 0 { + n = -1 + } else { + n = layer.UncompressedSize + } + logrus.Debugf("exporting filesystem layer %q without compression for blob %q", layer.ID, digest) + rc, err = s.imageRef.transport.store.Diff("", layer.ID, diffOptions) + if err != nil { + return nil, -1, "", err + } + return rc, n, layer.ID, err +} + +// GetManifest() reads the image's manifest. +func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) (manifestBlob []byte, mimeType string, err error) { + if instanceDigest != nil { + key := manifestBigDataKey(*instanceDigest) + blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key) + if err != nil { + return nil, "", fmt.Errorf("reading manifest for image instance %q: %w", *instanceDigest, err) + } + return blob, manifest.GuessMIMEType(blob), err + } + if len(s.cachedManifest) == 0 { + // The manifest is stored as a big data item. + // Prefer the manifest corresponding to the user-specified digest, if available. + if s.imageRef.named != nil { + if digested, ok := s.imageRef.named.(reference.Digested); ok { + key := manifestBigDataKey(digested.Digest()) + blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key) + if err != nil && !os.IsNotExist(err) { // os.IsNotExist is true if the image exists but there is no data corresponding to key + return nil, "", err + } + if err == nil { + s.cachedManifest = blob + } + } + } + // If the user did not specify a digest, or this is an old image stored before manifestBigDataKey was introduced, use the default manifest. + // Note that the manifest may not match the expected digest, and that is likely to fail eventually, e.g. in c/image/image/UnparsedImage.Manifest(). + if len(s.cachedManifest) == 0 { + cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, storage.ImageDigestBigDataKey) + if err != nil { + return nil, "", err + } + s.cachedManifest = cachedBlob + } + } + return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err +} + +// LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of +// the image, after they've been decompressed. 
+func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { + manifestBlob, manifestType, err := s.GetManifest(ctx, instanceDigest) + if err != nil { + return nil, fmt.Errorf("reading image manifest for %q: %w", s.image.ID, err) + } + if manifest.MIMETypeIsMultiImage(manifestType) { + return nil, errors.New("can't copy layers for a manifest list (shouldn't be attempted)") + } + man, err := manifest.FromBlob(manifestBlob, manifestType) + if err != nil { + return nil, fmt.Errorf("parsing image manifest for %q: %w", s.image.ID, err) + } + + uncompressedLayerType := "" + switch manifestType { + case imgspecv1.MediaTypeImageManifest: + uncompressedLayerType = imgspecv1.MediaTypeImageLayer + case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType: + uncompressedLayerType = manifest.DockerV2SchemaLayerMediaTypeUncompressed + } + + physicalBlobInfos := []types.BlobInfo{} + layerID := s.image.TopLayer + for layerID != "" { + layer, err := s.imageRef.transport.store.Layer(layerID) + if err != nil { + return nil, fmt.Errorf("reading layer %q in image %q: %w", layerID, s.image.ID, err) + } + if layer.UncompressedDigest == "" { + return nil, fmt.Errorf("uncompressed digest for layer %q is unknown", layerID) + } + if layer.UncompressedSize < 0 { + return nil, fmt.Errorf("uncompressed size for layer %q is unknown", layerID) + } + blobInfo := types.BlobInfo{ + Digest: layer.UncompressedDigest, + Size: layer.UncompressedSize, + MediaType: uncompressedLayerType, + } + physicalBlobInfos = append([]types.BlobInfo{blobInfo}, physicalBlobInfos...) + layerID = layer.Parent + } + + res, err := buildLayerInfosForCopy(man.LayerInfos(), physicalBlobInfos) + if err != nil { + return nil, fmt.Errorf("creating LayerInfosForCopy of image %q: %w", s.image.ID, err) + } + return res, nil +} + +// buildLayerInfosForCopy builds a LayerInfosForCopy return value based on manifestInfos from the original manifest, +// but using layer data which we can actually produce — physicalInfos for non-empty layers, +// and image.GzippedEmptyLayer for empty ones. +// (This is split basically only to allow easily unit-testing the part that has no dependencies on the external environment.) +func buildLayerInfosForCopy(manifestInfos []manifest.LayerInfo, physicalInfos []types.BlobInfo) ([]types.BlobInfo, error) { + nextPhysical := 0 + res := make([]types.BlobInfo, len(manifestInfos)) + for i, mi := range manifestInfos { + if mi.EmptyLayer { + res[i] = types.BlobInfo{ + Digest: image.GzippedEmptyLayerDigest, + Size: int64(len(image.GzippedEmptyLayer)), + MediaType: mi.MediaType, + } + } else { + if nextPhysical >= len(physicalInfos) { + return nil, fmt.Errorf("expected more than %d physical layers to exist", len(physicalInfos)) + } + res[i] = physicalInfos[nextPhysical] // FIXME? Should we preserve more data in manifestInfos? Notably the current approach correctly removes zstd:chunked metadata annotations. + nextPhysical++ + } + } + if nextPhysical != len(physicalInfos) { + return nil, fmt.Errorf("used only %d out of %d physical layers", nextPhysical, len(physicalInfos)) + } + return res, nil +} + +// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service. 
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *storageImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+	var offset int
+	signatureBlobs := []byte{}
+	signatureSizes := s.SignatureSizes
+	key := "signatures"
+	instance := "default instance"
+	if instanceDigest != nil {
+		signatureSizes = s.SignaturesSizes[*instanceDigest]
+		key = signatureBigDataKey(*instanceDigest)
+		instance = instanceDigest.Encoded()
+	}
+	if len(signatureSizes) > 0 {
+		data, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
+		if err != nil {
+			return nil, fmt.Errorf("looking up signatures data for image %q (%s): %w", s.image.ID, instance, err)
+		}
+		signatureBlobs = data
+	}
+	res := []signature.Signature{}
+	for _, length := range signatureSizes {
+		if offset+length > len(signatureBlobs) {
+			return nil, fmt.Errorf("looking up signatures data for image %q (%s): expected at least %d bytes, only found %d", s.image.ID, instance, offset+length, len(signatureBlobs))
+		}
+		sig, err := signature.FromBlob(signatureBlobs[offset : offset+length])
+		if err != nil {
+			return nil, fmt.Errorf("parsing signature at (%d, %d): %w", offset, length, err)
+		}
+		res = append(res, sig)
+		offset += length
+	}
+	if offset != len(signatureBlobs) {
+		return nil, fmt.Errorf("signatures data (%s) contained %d extra bytes", instance, len(signatureBlobs)-offset)
+	}
+	return res, nil
+}
+
+// getSize() adds up the sizes of the image's data blobs (which includes the configuration blob), the
+// signatures, and the uncompressed sizes of all of the image's layers.
+func (s *storageImageSource) getSize() (int64, error) {
+	var sum int64
+	// Size up the data blobs.
+	dataNames, err := s.imageRef.transport.store.ListImageBigData(s.image.ID)
+	if err != nil {
+		return -1, fmt.Errorf("reading image %q: %w", s.image.ID, err)
+	}
+	for _, dataName := range dataNames {
+		bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.image.ID, dataName)
+		if err != nil {
+			return -1, fmt.Errorf("reading data blob size %q for %q: %w", dataName, s.image.ID, err)
+		}
+		sum += bigSize
+	}
+	// Add the signature sizes.
+	for _, sigSize := range s.SignatureSizes {
+		sum += int64(sigSize)
+	}
+	// Walk the layer list.
+	layerID := s.image.TopLayer
+	for layerID != "" {
+		layer, err := s.imageRef.transport.store.Layer(layerID)
+		if err != nil {
+			return -1, err
+		}
+		if layer.UncompressedDigest == "" || layer.UncompressedSize < 0 {
+			return -1, fmt.Errorf("size for layer %q is unknown, failing getSize()", layerID)
+		}
+		sum += layer.UncompressedSize
+		if layer.Parent == "" {
+			break
+		}
+		layerID = layer.Parent
+	}
+	return sum, nil
+}
+
+// Size() adds up the sizes of the image's data blobs (which includes the configuration blob), the
+// signatures, and the uncompressed sizes of all of the image's layers.
+func (s *storageImageSource) Size() (int64, error) {
+	return s.getSize()
+}
diff --git a/vendor/github.com/containers/image/v5/storage/storage_transport.go b/vendor/github.com/containers/image/v5/storage/storage_transport.go
new file mode 100644
index 0000000000..deb500b4d2
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/storage/storage_transport.go
@@ -0,0 +1,416 @@
+//go:build !containers_image_storage_stub
+// +build !containers_image_storage_stub
+
+package storage
+
+import (
+	"errors"
+	"fmt"
+	"path/filepath"
+	"strings"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/transports"
+	"github.com/containers/image/v5/types"
+	"github.com/containers/storage"
+	"github.com/containers/storage/pkg/idtools"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/sirupsen/logrus"
+)
+
+const (
+	minimumTruncatedIDLength = 3
+)
+
+func init() {
+	transports.Register(Transport)
+}
+
+var (
+	// Transport is an ImageTransport that uses either a default
+	// storage.Store or one that it's explicitly told to use.
+	Transport StoreTransport = &storageTransport{}
+	// ErrInvalidReference is returned when ParseReference() is passed an
+	// empty reference.
+	ErrInvalidReference = errors.New("invalid reference")
+	// ErrPathNotAbsolute is returned when a graph root is not an absolute
+	// path name.
+	ErrPathNotAbsolute = errors.New("path name is not absolute")
+)
+
+// StoreTransport is an ImageTransport that uses a storage.Store to parse
+// references, either its own default or one that it's told to use.
+type StoreTransport interface {
+	types.ImageTransport
+	// SetStore sets the default store for this transport.
+	SetStore(storage.Store)
+	// GetStoreIfSet returns the default store for this transport, or nil if not set/determined yet.
+	GetStoreIfSet() storage.Store
+	// GetImage retrieves the image from the transport's store that's named
+	// by the reference.
+	// Deprecated: Surprisingly, with a StoreTransport reference which contains an ID,
+	// this ignores that ID; and repeated calls of GetStoreImage with the same named reference
+	// can return different images, with no way for the caller to "freeze" the storage.Image identity
+	// without discarding the name entirely.
+	//
+	// Use storage.ResolveReference instead; note that if the image is not found, ResolveReference returns
+	// c/image/v5/storage.ErrNoSuchImage, not c/storage.ErrImageUnknown.
+	GetImage(types.ImageReference) (*storage.Image, error)
+	// GetStoreImage retrieves the image from a specified store that's named
+	// by the reference.
+	//
+	// Deprecated: Surprisingly, with a StoreTransport reference which contains an ID,
+	// this ignores that ID; and repeated calls of GetStoreImage with the same named reference
+	// can return different images, with no way for the caller to "freeze" the storage.Image identity
+	// without discarding the name entirely.
+	//
+	// Also, a StoreTransport reference already contains a store, so providing another one is redundant.
+	//
+	// Use storage.ResolveReference instead; note that if the image is not found, ResolveReference returns
+	// c/image/v5/storage.ErrNoSuchImage, not c/storage.ErrImageUnknown.
+	GetStoreImage(storage.Store, types.ImageReference) (*storage.Image, error)
+	// ParseStoreReference parses a reference, overriding any store
+	// specification that it may contain.
+ ParseStoreReference(store storage.Store, reference string) (*storageReference, error) + // NewStoreReference creates a reference for (named@ID) in store. + // either of name or ID can be unset; named must not be a reference.IsNameOnly. + NewStoreReference(store storage.Store, named reference.Named, id string) (*storageReference, error) + // SetDefaultUIDMap sets the default UID map to use when opening stores. + SetDefaultUIDMap(idmap []idtools.IDMap) + // SetDefaultGIDMap sets the default GID map to use when opening stores. + SetDefaultGIDMap(idmap []idtools.IDMap) + // DefaultUIDMap returns the default UID map used when opening stores. + DefaultUIDMap() []idtools.IDMap + // DefaultGIDMap returns the default GID map used when opening stores. + DefaultGIDMap() []idtools.IDMap +} + +type storageTransport struct { + store storage.Store + defaultUIDMap []idtools.IDMap + defaultGIDMap []idtools.IDMap +} + +func (s *storageTransport) Name() string { + // Still haven't really settled on a name. + return "containers-storage" +} + +// SetStore sets the Store object which the Transport will use for parsing +// references when information about a Store is not directly specified as part +// of the reference. If one is not set, the library will attempt to initialize +// one with default settings when a reference needs to be parsed. Calling +// SetStore does not affect previously parsed references. +func (s *storageTransport) SetStore(store storage.Store) { + s.store = store +} + +// GetStoreIfSet returns the default store for this transport, as set using SetStore() or initialized by default, or nil if not set/determined yet. +func (s *storageTransport) GetStoreIfSet() storage.Store { + return s.store +} + +// SetDefaultUIDMap sets the default UID map to use when opening stores. +func (s *storageTransport) SetDefaultUIDMap(idmap []idtools.IDMap) { + s.defaultUIDMap = idmap +} + +// SetDefaultGIDMap sets the default GID map to use when opening stores. +func (s *storageTransport) SetDefaultGIDMap(idmap []idtools.IDMap) { + s.defaultGIDMap = idmap +} + +// DefaultUIDMap returns the default UID map used when opening stores. +func (s *storageTransport) DefaultUIDMap() []idtools.IDMap { + return s.defaultUIDMap +} + +// DefaultGIDMap returns the default GID map used when opening stores. +func (s *storageTransport) DefaultGIDMap() []idtools.IDMap { + return s.defaultGIDMap +} + +// ParseStoreReference takes a name or an ID, tries to figure out which it is +// relative to the given store, and returns it in a reference object. +func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) { + if ref == "" { + return nil, fmt.Errorf("%q is an empty reference: %w", ref, ErrInvalidReference) + } + if ref[0] == '[' { + // Ignore the store specifier. + closeIndex := strings.IndexRune(ref, ']') + if closeIndex < 1 { + return nil, fmt.Errorf("store specifier in %q did not end: %w", ref, ErrInvalidReference) + } + ref = ref[closeIndex+1:] + } + + // The reference may end with an image ID. Image IDs and digests use the same "@" separator; + // here we only peel away an image ID, and leave digests alone. + split := strings.LastIndex(ref, "@") + id := "" + if split != -1 { + possibleID := ref[split+1:] + if possibleID == "" { + return nil, fmt.Errorf("empty trailing digest or ID in %q: %w", ref, ErrInvalidReference) + } + // If it looks like a digest, leave it alone for now. 
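+		// (Illustration, hypothetical values: "busybox@sha256:<64 hex digits>" keeps
+		// the digest attached to the name, while "busybox@0123abcd" is treated as a
+		// possibly-truncated image ID and peeled off below.)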
+		if _, err := digest.Parse(possibleID); err != nil {
+			// Otherwise…
+			if err := validateImageID(possibleID); err == nil {
+				id = possibleID // … it is a full ID
+			} else if img, err := store.Image(possibleID); err == nil && img != nil && len(possibleID) >= minimumTruncatedIDLength && strings.HasPrefix(img.ID, possibleID) {
+				// … it is a truncated version of the ID of an image that's present in local storage,
+				// so we might as well use the expanded value.
+				id = img.ID
+			} else {
+				return nil, fmt.Errorf("%q does not look like an image ID or digest: %w", possibleID, ErrInvalidReference)
+			}
+			// We have recognized an image ID; peel it off.
+			ref = ref[:split]
+		}
+	}
+
+	// If we only have one @-delimited portion, then _maybe_ it's a truncated image ID. Only check on that if it's
+	// at least of what we guess is a reasonable minimum length, because we don't want a really short value
+	// like "a" matching an image by ID prefix when the input was actually meant to specify an image name.
+	if id == "" && len(ref) >= minimumTruncatedIDLength && !strings.ContainsAny(ref, "@:") {
+		if img, err := store.Image(ref); err == nil && img != nil && strings.HasPrefix(img.ID, ref) {
+			// It's a truncated version of the ID of an image that's present in local storage;
+			// we need to expand it.
+			id = img.ID
+			ref = ""
+		}
+	}
+
+	var named reference.Named
+	// Unless we have an un-named "ID" or "@ID" reference (where ID might only have been a prefix), which has been
+	// completely parsed above, the initial portion should be a name, possibly with a tag and/or a digest.
+	if ref != "" {
+		var err error
+		named, err = reference.ParseNormalizedNamed(ref)
+		if err != nil {
+			return nil, fmt.Errorf("parsing named reference %q: %w", ref, err)
+		}
+		named = reference.TagNameOnly(named)
+	}
+
+	result, err := s.NewStoreReference(store, named, id)
+	if err != nil {
+		return nil, err
+	}
+	logrus.Debugf("parsed reference into %q", result.StringWithinTransport())
+	return result, nil
+}
+
+// NewStoreReference creates a reference for (named@ID) in store.
+// Either of name or ID can be unset; named must not be a reference.IsNameOnly.
+func (s *storageTransport) NewStoreReference(store storage.Store, named reference.Named, id string) (*storageReference, error) {
+	return newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, named, id)
+}
+
+func (s *storageTransport) GetStore() (storage.Store, error) {
+	// Return the transport's previously-set store. If we don't have one
+	// of those, initialize one now.
+	if s.store == nil {
+		options, err := storage.DefaultStoreOptionsAutoDetectUID()
+		if err != nil {
+			return nil, err
+		}
+		options.UIDMap = s.defaultUIDMap
+		options.GIDMap = s.defaultGIDMap
+		store, err := storage.GetStore(options)
+		if err != nil {
+			return nil, err
+		}
+		s.store = store
+	}
+	return s.store, nil
+}
+
+// ParseReference takes a name and a tag or digest and/or ID
+// ("_name_"/"@_id_"/"_name_:_tag_"/"_name_:_tag_@_id_"/"_name_@_digest_"/"_name_@_digest_@_id_"/"_name_:_tag_@_digest_"/"_name_:_tag_@_digest_@_id_"),
+// possibly prefixed with a store specifier in the form "[_graphroot_]" or
+// "[_driver_@_graphroot_]" or "[_driver_@_graphroot_+_runroot_]" or
+// "[_driver_@_graphroot_:_options_]" or "[_driver_@_graphroot_+_runroot_:_options_]",
+// tries to figure out which it is, and returns it in a reference object.
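+// For example (illustrative values only), each of the following would be
+// accepted: "busybox", "docker.io/library/busybox:latest",
+// "[overlay@/var/lib/containers/storage]busybox:latest", and
+// "[overlay@/var/lib/containers/storage+/run/containers/storage]@0123456789ab".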
+// If _id_ is the ID of an image that's present in local storage, it can be truncated, and
+// even be specified as if it were a _name_ value.
+func (s *storageTransport) ParseReference(reference string) (types.ImageReference, error) {
+	var store storage.Store
+	// Check if there's a store location prefix. If there is, then it
+	// needs to match a store that was previously initialized using
+	// storage.GetStore(), or be enough to let the storage library fill out
+	// the rest using knowledge that it has from elsewhere.
+	if len(reference) > 0 && reference[0] == '[' {
+		closeIndex := strings.IndexRune(reference, ']')
+		if closeIndex < 1 {
+			return nil, ErrInvalidReference
+		}
+		storeSpec := reference[1:closeIndex]
+		reference = reference[closeIndex+1:]
+		// Peel off a "driver@" from the start.
+		driverInfo := ""
+		driverPart1, driverPart2, gotDriver := strings.Cut(storeSpec, "@")
+		if !gotDriver {
+			storeSpec = driverPart1
+			if storeSpec == "" {
+				return nil, ErrInvalidReference
+			}
+		} else {
+			driverInfo = driverPart1
+			if driverInfo == "" {
+				return nil, ErrInvalidReference
+			}
+			storeSpec = driverPart2
+			if storeSpec == "" {
+				return nil, ErrInvalidReference
+			}
+		}
+		// Peel off a ":options" from the end.
+		var options []string
+		storeSpec, optionsPart, gotOptions := strings.Cut(storeSpec, ":")
+		if gotOptions {
+			options = strings.Split(optionsPart, ",")
+		}
+		// Peel off a "+runroot" from the new end.
+		storeSpec, runRootInfo, _ := strings.Cut(storeSpec, "+") // runRootInfo is "" if there is no "+"
+		// The rest is our graph root.
+		rootInfo := storeSpec
+		// Check that any paths are absolute paths.
+		if rootInfo != "" && !filepath.IsAbs(rootInfo) {
+			return nil, ErrPathNotAbsolute
+		}
+		if runRootInfo != "" && !filepath.IsAbs(runRootInfo) {
+			return nil, ErrPathNotAbsolute
+		}
+		store2, err := storage.GetStore(storage.StoreOptions{
+			GraphDriverName:    driverInfo,
+			GraphRoot:          rootInfo,
+			RunRoot:            runRootInfo,
+			GraphDriverOptions: options,
+			UIDMap:             s.defaultUIDMap,
+			GIDMap:             s.defaultGIDMap,
+		})
+		if err != nil {
+			return nil, err
+		}
+		store = store2
+	} else {
+		// We didn't have a store spec, so use the default.
+		store2, err := s.GetStore()
+		if err != nil {
+			return nil, err
+		}
+		store = store2
+	}
+	return s.ParseStoreReference(store, reference)
+}
+
+// Deprecated: Surprisingly, with a StoreTransport reference which contains an ID,
+// this ignores that ID; and repeated calls of GetStoreImage with the same named reference
+// can return different images, with no way for the caller to "freeze" the storage.Image identity
+// without discarding the name entirely.
+//
+// Also, a StoreTransport reference already contains a store, so providing another one is redundant.
+//
+// Use storage.ResolveReference instead; note that if the image is not found, ResolveReference returns
+// c/image/v5/storage.ErrNoSuchImage, not c/storage.ErrImageUnknown.
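+//
+// A rough migration sketch (illustrative only; the two calls are alternatives):
+//
+//	img, err := Transport.GetStoreImage(store, ref) // deprecated: result may drift between calls
+//
+//	resolved, img, err := storage.ResolveReference(ref) // preferred: "resolved" pins img's ID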
+func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageReference) (*storage.Image, error) {
+	dref := ref.DockerReference()
+	if dref != nil {
+		if img, err := store.Image(dref.String()); err == nil {
+			return img, nil
+		}
+	}
+	if sref, ok := ref.(*storageReference); ok {
+		tmpRef := *sref
+		if img, err := tmpRef.resolveImage(nil); err == nil {
+			return img, nil
+		}
+	}
+	return nil, storage.ErrImageUnknown
+}
+
+// Deprecated: Surprisingly, with a StoreTransport reference which contains an ID,
+// this ignores that ID; and repeated calls of GetStoreImage with the same named reference
+// can return different images, with no way for the caller to "freeze" the storage.Image identity
+// without discarding the name entirely.
+//
+// Use storage.ResolveReference instead; note that if the image is not found, ResolveReference returns
+// c/image/v5/storage.ErrNoSuchImage, not c/storage.ErrImageUnknown.
+func (s *storageTransport) GetImage(ref types.ImageReference) (*storage.Image, error) {
+	store, err := s.GetStore()
+	if err != nil {
+		return nil, err
+	}
+	return s.GetStoreImage(store, ref)
+}
+
+func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error {
+	// Check that there's a store location prefix. Values we're passed are
+	// expected to come from PolicyConfigurationIdentity or
+	// PolicyConfigurationNamespaces, so if there's no store location,
+	// something's wrong.
+	if scope[0] != '[' {
+		return ErrInvalidReference
+	}
+	// Parse the store location prefix.
+	closeIndex := strings.IndexRune(scope, ']')
+	if closeIndex < 1 {
+		return ErrInvalidReference
+	}
+	storeSpec := scope[1:closeIndex]
+	scope = scope[closeIndex+1:]
+	storeInfo := strings.SplitN(storeSpec, "@", 2)
+	if len(storeInfo) == 1 && storeInfo[0] != "" {
+		// One component: the graph root.
+		if !filepath.IsAbs(storeInfo[0]) {
+			return ErrPathNotAbsolute
+		}
+	} else if len(storeInfo) == 2 && storeInfo[0] != "" && storeInfo[1] != "" {
+		// Two components: the driver type and the graph root.
+		if !filepath.IsAbs(storeInfo[1]) {
+			return ErrPathNotAbsolute
+		}
+	} else {
+		// Anything else: scope specified in a form we don't
+		// recognize.
+		return ErrInvalidReference
+	}
+	// That might be all of it, and that's okay.
+	if scope == "" {
+		return nil
+	}
+
+	fields := strings.SplitN(scope, "@", 3)
+	switch len(fields) {
+	case 1: // name only
+	case 2: // name:tag@ID or name[:tag]@digest
+		if idErr := validateImageID(fields[1]); idErr != nil {
+			if _, digestErr := digest.Parse(fields[1]); digestErr != nil {
+				return fmt.Errorf("%v is neither a valid digest(%s) nor a valid ID(%s)", fields[1], digestErr.Error(), idErr.Error())
+			}
+		}
+	case 3: // name[:tag]@digest@ID
+		if _, err := digest.Parse(fields[1]); err != nil {
+			return err
+		}
+		if err := validateImageID(fields[2]); err != nil {
+			return err
+		}
+	default: // Coverage: This should never happen
+		return errors.New("Internal error: unexpected number of fields from strings.SplitN")
+	}
+	// As for field[0], if it is non-empty at all:
+	// FIXME? We could be verifying the various character set and length restrictions
+	// from docker/distribution/reference.regexp.go, but other than that there
+	// are few semantically invalid strings.
+ return nil +} + +// validateImageID returns nil if id is a valid (full) image ID, or an error +func validateImageID(id string) error { + _, err := digest.Parse("sha256:" + id) + return err +} diff --git a/vendor/github.com/containers/image/v5/tarball/doc.go b/vendor/github.com/containers/image/v5/tarball/doc.go new file mode 100644 index 0000000000..064c78b177 --- /dev/null +++ b/vendor/github.com/containers/image/v5/tarball/doc.go @@ -0,0 +1,61 @@ +// Package tarball provides a way to generate images using one or more layer +// tarballs and an optional template configuration. +// +// An example: +// +// package main +// +// import ( +// "context" +// +// cp "github.com/containers/image/v5/copy" +// "github.com/containers/image/v5/signature" +// "github.com/containers/image/v5/tarball" +// "github.com/containers/image/v5/transports/alltransports" +// "github.com/containers/image/v5/types" +// imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +// ) +// +// func imageFromTarball() { +// src, err := alltransports.ParseImageName("tarball:/var/cache/mock/fedora-26-x86_64/root_cache/cache.tar.gz") +// // - or - +// // src, err := tarball.Transport.ParseReference("/var/cache/mock/fedora-26-x86_64/root_cache/cache.tar.gz") +// if err != nil { +// panic(err) +// } +// updater, ok := src.(tarball.ConfigUpdater) +// if !ok { +// panic("unexpected: a tarball reference should implement tarball.ConfigUpdater") +// } +// config := imgspecv1.Image{ +// Config: imgspecv1.ImageConfig{ +// Cmd: []string{"/bin/bash"}, +// }, +// } +// annotations := make(map[string]string) +// annotations[imgspecv1.AnnotationDescription] = "test image built from a mock root cache" +// err = updater.ConfigUpdate(config, annotations) +// if err != nil { +// panic(err) +// } +// dest, err := alltransports.ParseImageName("docker-daemon:mock:latest") +// if err != nil { +// panic(err) +// } +// +// policy, err := signature.DefaultPolicy(nil) +// if err != nil { +// panic(err) +// } +// +// pc, err := signature.NewPolicyContext(policy) +// if err != nil { +// panic(err) +// } +// defer pc.Destroy() +// _, err = cp.Image(context.TODO(), pc, dest, src, nil) +// if err != nil { +// panic(err) +// } +// } +package tarball diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_reference.go b/vendor/github.com/containers/image/v5/tarball/tarball_reference.go new file mode 100644 index 0000000000..d5578d9e8a --- /dev/null +++ b/vendor/github.com/containers/image/v5/tarball/tarball_reference.go @@ -0,0 +1,82 @@ +package tarball + +import ( + "context" + "fmt" + "os" + "strings" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/image" + "github.com/containers/image/v5/types" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/exp/maps" +) + +// ConfigUpdater is an interface that ImageReferences for "tarball" images also +// implement. It can be used to set values for a configuration, and to set +// image annotations which will be present in the images returned by the +// reference's NewImage() or NewImageSource() methods. 
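+//
+// A minimal sketch of the intended use (assuming "ref" came from this
+// transport's ParseReference; the command and annotation values are invented
+// for the example):
+//
+//	if updater, ok := ref.(tarball.ConfigUpdater); ok {
+//		cfg := imgspecv1.Image{Config: imgspecv1.ImageConfig{Cmd: []string{"/bin/sh"}}}
+//		if err := updater.ConfigUpdate(cfg, map[string]string{"org.example.note": "demo"}); err != nil {
+//			panic(err)
+//		}
+//	}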
+type ConfigUpdater interface {
+	ConfigUpdate(config imgspecv1.Image, annotations map[string]string) error
+}
+
+type tarballReference struct {
+	config      imgspecv1.Image
+	annotations map[string]string
+	filenames   []string
+	stdin       []byte
+}
+
+// ConfigUpdate updates the image's default configuration and adds annotations
+// which will be visible in source images created using this reference.
+func (r *tarballReference) ConfigUpdate(config imgspecv1.Image, annotations map[string]string) error {
+	r.config = config
+	if r.annotations == nil {
+		r.annotations = make(map[string]string)
+	}
+	maps.Copy(r.annotations, annotations)
+	return nil
+}
+
+func (r *tarballReference) Transport() types.ImageTransport {
+	return Transport
+}
+
+func (r *tarballReference) StringWithinTransport() string {
+	return strings.Join(r.filenames, ":")
+}
+
+func (r *tarballReference) DockerReference() reference.Named {
+	return nil
+}
+
+func (r *tarballReference) PolicyConfigurationIdentity() string {
+	return ""
+}
+
+func (r *tarballReference) PolicyConfigurationNamespaces() []string {
+	return nil
+}
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (r *tarballReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+	return image.FromReference(ctx, sys, r)
+}
+
+func (r *tarballReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+	for _, filename := range r.filenames {
+		if err := os.Remove(filename); err != nil && !os.IsNotExist(err) {
+			return fmt.Errorf("error removing %q: %w", filename, err)
+		}
+	}
+	return nil
+}
+
+func (r *tarballReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+	return nil, fmt.Errorf(`"tarball:" locations can only be read from, not written to`)
+}
diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_src.go b/vendor/github.com/containers/image/v5/tarball/tarball_src.go
new file mode 100644
index 0000000000..6f9bfaf756
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/tarball/tarball_src.go
@@ -0,0 +1,234 @@
+package tarball
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"runtime"
+	"strings"
+	"time"
+
+	"github.com/containers/image/v5/internal/imagesource/impl"
+	"github.com/containers/image/v5/internal/imagesource/stubs"
+	"github.com/containers/image/v5/types"
+	"github.com/klauspost/pgzip"
+	digest "github.com/opencontainers/go-digest"
+	imgspecs "github.com/opencontainers/image-spec/specs-go"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"golang.org/x/exp/maps"
+)
+
+type tarballImageSource struct {
+	impl.Compat
+	impl.PropertyMethodsInitialize
+	impl.NoSignatures
+	impl.DoesNotAffectLayerInfosForCopy
+	stubs.NoGetBlobAtInitialize
+
+	reference tarballReference
+	blobs     map[digest.Digest]tarballBlob
+	manifest  []byte
+}
+
+// tarballBlob is a blob that tarballImageSource can return by GetBlob.
+type tarballBlob struct { + contents []byte // or nil to read from filename below + filename string // valid if contents == nil + size int64 +} + +func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + // Pick up the layer comment from the configuration's history list, if one is set. + comment := "imported from tarball" + if len(r.config.History) > 0 && r.config.History[0].Comment != "" { + comment = r.config.History[0].Comment + } + + // Gather up the digests, sizes, and history information for all of the files. + blobs := map[digest.Digest]tarballBlob{} + diffIDs := []digest.Digest{} + created := time.Time{} + history := []imgspecv1.History{} + layerDescriptors := []imgspecv1.Descriptor{} + for _, filename := range r.filenames { + var reader io.Reader + var blobTime time.Time + var blob tarballBlob + if filename == "-" { + reader = bytes.NewReader(r.stdin) + blobTime = time.Now() + blob = tarballBlob{ + contents: r.stdin, + size: int64(len(r.stdin)), + } + } else { + file, err := os.Open(filename) + if err != nil { + return nil, err + } + defer file.Close() + reader = file + fileinfo, err := file.Stat() + if err != nil { + return nil, fmt.Errorf("error reading size of %q: %w", filename, err) + } + blobTime = fileinfo.ModTime() + blob = tarballBlob{ + filename: filename, + size: fileinfo.Size(), + } + } + + // Default to assuming the layer is compressed. + layerType := imgspecv1.MediaTypeImageLayerGzip + + // Set up to digest the file as it is. + blobIDdigester := digest.Canonical.Digester() + reader = io.TeeReader(reader, blobIDdigester.Hash()) + + // Set up to digest the file after we maybe decompress it. + diffIDdigester := digest.Canonical.Digester() + uncompressed, err := pgzip.NewReader(reader) + if err == nil { + // It is compressed, so the diffID is the digest of the uncompressed version + reader = io.TeeReader(uncompressed, diffIDdigester.Hash()) + } else { + // It is not compressed, so the diffID and the blobID are going to be the same + diffIDdigester = blobIDdigester + layerType = imgspecv1.MediaTypeImageLayer + uncompressed = nil + } + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). + if _, err := io.Copy(io.Discard, reader); err != nil { + return nil, fmt.Errorf("error reading %q: %v", filename, err) + } + if uncompressed != nil { + uncompressed.Close() + } + + // Grab our uncompressed and possibly-compressed digests and sizes. + diffID := diffIDdigester.Digest() + blobID := blobIDdigester.Digest() + diffIDs = append(diffIDs, diffID) + blobs[blobID] = blob + + history = append(history, imgspecv1.History{ + Created: &blobTime, + CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", diffID.Hex(), os.PathSeparator), + Comment: comment, + }) + // Use the mtime of the most recently modified file as the image's creation time. + if created.Before(blobTime) { + created = blobTime + } + + layerDescriptors = append(layerDescriptors, imgspecv1.Descriptor{ + Digest: blobID, + Size: blob.size, + MediaType: layerType, + }) + } + + // Pick up other defaults from the config in the reference. + config := r.config + if config.Created == nil { + config.Created = &created + } + if config.Architecture == "" { + config.Architecture = runtime.GOARCH + } + if config.OS == "" { + config.OS = runtime.GOOS + } + config.RootFS = imgspecv1.RootFS{ + Type: "layers", + DiffIDs: diffIDs, + } + config.History = history + + // Encode and digest the image configuration blob. 
+ configBytes, err := json.Marshal(&config) + if err != nil { + return nil, fmt.Errorf("error generating configuration blob for %q: %v", strings.Join(r.filenames, separator), err) + } + configID := digest.Canonical.FromBytes(configBytes) + blobs[configID] = tarballBlob{ + contents: configBytes, + size: int64(len(configBytes)), + } + + // Populate a manifest with the configuration blob and the layers. + manifest := imgspecv1.Manifest{ + Versioned: imgspecs.Versioned{ + SchemaVersion: 2, + }, + Config: imgspecv1.Descriptor{ + Digest: configID, + Size: int64(len(configBytes)), + MediaType: imgspecv1.MediaTypeImageConfig, + }, + Layers: layerDescriptors, + Annotations: maps.Clone(r.annotations), + } + + // Encode the manifest. + manifestBytes, err := json.Marshal(&manifest) + if err != nil { + return nil, fmt.Errorf("error generating manifest for %q: %v", strings.Join(r.filenames, separator), err) + } + + // Return the image. + src := &tarballImageSource{ + PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{ + HasThreadSafeGetBlob: false, + }), + NoGetBlobAtInitialize: stubs.NoGetBlobAt(r), + + reference: *r, + blobs: blobs, + manifest: manifestBytes, + } + src.Compat = impl.AddCompat(src) + + return src, nil +} + +func (is *tarballImageSource) Close() error { + return nil +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. +func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + blob, ok := is.blobs[blobinfo.Digest] + if !ok { + return nil, -1, fmt.Errorf("no blob with digest %q found", blobinfo.Digest.String()) + } + if blob.contents != nil { + return io.NopCloser(bytes.NewReader(blob.contents)), int64(len(blob.contents)), nil + } + reader, err := os.Open(blob.filename) + if err != nil { + return nil, -1, err + } + return reader, blob.size, nil +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). 
+func (is *tarballImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + return nil, "", fmt.Errorf("manifest lists are not supported by the %q transport", transportName) + } + return is.manifest, imgspecv1.MediaTypeImageManifest, nil +} + +func (is *tarballImageSource) Reference() types.ImageReference { + return &is.reference +} diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_transport.go b/vendor/github.com/containers/image/v5/tarball/tarball_transport.go new file mode 100644 index 0000000000..63d835530b --- /dev/null +++ b/vendor/github.com/containers/image/v5/tarball/tarball_transport.go @@ -0,0 +1,75 @@ +package tarball + +import ( + "errors" + "fmt" + "io" + "os" + "strings" + + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" +) + +const ( + transportName = "tarball" + separator = ":" +) + +var ( + // Transport implements the types.ImageTransport interface for "tarball:" images, + // which are makeshift images constructed using one or more possibly-compressed tar + // archives. + Transport = &tarballTransport{} +) + +type tarballTransport struct { +} + +func (t *tarballTransport) Name() string { + return transportName +} + +func (t *tarballTransport) ParseReference(reference string) (types.ImageReference, error) { + var stdin []byte + var err error + filenames := strings.Split(reference, separator) + for _, filename := range filenames { + if filename == "-" { + stdin, err = io.ReadAll(os.Stdin) + if err != nil { + return nil, fmt.Errorf("error buffering stdin: %v", err) + } + continue + } + f, err := os.Open(filename) + if err != nil { + return nil, fmt.Errorf("error opening %q: %v", filename, err) + } + f.Close() + } + return NewReference(filenames, stdin) +} + +// NewReference creates a new "tarball:" reference for the listed fileNames. +// If any of the fileNames is "-", the contents of stdin are used instead. +func NewReference(fileNames []string, stdin []byte) (types.ImageReference, error) { + for _, path := range fileNames { + if strings.Contains(path, separator) { + return nil, fmt.Errorf("Invalid path %q: paths including the separator %q are not supported", path, separator) + } + } + return &tarballReference{ + filenames: fileNames, + stdin: stdin, + }, nil +} + +func (t *tarballTransport) ValidatePolicyConfigurationScope(scope string) error { + // See the explanation in daemonReference.PolicyConfigurationIdentity. + return errors.New(`tarball: does not support any scopes except the default "" one`) +} + +func init() { + transports.Register(Transport) +} diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go new file mode 100644 index 0000000000..a8f1c13adc --- /dev/null +++ b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go @@ -0,0 +1,49 @@ +package alltransports + +import ( + "fmt" + "strings" + + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + + // Register all known transports. + // NOTE: Make sure docs/containers-transports.5.md and docs/containers-policy.json.5.md are updated when adding or updating + // a transport. 
+ _ "github.com/containers/image/v5/directory" + _ "github.com/containers/image/v5/docker" + _ "github.com/containers/image/v5/docker/archive" + _ "github.com/containers/image/v5/oci/archive" + _ "github.com/containers/image/v5/oci/layout" + _ "github.com/containers/image/v5/openshift" + _ "github.com/containers/image/v5/sif" + _ "github.com/containers/image/v5/tarball" + // The docker-daemon transport is registeredy by docker_daemon*.go + // The ostree transport is registered by ostree*.go + // The storage transport is registered by storage*.go +) + +// ParseImageName converts a URL-like image name to a types.ImageReference. +func ParseImageName(imgName string) (types.ImageReference, error) { + // Keep this in sync with TransportFromImageName! + transportName, withinTransport, valid := strings.Cut(imgName, ":") + if !valid { + return nil, fmt.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName) + } + transport := transports.Get(transportName) + if transport == nil { + return nil, fmt.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, transportName) + } + return transport.ParseReference(withinTransport) +} + +// TransportFromImageName converts an URL-like name to a types.ImageTransport or nil when +// the transport is unknown or when the input is invalid. +func TransportFromImageName(imageName string) types.ImageTransport { + // Keep this in sync with ParseImageName! + transportName, _, valid := strings.Cut(imageName, ":") + if valid { + return transports.Get(transportName) + } + return nil +} diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon.go b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon.go new file mode 100644 index 0000000000..ffac6e0b8a --- /dev/null +++ b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon.go @@ -0,0 +1,9 @@ +//go:build !containers_image_docker_daemon_stub +// +build !containers_image_docker_daemon_stub + +package alltransports + +import ( + // Register the docker-daemon transport + _ "github.com/containers/image/v5/docker/daemon" +) diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon_stub.go b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon_stub.go new file mode 100644 index 0000000000..ddc347bf35 --- /dev/null +++ b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon_stub.go @@ -0,0 +1,10 @@ +//go:build containers_image_docker_daemon_stub +// +build containers_image_docker_daemon_stub + +package alltransports + +import "github.com/containers/image/v5/transports" + +func init() { + transports.Register(transports.NewStubTransport("docker-daemon")) +} diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/ostree.go b/vendor/github.com/containers/image/v5/transports/alltransports/ostree.go new file mode 100644 index 0000000000..2340702bdc --- /dev/null +++ b/vendor/github.com/containers/image/v5/transports/alltransports/ostree.go @@ -0,0 +1,9 @@ +//go:build containers_image_ostree && linux +// +build containers_image_ostree,linux + +package alltransports + +import ( + // Register the ostree transport + _ "github.com/containers/image/v5/ostree" +) diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/ostree_stub.go 
b/vendor/github.com/containers/image/v5/transports/alltransports/ostree_stub.go new file mode 100644 index 0000000000..8c4175188f --- /dev/null +++ b/vendor/github.com/containers/image/v5/transports/alltransports/ostree_stub.go @@ -0,0 +1,10 @@ +//go:build !containers_image_ostree || !linux +// +build !containers_image_ostree !linux + +package alltransports + +import "github.com/containers/image/v5/transports" + +func init() { + transports.Register(transports.NewStubTransport("ostree")) +} diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/storage.go b/vendor/github.com/containers/image/v5/transports/alltransports/storage.go new file mode 100644 index 0000000000..1e399cdb02 --- /dev/null +++ b/vendor/github.com/containers/image/v5/transports/alltransports/storage.go @@ -0,0 +1,9 @@ +//go:build !containers_image_storage_stub +// +build !containers_image_storage_stub + +package alltransports + +import ( + // Register the storage transport + _ "github.com/containers/image/v5/storage" +) diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/storage_stub.go b/vendor/github.com/containers/image/v5/transports/alltransports/storage_stub.go new file mode 100644 index 0000000000..30802661f1 --- /dev/null +++ b/vendor/github.com/containers/image/v5/transports/alltransports/storage_stub.go @@ -0,0 +1,10 @@ +//go:build containers_image_storage_stub +// +build containers_image_storage_stub + +package alltransports + +import "github.com/containers/image/v5/transports" + +func init() { + transports.Register(transports.NewStubTransport("containers-storage")) +} diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml new file mode 100644 index 0000000000..c41dd5da2c --- /dev/null +++ b/vendor/github.com/containers/storage/.cirrus.yml @@ -0,0 +1,186 @@ +--- + +# Main collection of env. vars to set for all tasks and scripts. 
+env:
+    ####
+    #### Global variables used for all tasks
+    ####
+    # Overrides default location (/tmp/cirrus) for repo clone
+    CIRRUS_WORKING_DIR: "/var/tmp/go/src/github.com/containers/storage"
+    # Shell used to execute all script commands
+    CIRRUS_SHELL: "/bin/bash"
+    # Automation script path relative to $CIRRUS_WORKING_DIR
+    SCRIPT_BASE: "./contrib/cirrus"
+    # No need to go crazy, but grab enough to cover most PRs
+    CIRRUS_CLONE_DEPTH: 50
+
+    ####
+    #### Cache-image names to test with (double-quotes around names are critical)
+    ###
+    FEDORA_NAME: "fedora-39"
+    DEBIAN_NAME: "debian-13"
+
+    # GCE project where images live
+    IMAGE_PROJECT: "libpod-218412"
+    # VM Image built in containers/automation_images
+    IMAGE_SUFFIX: "c20231004t194547z-f39f38d13"
+    FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
+    DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"
+
+    ####
+    #### Command variables to help avoid duplication
+    ####
+    # Command to prefix every output line with a timestamp
+    # (can't do inline awk script, Cirrus-CI or YAML mangles quoting)
+    _TIMESTAMP: 'awk --file ${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/timestamp.awk'
+    _DFCMD: 'df -lhTx tmpfs'
+    _RAUDITCMD: 'cat /var/log/audit/audit.log'
+    _UAUDITCMD: 'cat /var/log/kern.log'
+    _JOURNALCMD: 'journalctl -b'
+
+gcp_credentials: ENCRYPTED[c87717f04fb15499d19a3b3fa0ad2cdedecc047e82967785d101e9bc418e93219f755e662feac8390088a2df1a4d8464]
+
+# Default timeout for each task
+timeout_in: 120m
+
+# Default VM to use unless set or modified by task
+gce_instance:
+    image_project: "${IMAGE_PROJECT}"
+    zone: "us-central1-b"  # Required by Cirrus for the time being
+    cpu: 2
+    memory: "4Gb"
+    disk: 200
+    image_name: "${FEDORA_CACHE_IMAGE_NAME}"
+
+
+linux_testing: &linux_testing
+    depends_on:
+        - lint
+    gce_instance:  # Only need to specify differences from defaults (above)
+        image_name: "${VM_IMAGE}"
+
+    # Separate scripts for separate outputs, makes debugging easier.
+    setup_script: '${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'
+    build_and_test_script: '${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/build_and_test.sh |& ${_TIMESTAMP}'
+
+    always:
+        df_script: '${_DFCMD} || true'
+        rh_audit_log_script: '${_RAUDITCMD} || true'
+        debian_audit_log_script: '${_UAUDITCMD} || true'
+        journal_log_script: '${_JOURNALCMD} || true'
+
+
+fedora_testing_task: &fedora_testing
+    <<: *linux_testing
+    alias: fedora_testing
+    name: &std_test_name "${OS_NAME} ${TEST_DRIVER}"
+    env:
+        OS_NAME: "${FEDORA_NAME}"
+        VM_IMAGE: "${FEDORA_CACHE_IMAGE_NAME}"
+    # Not all $TEST_DRIVER combinations valid for all $VM_IMAGE types.
+    matrix: &test_matrix
+        - env:
+            TEST_DRIVER: "vfs"
+        - env:
+            TEST_DRIVER: "overlay"
+        - env:
+            TEST_DRIVER: "overlay-transient"
+        - env:
+            TEST_DRIVER: "fuse-overlay"
+        - env:
+            TEST_DRIVER: "fuse-overlay-whiteout"
+        - env:
+            TEST_DRIVER: "btrfs"
+
+
+# aufs was dropped between 20.04 and 22.04, can't test it
+debian_testing_task: &debian_testing
+    <<: *linux_testing
+    alias: debian_testing
+    name: *std_test_name
+    env:
+        OS_NAME: "${DEBIAN_NAME}"
+        VM_IMAGE: "${DEBIAN_CACHE_IMAGE_NAME}"
+    # Not all $TEST_DRIVER combinations valid for all $VM_IMAGE types.
+ matrix: + - env: + TEST_DRIVER: "vfs" + - env: + TEST_DRIVER: "overlay" + - env: + TEST_DRIVER: "fuse-overlay" + - env: + TEST_DRIVER: "fuse-overlay-whiteout" + - env: + TEST_DRIVER: "btrfs" + + +lint_task: + env: + CIRRUS_WORKING_DIR: "/go/src/github.com/containers/storage" + container: + image: golang + modules_cache: + fingerprint_script: cat go.sum + folder: $GOPATH/pkg/mod + build_script: | + apt-get update + apt-get install -y libbtrfs-dev libdevmapper-dev + test_script: | + make TAGS=regex_precompile local-validate + make lint + make clean + + +# Update metadata on VM images referenced by this repository state +meta_task: + + container: + image: "quay.io/libpod/imgts:latest" + cpu: 1 + memory: 1 + + env: + # Space-separated list of images used by this repository state + IMGNAMES: |- + ${FEDORA_CACHE_IMAGE_NAME} + ${DEBIAN_CACHE_IMAGE_NAME} + BUILDID: "${CIRRUS_BUILD_ID}" + REPOREF: "${CIRRUS_CHANGE_IN_REPO}" + GCPJSON: ENCRYPTED[244a93fe8b386b48b96f748342bf741350e43805eee81dd04b45093bdf737e540b993fc735df41f131835fa0f9b65826] + GCPNAME: ENCRYPTED[91cf7aa421858b26b67835978d224b4a5c46afcf52a0f1ec1b69a99b248715dc8e92a1b56fde18e092acf256fa80ae9c] + GCPPROJECT: ENCRYPTED[79b0f7eb5958e25bc7095d5d368fa8d94447a43ffacb9c693de438186e2f767b7efe9563d6954297ae4730220e10aa9c] + CIRRUS_CLONE_DEPTH: 1 # source not used + + script: '/usr/local/bin/entrypoint.sh |& ${_TIMESTAMP}' + + +vendor_task: + container: + image: golang + modules_cache: + fingerprint_script: cat go.sum + folder: $GOPATH/pkg/mod + build_script: make vendor + test_script: hack/tree_status.sh + + +cross_task: + container: + image: golang:1.19 + build_script: make cross + + +# Represent overall pass/fail status from required dependent tasks +success_task: + depends_on: + - lint + - fedora_testing + - debian_testing + - meta + - vendor + - cross + container: + image: golang:1.19 + clone_script: 'mkdir -p "$CIRRUS_WORKING_DIR"' # Source code not needed + script: /bin/true diff --git a/vendor/github.com/containers/storage/.dockerignore b/vendor/github.com/containers/storage/.dockerignore new file mode 100644 index 0000000000..9bd2c02193 --- /dev/null +++ b/vendor/github.com/containers/storage/.dockerignore @@ -0,0 +1,3 @@ +bundles +.gopath +vendor/pkg diff --git a/vendor/github.com/containers/storage/.gitignore b/vendor/github.com/containers/storage/.gitignore new file mode 100644 index 0000000000..99b40fbde5 --- /dev/null +++ b/vendor/github.com/containers/storage/.gitignore @@ -0,0 +1,32 @@ +# containers/storage project generated files to ignore +# if you want to ignore files created by your editor/tools, +# please consider a global .gitignore https://help.github.com/articles/ignoring-files +*.1 +*.5 +*.exe +*~ +*.orig +*.test +.*.swp +.DS_Store +.idea* +# a .bashrc may be added to customize the build environment +.bashrc +.gopath/ +docs/AWS_S3_BUCKET +docs/GITCOMMIT +docs/GIT_BRANCH +docs/VERSION +docs/_build +docs/_static +docs/_templates +docs/changed-files +# generated by man/md2man-all.sh +man/man1 +man/man5 +man/man8 +tests/tools/build +vendor/pkg/ +.vagrant +/containers-storage +/containers-storage.* diff --git a/vendor/github.com/containers/storage/.golangci.yml b/vendor/github.com/containers/storage/.golangci.yml new file mode 100644 index 0000000000..20968466c3 --- /dev/null +++ b/vendor/github.com/containers/storage/.golangci.yml @@ -0,0 +1,11 @@ +--- +run: + concurrency: 6 + deadline: 5m + skip-dirs-use-default: true +linters: + enable: + - 
gofumpt + disable: + - errcheck + - staticcheck diff --git a/vendor/github.com/containers/storage/.mailmap b/vendor/github.com/containers/storage/.mailmap new file mode 100644 index 0000000000..0527b6d84d --- /dev/null +++ b/vendor/github.com/containers/storage/.mailmap @@ -0,0 +1,254 @@ +# Generate AUTHORS: hack/generate-authors.sh + +# Tip for finding duplicates (besides scanning the output of AUTHORS for name +# duplicates that aren't also email duplicates): scan the output of: +# git log --format='%aE - %aN' | sort -uf +# +# For explanation on this file format: man git-shortlog + +Patrick Stapleton +Shishir Mahajan +Erwin van der Koogh +Ahmed Kamal +Tejesh Mehta +Cristian Staretu +Cristian Staretu +Cristian Staretu +Marcus Linke +Aleksandrs Fadins +Christopher Latham +Hu Keping +Wayne Chang +Chen Chao +Daehyeok Mun + + + + + + +Guillaume J. Charmes + + + + + +Thatcher Peskens +Thatcher Peskens +Thatcher Peskens dhrp +Jérôme Petazzoni jpetazzo +Jérôme Petazzoni +Joffrey F +Joffrey F +Joffrey F +Tim Terhorst +Andy Smith + + + + + + + + + +Walter Stanish + +Roberto Hashioka +Konstantin Pelykh +David Sissitka +Nolan Darilek + +Benoit Chesneau +Jordan Arentsen +Daniel Garcia +Miguel Angel Fernández +Bhiraj Butala +Faiz Khan +Victor Lyuboslavsky +Jean-Baptiste Barth +Matthew Mueller + +Shih-Yuan Lee +Daniel Mizyrycki root +Jean-Baptiste Dalido + + + + + + + + + + + + + + +Sven Dowideit +Sven Dowideit +Sven Dowideit +Sven Dowideit <¨SvenDowideit@home.org.au¨> +Sven Dowideit +Sven Dowideit +Sven Dowideit + +Alexander Morozov +Alexander Morozov + +O.S. Tezer + +Roberto G. Hashioka + + + + + +Sridhar Ratnakumar +Sridhar Ratnakumar +Liang-Chi Hsieh +Aleksa Sarai +Aleksa Sarai +Aleksa Sarai +Will Weaver +Timothy Hobbs +Nathan LeClaire +Nathan LeClaire + + + + +Matthew Heon + + + + +Francisco Carriedo + + + + +Brian Goff + + + +Hollie Teal + + + +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle + + + + +Thomas LEVEIL Thomas LÉVEIL + + +Antonio Murdaca +Antonio Murdaca +Antonio Murdaca +Antonio Murdaca +Antonio Murdaca +Darren Shepherd +Deshi Xiao +Deshi Xiao +Doug Davis +Jacob Atzen +Jeff Nickoloff +John Howard (VM) +John Howard (VM) +John Howard (VM) +John Howard (VM) +Madhu Venugopal +Mary Anthony +Mary Anthony moxiegirl +Mary Anthony +mattyw +resouer +AJ Bowen soulshake +AJ Bowen soulshake +Tibor Vass +Tibor Vass +Vincent Bernat +Yestin Sun +bin liu +John Howard (VM) jhowardmsft +Ankush Agarwal +Tangi COLIN tangicolin +Allen Sun +Adrien Gallouët + +Anuj Bahuguna +Anusha Ragunathan +Avi Miller +Brent Salisbury +Chander G +Chun Chen +Ying Li +Daehyeok Mun + +Daniel, Dao Quang Minh +Daniel Nephin +Dave Tucker +Doug Tangren +Frederick F. 
Kautz IV +Ben Golub +Harold Cooper +hsinko <21551195@zju.edu.cn> +Josh Hawn +Justin Cormack + + +Kamil Domański +Lei Jitang + +Linus Heckemann + +Lynda O'Leary + +Marianna Tessel +Michael Huettermann +Moysés Borges + +Nigel Poulton +Qiang Huang + +Boaz Shuster +Shuwei Hao + +Soshi Katsuta + +Stefan Berger + +Stephen Day + +Toli Kuznets +Tristan Carel + +Vincent Demeester + +Vishnu Kannan +xlgao-zju xlgao +yuchangchun y00277921 + + + + +Hao Shu Wei + + + + + + + +Shengbo Song mYmNeo +Shengbo Song + +Sylvain Bellemare + diff --git a/vendor/github.com/containers/storage/CODE-OF-CONDUCT.md b/vendor/github.com/containers/storage/CODE-OF-CONDUCT.md new file mode 100644 index 0000000000..f4f7df4b8c --- /dev/null +++ b/vendor/github.com/containers/storage/CODE-OF-CONDUCT.md @@ -0,0 +1,3 @@ +## The Containers Storage Project Community Code of Conduct + +The Containers Storage project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/main/CODE-OF-CONDUCT.md). diff --git a/vendor/github.com/containers/storage/CONTRIBUTING.md b/vendor/github.com/containers/storage/CONTRIBUTING.md new file mode 100644 index 0000000000..5364be769e --- /dev/null +++ b/vendor/github.com/containers/storage/CONTRIBUTING.md @@ -0,0 +1,144 @@ +# Contributing to Containers/Storage + +We'd love to have you join the community! Below summarizes the processes +that we follow. + +## Topics + +* [Reporting Issues](#reporting-issues) +* [Submitting Pull Requests](#submitting-pull-requests) +* [Communications](#communications) + + +## Reporting Issues + +Before reporting an issue, check our backlog of +[open issues](https://github.com/containers/storage/issues) +to see if someone else has already reported it. If so, feel free to add +your scenario, or additional information, to the discussion. Or simply +"subscribe" to it to be notified when it is updated. + +If you find a new issue with the project we'd love to hear about it! The most +important aspect of a bug report is that it includes enough information for +us to reproduce it. So, please include as much detail as possible and try +to remove the extra stuff that doesn't really relate to the issue itself. +The easier it is for us to reproduce it, the faster it'll be fixed! + +Please don't include any private/sensitive information in your issue! + +## Submitting Pull Requests + +No Pull Request (PR) is too small! Typos, additional comments in the code, +new testcases, bug fixes, new features, more documentation, ... it's all +welcome! + +While bug fixes can first be identified via an "issue", that is not required. +It's ok to just open up a PR with the fix, but make sure you include the same +information you would have included in an issue - like how to reproduce it. + +PRs for new features should include some background on what use cases the +new code is trying to address. When possible and when it makes sense, try to break-up +larger PRs into smaller ones - it's easier to review smaller +code changes. But only if those smaller ones make sense as stand-alone PRs. + +Regardless of the type of PR, all PRs should include: +* well documented code changes +* additional testcases. Ideally, they should fail w/o your code change applied +* documentation changes + +Squash your commits into logical pieces of work that might want to be reviewed +separate from the rest of the PRs. But, squashing down to just one commit is ok +too since in the end the entire PR will be reviewed anyway. 
When in doubt, +squash. + +PRs that fix issues should include a reference like `Closes #XXXX` in the +commit message so that GitHub will automatically close the referenced issue +when the PR is merged. + + + +### Sign your PRs + +The sign-off is a line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +Use your real name (sorry, no pseudonyms or anonymous contributions). + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. + +## Communications + +For general questions or discussions, please use the +IRC group on `irc.freenode.net` called `container-projects` +that has been set up. + +For discussions around issues/bugs and features, you can use the GitHub +[issues](https://github.com/containers/storage/issues) +and +[PRs](https://github.com/containers/storage/pulls) +tracking system.
+ + diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile new file mode 100644 index 0000000000..77189d49ea --- /dev/null +++ b/vendor/github.com/containers/storage/Makefile @@ -0,0 +1,94 @@ +.PHONY: \ + all \ + binary \ + clean \ + codespell \ + containers-storage \ + cross \ + default \ + docs \ + gccgo \ + help \ + install \ + install.docs \ + install.tools \ + lint \ + local-binary \ + local-cross \ + local-gccgo \ + local-test \ + local-test-integration \ + local-test-unit \ + local-validate \ + test-integration \ + test-unit \ + validate \ + vendor \ + vendor-in-container + +NATIVETAGS := +AUTOTAGS := $(shell ./hack/btrfs_tag.sh) $(shell ./hack/libdm_tag.sh) $(shell ./hack/libsubid_tag.sh) +BUILDFLAGS := -tags "$(AUTOTAGS) $(TAGS)" $(FLAGS) +GO ?= go +TESTFLAGS := $(shell $(GO) test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /dev/null && echo -race) + +default all: local-binary docs local-validate local-cross ## validate all checks, build and cross-build\nbinaries and docs + +clean: ## remove all built files + $(RM) -f containers-storage containers-storage.* docs/*.1 docs/*.5 + +containers-storage: ## build using gc on the host + $(GO) build -compiler gc $(BUILDFLAGS) ./cmd/containers-storage + +codespell: + codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L worl,flate,uint,iff,od,ERRO -w + +binary local-binary: containers-storage + +local-gccgo gccgo: ## build using gccgo on the host + GCCGO=$(PWD)/hack/gccgo-wrapper.sh $(GO) build -compiler gccgo $(BUILDFLAGS) -o containers-storage.gccgo ./cmd/containers-storage + +local-cross cross: ## cross build the binaries for arm, darwin, and freebsd + @for target in linux/amd64 linux/386 linux/arm linux/arm64 linux/ppc64 linux/ppc64le linux/riscv64 linux/s390x linux/mips linux/mipsle linux/mips64 linux/mips64le darwin/amd64 windows/amd64 freebsd/amd64 freebsd/arm64 ; do \ + os=`echo $${target} | cut -f1 -d/` ; \ + arch=`echo $${target} | cut -f2 -d/` ; \ + suffix=$${os}.$${arch} ; \ + echo env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build -compiler gc -tags \"$(NATIVETAGS) $(TAGS)\" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage ; \ + env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build -compiler gc -tags "$(NATIVETAGS) $(TAGS)" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage || exit 1 ; \ + done + +docs: install.tools ## build the docs on the host + $(MAKE) -C docs docs + +local-test: local-binary local-test-unit local-test-integration ## build the binaries and run the tests + +local-test-unit test-unit: local-binary ## run the unit tests on the host (requires\nsuperuser privileges) + @$(GO) test -count 1 $(BUILDFLAGS) $(TESTFLAGS) ./... 
+ +local-test-integration test-integration: local-binary ## run the integration tests on the host (requires\nsuperuser privileges) + @cd tests; ./test_runner.bash + +local-validate validate: install.tools ## validate DCO on the host + @./hack/git-validation.sh + +install.tools: + $(MAKE) -C tests/tools + +install.docs: docs + $(MAKE) -C docs install + +install: install.docs + +lint: install.tools + tests/tools/build/golangci-lint run --build-tags="$(AUTOTAGS) $(TAGS)" + +help: ## this help + @awk 'BEGIN {FS = ":.*?## "} /^[a-z A-Z_-]+:.*?## / {gsub(" ",",",$$1);gsub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-21s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) + +vendor-in-container: + podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src golang make vendor + +vendor: + $(GO) mod tidy + $(GO) mod vendor + $(GO) mod verify diff --git a/vendor/github.com/containers/storage/OWNERS b/vendor/github.com/containers/storage/OWNERS new file mode 100644 index 0000000000..c169581dec --- /dev/null +++ b/vendor/github.com/containers/storage/OWNERS @@ -0,0 +1,32 @@ +approvers: + - Luap99 + - TomSweeneyRedHat + - cevich + - edsantiago + - flouthoc + - giuseppe + - haircommander + - kolyshkin + - mrunalp + - mtrmac + - nalind + - rhatdan + - saschagrunert + - umohnani8 + - vrothberg +reviewers: + - Luap99 + - TomSweeneyRedHat + - cevich + - edsantiago + - flouthoc + - giuseppe + - haircommander + - kolyshkin + - mrunalp + - mtrmac + - nalind + - rhatdan + - saschagrunert + - umohnani8 + - vrothberg diff --git a/vendor/github.com/containers/storage/README.md b/vendor/github.com/containers/storage/README.md new file mode 100644 index 0000000000..fef46a6893 --- /dev/null +++ b/vendor/github.com/containers/storage/README.md @@ -0,0 +1,46 @@ +`storage` is a Go library which aims to provide methods for storing filesystem +layers, container images, and containers. A `containers-storage` CLI wrapper +is also included for manual and scripting use. + +To build the CLI wrapper, use 'make binary'. + +Operations which use VMs expect to launch them using 'vagrant', defaulting to +using its 'libvirt' provider. The boxes used are also available for the +'virtualbox' provider, and can be selected by setting $VAGRANT_PROVIDER to +'virtualbox' before kicking off the build. + +The library manages three types of items: layers, images, and containers. + +A *layer* is a copy-on-write filesystem which is notionally stored as a set of +changes relative to its *parent* layer, if it has one. A given layer can only +have one parent, but any layer can be the parent of multiple layers. Layers +which are parents of other layers should be treated as read-only. + +An *image* is a reference to a particular layer (its _top_ layer), along with +other information which the library can manage for the convenience of its +caller. This information typically includes configuration templates for +running a binary contained within the image's layers, and may include +cryptographic signatures. Multiple images can reference the same layer, as the +differences between two images may not be in their layer contents. + +A *container* is a read-write layer which is a child of an image's top layer, +along with information which the library can manage for the convenience of its +caller. This information typically includes configuration information for +running the specific container. Multiple containers can be derived from a +single image. 
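+
+As a rough sketch of how a caller typically uses the library, the example
+below opens the default store and lists its images. Treat it as illustrative
+only: the options helper shown here (`types.DefaultStoreOptionsAutoDetectUID`)
+and the error handling are assumptions for the sketch, not a canonical recipe,
+and helper names vary between releases.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/containers/storage"
+	"github.com/containers/storage/types"
+)
+
+func main() {
+	// Resolve default store options (graph root, run root, storage driver)
+	// for the invoking user; the exact helper name differs across releases.
+	opts, err := types.DefaultStoreOptionsAutoDetectUID()
+	if err != nil {
+		panic(err)
+	}
+	store, err := storage.GetStore(opts)
+	if err != nil {
+		panic(err)
+	}
+	// Release the store's resources on exit; layers stay on disk.
+	defer store.Shutdown(false)
+
+	images, err := store.Images()
+	if err != nil {
+		panic(err)
+	}
+	for _, image := range images {
+		fmt.Println(image.ID, image.Names)
+	}
+}
+```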
+ +Layers, images, and containers are represented primarily by 32 character +hexadecimal IDs, but items of each kind can also have one or more arbitrary +names attached to them, which the library will automatically resolve to IDs +when they are passed in to API calls which expect IDs. + +The library can store what it calls *metadata* for each of these types of +items. This is expected to be a small piece of data, since it is cached in +memory and stored along with the library's own bookkeeping information. + +Additionally, the library can store one or more of what it calls *big data* for +images and containers. This is a named chunk of larger data, which is only in +memory when it is being read from or being written to its own disk file. + +**[Contributing](CONTRIBUTING.md)** +Information about contributing to this project. diff --git a/vendor/github.com/containers/storage/SECURITY.md b/vendor/github.com/containers/storage/SECURITY.md new file mode 100644 index 0000000000..ab2c14182f --- /dev/null +++ b/vendor/github.com/containers/storage/SECURITY.md @@ -0,0 +1,3 @@ +## Security and Disclosure Information Policy for the Containers Storage Project + +The Containers Storage Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/main/SECURITY.md) for the Containers Projects. diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION new file mode 100644 index 0000000000..ba0a719118 --- /dev/null +++ b/vendor/github.com/containers/storage/VERSION @@ -0,0 +1 @@ +1.51.0 diff --git a/vendor/github.com/containers/storage/check.go b/vendor/github.com/containers/storage/check.go new file mode 100644 index 0000000000..e58084fc7c --- /dev/null +++ b/vendor/github.com/containers/storage/check.go @@ -0,0 +1,1153 @@ +package storage + +import ( + "archive/tar" + "errors" + "fmt" + "io" + "os" + "path" + "path/filepath" + "sort" + "strings" + "sync" + "time" + + drivers "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/types" + "github.com/sirupsen/logrus" +) + +var ( + // ErrLayerUnaccounted describes a layer that is present in the lower-level storage driver, + // but which is not known to or managed by the higher-level driver-agnostic logic. + ErrLayerUnaccounted = types.ErrLayerUnaccounted + // ErrLayerUnreferenced describes a layer which is not used by any image or container. + ErrLayerUnreferenced = types.ErrLayerUnreferenced + // ErrLayerIncorrectContentDigest describes a layer for which the contents of one or more + // files which were added in the layer appear to have changed. It may instead look like an + // unnamed "file integrity checksum failed" error. + ErrLayerIncorrectContentDigest = types.ErrLayerIncorrectContentDigest + // ErrLayerIncorrectContentSize describes a layer for which regenerating the diff that was + // used to populate the layer produced a diff of a different size. We check the digest + // first, so it's highly unlikely you'll ever see this error. + ErrLayerIncorrectContentSize = types.ErrLayerIncorrectContentSize + // ErrLayerContentModified describes a layer which contains contents which should not be + // there, or for which ownership/permissions/dates have been changed. 
+ ErrLayerContentModified = types.ErrLayerContentModified + // ErrLayerDataMissing describes a layer which is missing a big data item. + ErrLayerDataMissing = types.ErrLayerDataMissing + // ErrLayerMissing describes a layer which is the missing parent of a layer. + ErrLayerMissing = types.ErrLayerMissing + // ErrImageLayerMissing describes an image which claims to have a layer that we don't know + // about. + ErrImageLayerMissing = types.ErrImageLayerMissing + // ErrImageDataMissing describes an image which is missing a big data item. + ErrImageDataMissing = types.ErrImageDataMissing + // ErrImageDataIncorrectSize describes an image which has a big data item which looks like + // its size has changed, likely because it's been modified somehow. + ErrImageDataIncorrectSize = types.ErrImageDataIncorrectSize + // ErrContainerImageMissing describes a container which claims to be based on an image that + // we don't know about. + ErrContainerImageMissing = types.ErrContainerImageMissing + // ErrContainerDataMissing describes a container which is missing a big data item. + ErrContainerDataMissing = types.ErrContainerDataMissing + // ErrContainerDataIncorrectSize describes a container which has a big data item which looks + // like its size has changed, likely because it's been modified somehow. + ErrContainerDataIncorrectSize = types.ErrContainerDataIncorrectSize +) + +const ( + defaultMaximumUnreferencedLayerAge = 24 * time.Hour +) + +// CheckOptions is the set of options for Check(), specifying which tests to perform. +type CheckOptions struct { + LayerUnreferencedMaximumAge *time.Duration // maximum allowed age of unreferenced layers + LayerDigests bool // check that contents of image layer diffs can still be reconstructed + LayerMountable bool // check that layers are mountable + LayerContents bool // check that contents of image layers match their diffs, with no unexpected changes, requires LayerMountable + LayerData bool // check that associated "big" data items are present and can be read + ImageData bool // check that associated "big" data items are present, can be read, and match the recorded size + ContainerData bool // check that associated "big" data items are present and can be read +} + +// checkIgnore is used to tell functions that compare the contents of a mounted +// layer to the contents that we'd expect it to have to ignore certain +// discrepancies +type checkIgnore struct { + ownership, timestamps, permissions bool +} + +// CheckMost returns a CheckOptions with mostly just "quick" checks enabled. +func CheckMost() *CheckOptions { + return &CheckOptions{ + LayerDigests: true, + LayerMountable: true, + LayerContents: false, + LayerData: true, + ImageData: true, + ContainerData: true, + } +} + +// CheckEverything returns a CheckOptions with every check enabled. +func CheckEverything() *CheckOptions { + return &CheckOptions{ + LayerDigests: true, + LayerMountable: true, + LayerContents: true, + LayerData: true, + ImageData: true, + ContainerData: true, + } +} + +// CheckReport is a list of detected problems. 
+type CheckReport struct { + Layers map[string][]error // damaged read-write layers + ROLayers map[string][]error // damaged read-only layers + layerParentsByLayerID map[string]string + layerOrder map[string]int + Images map[string][]error // damaged read-write images (including those with damaged layers) + ROImages map[string][]error // damaged read-only images (including those with damaged layers) + Containers map[string][]error // damaged containers (including those based on damaged images) +} + +// RepairOptions is the set of options for Repair(). +type RepairOptions struct { + RemoveContainers bool // Remove damaged containers +} + +// RepairEverything returns a RepairOptions with every optional remediation +// enabled. +func RepairEverything() *RepairOptions { + return &RepairOptions{ + RemoveContainers: true, + } +} + +// Check returns a list of problems with what's in the store, as a whole. It can be very expensive +// to call. +func (s *store) Check(options *CheckOptions) (CheckReport, error) { + var ignore checkIgnore + for _, o := range s.graphOptions { + if strings.Contains(o, "ignore_chown_errors=true") { + ignore.ownership = true + } + if strings.HasPrefix(o, "force_mask=") { + ignore.permissions = true + } + } + for o := range s.pullOptions { + if strings.Contains(o, "use_hard_links") { + if s.pullOptions[o] == "true" { + ignore.timestamps = true + } + } + } + + if options == nil { + options = CheckMost() + } + + report := CheckReport{ + Layers: make(map[string][]error), + ROLayers: make(map[string][]error), + layerParentsByLayerID: make(map[string]string), // layers ID -> their parent's ID, if there is one + layerOrder: make(map[string]int), // layers ID -> order for removal, if we needed to remove them all + Images: make(map[string][]error), + ROImages: make(map[string][]error), + Containers: make(map[string][]error), + } + + // This map will track known layer IDs. If we have multiple stores, read-only ones can + // contain copies of layers that are in the read-write store, but we'll only ever be + // mounting or extracting contents from the read-write versions, since we always search it + // first. The boolean will track if the layer is referenced by at least one image or + // container. + referencedLayers := make(map[string]bool) + referencedROLayers := make(map[string]bool) + + // This map caches the headers for items included in layer diffs. + diffHeadersByLayer := make(map[string][]*tar.Header) + var diffHeadersByLayerMutex sync.Mutex + + // Walk the list of layer stores, looking at each layer that we didn't see in a + // previously-visited store. + if _, _, err := readOrWriteAllLayerStores(s, func(store roLayerStore) (struct{}, bool, error) { + layers, err := store.Layers() + if err != nil { + return struct{}{}, true, err + } + isReadWrite := roLayerStoreIsReallyReadWrite(store) + readWriteDesc := "" + if !isReadWrite { + readWriteDesc = "read-only " + } + // Examine each layer in turn. + for i := range layers { + layer := layers[i] + id := layer.ID + // If we've already seen a layer with this ID, no need to process it again. + if _, checked := referencedLayers[id]; checked { + continue + } + if _, checked := referencedROLayers[id]; checked { + continue + } + // Note the parent of this layer, and add it to the map of known layers so + // that we know that we've visited it, but we haven't confirmed that it's + // used by anything. 
+ report.layerParentsByLayerID[id] = layer.Parent + if isReadWrite { + referencedLayers[id] = false + } else { + referencedROLayers[id] = false + } + logrus.Debugf("checking %slayer %s", readWriteDesc, id) + // Check that all of the big data items are present and can be read. We + // have no digest or size information to compare the contents to (grumble), + // so we can't verify that the contents haven't been changed since they + // were stored. + if options.LayerData { + for _, name := range layer.BigDataNames { + func() { + rc, err := store.BigData(id, name) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + err := fmt.Errorf("%slayer %s: data item %q: %w", readWriteDesc, id, name, ErrLayerDataMissing) + if isReadWrite { + report.Layers[id] = append(report.Layers[id], err) + } else { + report.ROLayers[id] = append(report.ROLayers[id], err) + } + return + } + err = fmt.Errorf("%slayer %s: data item %q: %w", readWriteDesc, id, name, err) + if isReadWrite { + report.Layers[id] = append(report.Layers[id], err) + } else { + report.ROLayers[id] = append(report.ROLayers[id], err) + } + return + } + defer rc.Close() + if _, err = io.Copy(io.Discard, rc); err != nil { + err = fmt.Errorf("%slayer %s: data item %q: %w", readWriteDesc, id, name, err) + if isReadWrite { + report.Layers[id] = append(report.Layers[id], err) + } else { + report.ROLayers[id] = append(report.ROLayers[id], err) + } + return + } + }() + } + } + // Check that the content we get back when extracting the layer's contents + // match the recorded digest and size. A layer for which they're not given + // isn't a part of an image, and is likely the read-write layer for a + // container, and we can't vouch for the integrity of its contents. + // For each layer with known contents, record the headers for the layer's + // diff, which we can use to reconstruct the expected contents for the tree + // we see when the layer is mounted. + if options.LayerDigests && layer.UncompressedDigest != "" { + func() { + expectedDigest := layer.UncompressedDigest + // Double-check that the digest isn't invalid somehow. + if err := layer.UncompressedDigest.Validate(); err != nil { + err := fmt.Errorf("%slayer %s: %w", readWriteDesc, id, err) + if isReadWrite { + report.Layers[id] = append(report.Layers[id], err) + } else { + report.ROLayers[id] = append(report.ROLayers[id], err) + } + return + } + // Extract the diff. + uncompressed := archive.Uncompressed + diffOptions := DiffOptions{ + Compression: &uncompressed, + } + diff, err := store.Diff("", id, &diffOptions) + if err != nil { + err := fmt.Errorf("%slayer %s: %w", readWriteDesc, id, err) + if isReadWrite { + report.Layers[id] = append(report.Layers[id], err) + } else { + report.ROLayers[id] = append(report.ROLayers[id], err) + } + return + } + // Digest and count the length of the diff. + digester := expectedDigest.Algorithm().Digester() + counter := ioutils.NewWriteCounter(digester.Hash()) + reader := io.TeeReader(diff, counter) + var wg sync.WaitGroup + var archiveErr error + wg.Add(1) + go func(layerID string, diffReader io.Reader) { + // Read the diff, one item at a time. 
+ tr := tar.NewReader(diffReader) + hdr, err := tr.Next() + for err == nil { + diffHeadersByLayerMutex.Lock() + diffHeadersByLayer[layerID] = append(diffHeadersByLayer[layerID], hdr) + diffHeadersByLayerMutex.Unlock() + hdr, err = tr.Next() + } + if !errors.Is(err, io.EOF) { + archiveErr = err + } + // consume any trailer after the EOF marker + io.Copy(io.Discard, diffReader) + wg.Done() + }(id, reader) + wg.Wait() + diff.Close() + if archiveErr != nil { + // Reading the diff didn't end as expected + diffHeadersByLayerMutex.Lock() + delete(diffHeadersByLayer, id) + diffHeadersByLayerMutex.Unlock() + archiveErr = fmt.Errorf("%slayer %s: %w", readWriteDesc, id, archiveErr) + if isReadWrite { + report.Layers[id] = append(report.Layers[id], archiveErr) + } else { + report.ROLayers[id] = append(report.ROLayers[id], archiveErr) + } + return + } + if digester.Digest() != layer.UncompressedDigest { + // The diff digest didn't match. + diffHeadersByLayerMutex.Lock() + delete(diffHeadersByLayer, id) + diffHeadersByLayerMutex.Unlock() + err := fmt.Errorf("%slayer %s: %w", readWriteDesc, id, ErrLayerIncorrectContentDigest) + if isReadWrite { + report.Layers[id] = append(report.Layers[id], err) + } else { + report.ROLayers[id] = append(report.ROLayers[id], err) + } + } + if layer.UncompressedSize != -1 && counter.Count != layer.UncompressedSize { + // We expected the diff to have a specific size, and + // it didn't match. + diffHeadersByLayerMutex.Lock() + delete(diffHeadersByLayer, id) + diffHeadersByLayerMutex.Unlock() + err := fmt.Errorf("%slayer %s: read %d bytes instead of %d bytes: %w", readWriteDesc, id, counter.Count, layer.UncompressedSize, ErrLayerIncorrectContentSize) + if isReadWrite { + report.Layers[id] = append(report.Layers[id], err) + } else { + report.ROLayers[id] = append(report.ROLayers[id], err) + } + } + }() + } + } + // At this point we're out of things that we can be sure will work in read-only + // stores, so skip the rest for any stores that aren't also read-write stores. + if !isReadWrite { + return struct{}{}, false, nil + } + // Content and mount checks are also things that we can only be sure will work in + // read-write stores. + for i := range layers { + layer := layers[i] + id := layer.ID + // Compare to what we see when we mount the layer and walk the tree, and + // flag cases where content is in the layer that shouldn't be there. The + // tar-split implementation of Diff() won't catch this problem by itself. + if options.LayerMountable { + func() { + // Mount the layer. + mountPoint, err := s.graphDriver.Get(id, drivers.MountOpts{MountLabel: layer.MountLabel}) + if err != nil { + err := fmt.Errorf("%slayer %s: %w", readWriteDesc, id, err) + if isReadWrite { + report.Layers[id] = append(report.Layers[id], err) + } else { + report.ROLayers[id] = append(report.ROLayers[id], err) + } + return + } + // Unmount the layer when we're done in here. + defer func() { + if err := s.graphDriver.Put(id); err != nil { + err := fmt.Errorf("%slayer %s: %w", readWriteDesc, id, err) + if isReadWrite { + report.Layers[id] = append(report.Layers[id], err) + } else { + report.ROLayers[id] = append(report.ROLayers[id], err) + } + return + } + }() + // If we're not looking at layer contents, or we didn't + // look at the diff for this layer, we're done here. + if !options.LayerDigests || layer.UncompressedDigest == "" || !options.LayerContents { + return + } + // Build a list of all of the changes in all of the layers + // that make up the tree we're looking at. 
+ diffHeaderSet := [][]*tar.Header{} + // If we don't know _all_ of the changes that produced this + // layer, it's not part of an image, so we're done here. + for layerID := id; layerID != ""; layerID = report.layerParentsByLayerID[layerID] { + diffHeadersByLayerMutex.Lock() + layerChanges, haveChanges := diffHeadersByLayer[layerID] + diffHeadersByLayerMutex.Unlock() + if !haveChanges { + return + } + // The diff headers for this layer go _before_ those of + // layers that inherited some of its contents. + diffHeaderSet = append([][]*tar.Header{layerChanges}, diffHeaderSet...) + } + expectedCheckDirectory := newCheckDirectoryDefaults() + for _, diffHeaders := range diffHeaderSet { + expectedCheckDirectory.headers(diffHeaders) + } + // Scan the directory tree under the mount point. + var idmap *idtools.IDMappings + if !s.canUseShifting(layer.UIDMap, layer.GIDMap) { + // we would have had to chown() layer contents to match ID maps + idmap = idtools.NewIDMappingsFromMaps(layer.UIDMap, layer.GIDMap) + } + actualCheckDirectory, err := newCheckDirectoryFromDirectory(mountPoint) + if err != nil { + err := fmt.Errorf("scanning contents of %slayer %s: %w", readWriteDesc, id, err) + if isReadWrite { + report.Layers[id] = append(report.Layers[id], err) + } else { + report.ROLayers[id] = append(report.ROLayers[id], err) + } + return + } + // Every departure from our expectations is an error. + diffs := compareCheckDirectory(expectedCheckDirectory, actualCheckDirectory, idmap, ignore) + for _, diff := range diffs { + err := fmt.Errorf("%slayer %s: %s, %w", readWriteDesc, id, diff, ErrLayerContentModified) + if isReadWrite { + report.Layers[id] = append(report.Layers[id], err) + } else { + report.ROLayers[id] = append(report.ROLayers[id], err) + } + } + }() + } + } + // Check that we don't have any dangling parent layer references. + for id, parent := range report.layerParentsByLayerID { + // If this layer doesn't have a parent, no problem. + if parent == "" { + continue + } + // If we've already seen a layer with this parent ID, skip it. + if _, checked := referencedLayers[parent]; checked { + continue + } + if _, checked := referencedROLayers[parent]; checked { + continue + } + // We haven't seen a layer with the ID that this layer's record + // says is its parent's ID. + err := fmt.Errorf("%slayer %s: %w", readWriteDesc, parent, ErrLayerMissing) + report.Layers[id] = append(report.Layers[id], err) + } + return struct{}{}, false, nil + }); err != nil { + return CheckReport{}, err + } + + // This map will track examined images. If we have multiple stores, read-only ones can + // contain copies of images that are also in the read-write store, or the read-write store + // may contain a duplicate entry that refers to layers in the read-only stores, but when + // trying to export them, we only look at the first copy of the image. + examinedImages := make(map[string]struct{}) + + // Walk the list of image stores, looking at each image that we didn't see in a + // previously-visited store. + if _, _, err := readAllImageStores(s, func(store roImageStore) (struct{}, bool, error) { + images, err := store.Images() + if err != nil { + return struct{}{}, true, err + } + isReadWrite := roImageStoreIsReallyReadWrite(store) + readWriteDesc := "" + if !isReadWrite { + readWriteDesc = "read-only " + } + // Examine each image in turn. + for i := range images { + image := images[i] + id := image.ID + // If we've already seen an image with this ID, skip it. 
+ if _, checked := examinedImages[id]; checked { + continue + } + examinedImages[id] = struct{}{} + logrus.Debugf("checking %simage %s", readWriteDesc, id) + if options.ImageData { + // Check that all of the big data items are present and reading them + // back gives us the right amount of data. Even though we record + // digests that can be used to look them up, we don't know how they + // were calculated (they're only used as lookup keys), so do not try + // to check them. + for _, key := range image.BigDataNames { + func() { + data, err := store.BigData(id, key) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + err = fmt.Errorf("%simage %s: data item %q: %w", readWriteDesc, id, key, ErrImageDataMissing) + if isReadWrite { + report.Images[id] = append(report.Images[id], err) + } else { + report.ROImages[id] = append(report.ROImages[id], err) + } + return + } + err = fmt.Errorf("%simage %s: data item %q: %w", readWriteDesc, id, key, err) + if isReadWrite { + report.Images[id] = append(report.Images[id], err) + } else { + report.ROImages[id] = append(report.ROImages[id], err) + } + return + } + if int64(len(data)) != image.BigDataSizes[key] { + err = fmt.Errorf("%simage %s: data item %q: %w", readWriteDesc, id, key, ErrImageDataIncorrectSize) + if isReadWrite { + report.Images[id] = append(report.Images[id], err) + } else { + report.ROImages[id] = append(report.ROImages[id], err) + } + return + } + }() + } + } + // Walk the layers list for the image. For every layer that the image uses + // that has errors, the layer's errors are also the image's errors. + examinedImageLayers := make(map[string]struct{}) + for _, topLayer := range append([]string{image.TopLayer}, image.MappedTopLayers...) { + if topLayer == "" { + continue + } + if _, checked := examinedImageLayers[topLayer]; checked { + continue + } + examinedImageLayers[topLayer] = struct{}{} + for layer := topLayer; layer != ""; layer = report.layerParentsByLayerID[layer] { + // The referenced layer should have a corresponding entry in + // one map or the other. + _, checked := referencedLayers[layer] + _, checkedRO := referencedROLayers[layer] + if !checked && !checkedRO { + err := fmt.Errorf("layer %s: %w", layer, ErrImageLayerMissing) + err = fmt.Errorf("%simage %s: %w", readWriteDesc, id, err) + if isReadWrite { + report.Images[id] = append(report.Images[id], err) + } else { + report.ROImages[id] = append(report.ROImages[id], err) + } + } else { + // Count this layer as referenced. Whether by the + // image or one of its child layers doesn't matter + // at this point. + if _, ok := referencedLayers[layer]; ok { + referencedLayers[layer] = true + } + if _, ok := referencedROLayers[layer]; ok { + referencedROLayers[layer] = true + } + } + if isReadWrite { + if len(report.Layers[layer]) > 0 { + report.Images[id] = append(report.Images[id], report.Layers[layer]...) + } + if len(report.ROLayers[layer]) > 0 { + report.Images[id] = append(report.Images[id], report.ROLayers[layer]...) + } + } else { + if len(report.Layers[layer]) > 0 { + report.ROImages[id] = append(report.ROImages[id], report.Layers[layer]...) + } + if len(report.ROLayers[layer]) > 0 { + report.ROImages[id] = append(report.ROImages[id], report.ROLayers[layer]...) + } + } + } + } + } + return struct{}{}, false, nil + }); err != nil { + return CheckReport{}, err + } + + // Iterate over each container in turn. 
+ if _, _, err := readContainerStore(s, func() (struct{}, bool, error) { + containers, err := s.containerStore.Containers() + if err != nil { + return struct{}{}, true, err + } + for i := range containers { + container := containers[i] + id := container.ID + logrus.Debugf("checking container %s", id) + if options.ContainerData { + // Check that all of the big data items are present and reading them + // back gives us the right amount of data. + for _, key := range container.BigDataNames { + func() { + data, err := s.containerStore.BigData(id, key) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + err = fmt.Errorf("container %s: data item %q: %w", id, key, ErrContainerDataMissing) + report.Containers[id] = append(report.Containers[id], err) + return + } + err = fmt.Errorf("container %s: data item %q: %w", id, key, err) + report.Containers[id] = append(report.Containers[id], err) + return + } + if int64(len(data)) != container.BigDataSizes[key] { + err = fmt.Errorf("container %s: data item %q: %w", id, key, ErrContainerDataIncorrectSize) + report.Containers[id] = append(report.Containers[id], err) + return + } + }() + } + } + // Look at the container's base image. If the image has errors, the image's errors + // are the container's errors. + if container.ImageID != "" { + if _, checked := examinedImages[container.ImageID]; !checked { + err := fmt.Errorf("image %s: %w", container.ImageID, ErrContainerImageMissing) + report.Containers[id] = append(report.Containers[id], err) + } + if len(report.Images[container.ImageID]) > 0 { + report.Containers[id] = append(report.Containers[id], report.Images[container.ImageID]...) + } + if len(report.ROImages[container.ImageID]) > 0 { + report.Containers[id] = append(report.Containers[id], report.ROImages[container.ImageID]...) + } + } + // Count the container's layer as referenced. + if container.LayerID != "" { + referencedLayers[container.LayerID] = true + } + } + return struct{}{}, false, nil + }); err != nil { + return CheckReport{}, err + } + + // Now go back through all of the layer stores, and flag any layers which don't belong + // to an image or a container, and has been around longer than we can reasonably expect + // such a layer to be present before a corresponding image record is added. + if _, _, err := readAllLayerStores(s, func(store roLayerStore) (struct{}, bool, error) { + if isReadWrite := roLayerStoreIsReallyReadWrite(store); !isReadWrite { + return struct{}{}, false, nil + } + layers, err := store.Layers() + if err != nil { + return struct{}{}, true, err + } + for _, layer := range layers { + maximumAge := defaultMaximumUnreferencedLayerAge + if options.LayerUnreferencedMaximumAge != nil { + maximumAge = *options.LayerUnreferencedMaximumAge + } + if referenced := referencedLayers[layer.ID]; !referenced { + if layer.Created.IsZero() || layer.Created.Add(maximumAge).Before(time.Now()) { + // Either we don't (and never will) know when this layer was + // created, or it was created far enough in the past that we're + // reasonably sure it's not part of an image that's being written + // right now. + err := fmt.Errorf("layer %s: %w", layer.ID, ErrLayerUnreferenced) + report.Layers[layer.ID] = append(report.Layers[layer.ID], err) + } + } + } + return struct{}{}, false, nil + }); err != nil { + return CheckReport{}, err + } + + // If the driver can tell us about which layers it knows about, we should have previously + // examined all of them. Any that we didn't are probably just wasted space. 
+ // Note: if the driver doesn't support enumerating layers, it returns ErrNotSupported. + if err := s.startUsingGraphDriver(); err != nil { + return CheckReport{}, err + } + defer s.stopUsingGraphDriver() + layerList, err := s.graphDriver.ListLayers() + if err != nil && !errors.Is(err, drivers.ErrNotSupported) { + return CheckReport{}, err + } + if !errors.Is(err, drivers.ErrNotSupported) { + for i, id := range layerList { + if _, known := referencedLayers[id]; !known { + err := fmt.Errorf("layer %s: %w", id, ErrLayerUnaccounted) + report.Layers[id] = append(report.Layers[id], err) + } + report.layerOrder[id] = i + 1 + } + } + + return report, nil +} + +func roLayerStoreIsReallyReadWrite(store roLayerStore) bool { + return store.(*layerStore).lockfile.IsReadWrite() +} + +func roImageStoreIsReallyReadWrite(store roImageStore) bool { + return store.(*imageStore).lockfile.IsReadWrite() +} + +// Repair removes items which are themselves damaged, or which depend on items which are damaged. +// Errors are returned if an attempt to delete an item fails. +func (s *store) Repair(report CheckReport, options *RepairOptions) []error { + if options == nil { + options = RepairEverything() + } + var errs []error + // Just delete damaged containers. + if options.RemoveContainers { + for id := range report.Containers { + err := s.DeleteContainer(id) + if err != nil && !errors.Is(err, ErrContainerUnknown) { + err := fmt.Errorf("deleting container %s: %w", id, err) + errs = append(errs, err) + } + } + } + // Now delete damaged images. Note which layers were removed as part of removing those images. + deletedLayers := make(map[string]struct{}) + for id := range report.Images { + layers, err := s.DeleteImage(id, true) + if err != nil { + if !errors.Is(err, ErrImageUnknown) && !errors.Is(err, ErrLayerUnknown) { + err := fmt.Errorf("deleting image %s: %w", id, err) + errs = append(errs, err) + } + } else { + for _, layer := range layers { + logrus.Debugf("deleted layer %s", layer) + deletedLayers[layer] = struct{}{} + } + logrus.Debugf("deleted image %s", id) + } + } + // Build a list of the layers that we need to remove, sorted so that child layers come before + // the layers that they are based on (parents are removed last).
+ layersToDelete := make([]string, 0, len(report.Layers)) + for id := range report.Layers { + layersToDelete = append(layersToDelete, id) + } + depth := func(id string) int { + d := 0 + parent := report.layerParentsByLayerID[id] + for parent != "" { + d++ + parent = report.layerParentsByLayerID[parent] + } + return d + } + isUnaccounted := func(errs []error) bool { + for _, err := range errs { + if errors.Is(err, ErrLayerUnaccounted) { + return true + } + } + return false + } + sort.Slice(layersToDelete, func(i, j int) bool { + // we've not heard of either of them, so remove them in the order the driver suggested + if isUnaccounted(report.Layers[layersToDelete[i]]) && + isUnaccounted(report.Layers[layersToDelete[j]]) && + report.layerOrder[layersToDelete[i]] != 0 && report.layerOrder[layersToDelete[j]] != 0 { + return report.layerOrder[layersToDelete[i]] < report.layerOrder[layersToDelete[j]] + } + // always delete the one we've heard of first + if isUnaccounted(report.Layers[layersToDelete[i]]) && !isUnaccounted(report.Layers[layersToDelete[j]]) { + return false + } + // always delete the one we've heard of first + if !isUnaccounted(report.Layers[layersToDelete[i]]) && isUnaccounted(report.Layers[layersToDelete[j]]) { + return true + } + // we've heard of both of them; the one that's on the end of a longer chain goes first + return depth(layersToDelete[i]) > depth(layersToDelete[j]) // closer-to-a-notional-base layers get removed later + }) + // Now delete the layers that haven't been removed along with images. + for _, id := range layersToDelete { + if _, ok := deletedLayers[id]; ok { + continue + } + for _, reportedErr := range report.Layers[id] { + var err error + // If a layer was unaccounted for, remove it at the storage driver level. + // Otherwise, remove it at the higher level and let the higher level + // logic worry about telling the storage driver to delete the layer. 
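+ // (an unaccounted layer has no layer-store record for DeleteLayer to act on, so it can only be removed at the driver level)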
+ if errors.Is(reportedErr, ErrLayerUnaccounted) { + if err = s.graphDriver.Remove(id); err != nil { + err = fmt.Errorf("deleting storage layer %s: %v", id, err) + } else { + logrus.Debugf("deleted storage layer %s", id) + } + } else { + var stillMounted bool + if stillMounted, err = s.Unmount(id, true); err == nil && !stillMounted { + logrus.Debugf("unmounted layer %s", id) + } else if err != nil { + logrus.Debugf("unmounting layer %s: %v", id, err) + } else { + logrus.Debugf("layer %s still mounted", id) + } + if err = s.DeleteLayer(id); err != nil { + err = fmt.Errorf("deleting layer %s: %w", id, err) + logrus.Debugf("deleted layer %s", id) + } + } + if err != nil && !errors.Is(err, ErrLayerUnknown) && !errors.Is(err, ErrNotALayer) && !errors.Is(err, os.ErrNotExist) { + errs = append(errs, err) + } + } + } + return errs +} + +// compareFileInfo returns a string summarizing what's different between the two checkFileInfos +func compareFileInfo(a, b checkFileInfo, idmap *idtools.IDMappings, ignore checkIgnore) string { + var comparison []string + if a.typeflag != b.typeflag { + comparison = append(comparison, fmt.Sprintf("filetype:%v→%v", a.typeflag, b.typeflag)) + } + if idmap != nil && !idmap.Empty() { + mappedUID, mappedGID, err := idmap.ToContainer(idtools.IDPair{UID: b.uid, GID: b.gid}) + if err != nil { + return err.Error() + } + b.uid, b.gid = mappedUID, mappedGID + } + if a.uid != b.uid && !ignore.ownership { + comparison = append(comparison, fmt.Sprintf("uid:%d→%d", a.uid, b.uid)) + } + if a.gid != b.gid && !ignore.ownership { + comparison = append(comparison, fmt.Sprintf("gid:%d→%d", a.gid, b.gid)) + } + if a.size != b.size { + comparison = append(comparison, fmt.Sprintf("size:%d→%d", a.size, b.size)) + } + if (os.ModeType|os.ModePerm)&a.mode != (os.ModeType|os.ModePerm)&b.mode && !ignore.permissions { + comparison = append(comparison, fmt.Sprintf("mode:%04o→%04o", a.mode, b.mode)) + } + if a.mtime != b.mtime && !ignore.timestamps { + comparison = append(comparison, fmt.Sprintf("mtime:0x%x→0x%x", a.mtime, b.mtime)) + } + return strings.Join(comparison, ",") +} + +// checkFileInfo is what we care about for files +type checkFileInfo struct { + typeflag byte + uid, gid int + size int64 + mode os.FileMode + mtime int64 // unix-style whole seconds +} + +// checkDirectory is a node in a filesystem record, possibly the top +type checkDirectory struct { + directory map[string]*checkDirectory // subdirectories + file map[string]checkFileInfo // non-directories + checkFileInfo +} + +// newCheckDirectory creates an empty checkDirectory +func newCheckDirectory(uid, gid int, size int64, mode os.FileMode, mtime int64) *checkDirectory { + return &checkDirectory{ + directory: make(map[string]*checkDirectory), + file: make(map[string]checkFileInfo), + checkFileInfo: checkFileInfo{ + typeflag: tar.TypeDir, + uid: uid, + gid: gid, + size: size, + mode: mode, + mtime: mtime, + }, + } +} + +// newCheckDirectoryDefaults creates an empty checkDirectory with hardwired defaults for the UID +// (0), GID (0), size (0) and permissions (0o555) +func newCheckDirectoryDefaults() *checkDirectory { + return newCheckDirectory(0, 0, 0, 0o555, time.Now().Unix()) +} + +// newCheckDirectoryFromDirectory creates a checkDirectory for an on-disk directory tree +func newCheckDirectoryFromDirectory(dir string) (*checkDirectory, error) { + cd := newCheckDirectoryDefaults() + err := filepath.Walk(dir, func(walkpath string, info os.FileInfo, err error) error { + if err != nil && !errors.Is(err, os.ErrNotExist) { + return err 
+ } + rel, err := filepath.Rel(dir, walkpath) + if err != nil { + return err + } + hdr, err := tar.FileInfoHeader(info, "") // we don't record link targets, so don't bother looking it up + if err != nil { + return err + } + hdr.Name = filepath.ToSlash(rel) + cd.header(hdr) + return nil + }) + if err != nil { + return nil, err + } + return cd, nil +} + +// add adds an item to a checkDirectory +func (c *checkDirectory) add(path string, typeflag byte, uid, gid int, size int64, mode os.FileMode, mtime int64) { + components := strings.Split(path, "/") + if components[len(components)-1] == "" { + components = components[:len(components)-1] + } + if components[0] == "." { + components = components[1:] + } + if typeflag != tar.TypeReg { + size = 0 + } + switch len(components) { + case 0: + c.uid = uid + c.gid = gid + c.mode = mode + c.mtime = mtime + return + case 1: + switch typeflag { + case tar.TypeDir: + delete(c.file, components[0]) + // directory entries are mergers, not replacements + if _, present := c.directory[components[0]]; !present { + c.directory[components[0]] = newCheckDirectory(uid, gid, size, mode, mtime) + } else { + c.directory[components[0]].checkFileInfo = checkFileInfo{ + typeflag: tar.TypeDir, + uid: uid, + gid: gid, + size: size, + mode: mode, + mtime: mtime, + } + } + default: + // treat these as TypeReg items + delete(c.directory, components[0]) + c.file[components[0]] = checkFileInfo{ + typeflag: typeflag, + uid: uid, + gid: gid, + size: size, + mode: mode, + mtime: mtime, + } + case tar.TypeXGlobalHeader: + // ignore, since even though it looks like a valid pathname, it doesn't end + // up on the filesystem + } + return + } + subdirectory := c.directory[components[0]] + if subdirectory == nil { + subdirectory = newCheckDirectory(uid, gid, size, mode, mtime) + c.directory[components[0]] = subdirectory + } + subdirectory.add(strings.Join(components[1:], "/"), typeflag, uid, gid, size, mode, mtime) +} + +// remove removes an item from a checkDirectory +func (c *checkDirectory) remove(path string) { + components := strings.Split(path, "/") + if len(components) == 1 { + delete(c.directory, components[0]) + delete(c.file, components[0]) + return + } + subdirectory := c.directory[components[0]] + if subdirectory != nil { + subdirectory.remove(strings.Join(components[1:], "/")) + } +} + +// header updates a checkDirectory using information from the passed-in header +func (c *checkDirectory) header(hdr *tar.Header) { + name := path.Clean(hdr.Name) + dir, base := path.Split(name) + if strings.HasPrefix(base, archive.WhiteoutPrefix) { + if base == archive.WhiteoutOpaqueDir { + c.remove(path.Clean(dir)) + c.add(path.Clean(dir), tar.TypeDir, hdr.Uid, hdr.Gid, hdr.Size, os.FileMode(hdr.Mode), hdr.ModTime.Unix()) + } else { + c.remove(path.Join(dir, base[len(archive.WhiteoutPrefix):])) + } + } else { + if hdr.Typeflag == tar.TypeLink { + // look up the attributes of the target of the hard link + // n.b. by convention, Linkname is always relative to the + // root directory of the archive, which is not always the + // same as being relative to hdr.Name + directory := c + for _, component := range strings.Split(path.Clean(hdr.Linkname), "/") { + if component == "." || component == ".." 
{ + continue + } + if subdir, ok := directory.directory[component]; ok { + directory = subdir + continue + } + if file, ok := directory.file[component]; ok { + hdr.Typeflag = file.typeflag + hdr.Uid = file.uid + hdr.Gid = file.gid + hdr.Size = file.size + hdr.Mode = int64(file.mode) + hdr.ModTime = time.Unix(file.mtime, 0) + } + break + } + } + c.add(name, hdr.Typeflag, hdr.Uid, hdr.Gid, hdr.Size, os.FileMode(hdr.Mode), hdr.ModTime.Unix()) + } +} + +// headers updates a checkDirectory using information from the passed-in header slice +func (c *checkDirectory) headers(hdrs []*tar.Header) { + hdrs = append([]*tar.Header{}, hdrs...) + // sort the headers from the diff to ensure that whiteouts appear + // before content when they both appear in the same directory, per + // https://github.com/opencontainers/image-spec/blob/main/layer.md#whiteouts + // and that hard links appear after other types of entries + sort.SliceStable(hdrs, func(i, j int) bool { + if hdrs[i].Typeflag != tar.TypeLink && hdrs[j].Typeflag == tar.TypeLink { + return true + } + if hdrs[i].Typeflag == tar.TypeLink && hdrs[j].Typeflag != tar.TypeLink { + return false + } + idir, ifile := path.Split(hdrs[i].Name) + jdir, jfile := path.Split(hdrs[j].Name) + if idir != jdir { + return hdrs[i].Name < hdrs[j].Name + } + if ifile == archive.WhiteoutOpaqueDir { + return true + } + if strings.HasPrefix(ifile, archive.WhiteoutPrefix) && !strings.HasPrefix(jfile, archive.WhiteoutPrefix) { + return true + } + return false + }) + for _, hdr := range hdrs { + c.header(hdr) + } +} + +// names provides a sorted list of the path names in the directory tree +func (c *checkDirectory) names() []string { + names := make([]string, 0, len(c.file)+len(c.directory)) + for name := range c.file { + names = append(names, name) + } + for name, subdirectory := range c.directory { + names = append(names, name+"/") + for _, subname := range subdirectory.names() { + names = append(names, name+"/"+subname) + } + } + return names +} + +// compareCheckSubdirectory walks two subdirectory trees and returns a list of differences +func compareCheckSubdirectory(path string, a, b *checkDirectory, idmap *idtools.IDMappings, ignore checkIgnore) []string { + var diff []string + if a == nil { + a = newCheckDirectoryDefaults() + } + if b == nil { + b = newCheckDirectoryDefaults() + } + for aname, adir := range a.directory { + if bdir, present := b.directory[aname]; !present { + // directory was removed + diff = append(diff, "-"+path+"/"+aname+"/") + diff = append(diff, compareCheckSubdirectory(path+"/"+aname, adir, nil, idmap, ignore)...) + } else { + // directory is in both trees; descend + if attributes := compareFileInfo(adir.checkFileInfo, bdir.checkFileInfo, idmap, ignore); attributes != "" { + diff = append(diff, path+"/"+aname+"("+attributes+")") + } + diff = append(diff, compareCheckSubdirectory(path+"/"+aname, adir, bdir, idmap, ignore)...) + } + } + for bname, bdir := range b.directory { + if _, present := a.directory[bname]; !present { + // directory added + diff = append(diff, "+"+path+"/"+bname+"/") + diff = append(diff, compareCheckSubdirectory(path+"/"+bname, nil, bdir, idmap, ignore)...) 
+		}
+	}
+	for aname, afile := range a.file {
+		if bfile, present := b.file[aname]; !present {
+			// non-directory removed or replaced
+			diff = append(diff, "-"+path+"/"+aname)
+		} else {
+			// item is in both trees; compare
+			if attributes := compareFileInfo(afile, bfile, idmap, ignore); attributes != "" {
+				diff = append(diff, path+"/"+aname+"("+attributes+")")
+			}
+		}
+	}
+	for bname := range b.file {
+		filetype, present := a.file[bname]
+		if !present {
+			// non-directory added or replaced with something else
+			diff = append(diff, "+"+path+"/"+bname)
+			continue
+		}
+		if attributes := compareFileInfo(filetype, b.file[bname], idmap, ignore); attributes != "" {
+			// non-directory replaced with non-directory
+			diff = append(diff, "+"+path+"/"+bname+"("+attributes+")")
+		}
+	}
+	return diff
+}
+
+// compareCheckDirectory walks two directory trees and returns a sorted list of differences
+func compareCheckDirectory(a, b *checkDirectory, idmap *idtools.IDMappings, ignore checkIgnore) []string {
+	diff := compareCheckSubdirectory("", a, b, idmap, ignore)
+	sort.Slice(diff, func(i, j int) bool {
+		if c := strings.Compare(diff[i][1:], diff[j][1:]); c != 0 {
+			return c < 0
+		}
+		// on a name tie, list removals before additions
+		return diff[i][0] == '-' && diff[j][0] != '-'
+	})
+	return diff
+}
diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go
new file mode 100644
index 0000000000..a7dfb405ba
--- /dev/null
+++ b/vendor/github.com/containers/storage/containers.go
@@ -0,0 +1,981 @@
+package storage
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"sync"
+	"time"
+
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/ioutils"
+	"github.com/containers/storage/pkg/lockfile"
+	"github.com/containers/storage/pkg/stringid"
+	"github.com/containers/storage/pkg/truncindex"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/sirupsen/logrus"
+)
+
+type containerLocations uint8
+
+// The backing store is split into two json files; the volatile one
+// is written without fsync(), so it is not as robust across an
+// unclean shutdown.
+const (
+	stableContainerLocation containerLocations = 1 << iota
+	volatileContainerLocation
+
+	numContainerLocationIndex = iota
+)
+
+func containerLocationFromIndex(index int) containerLocations {
+	return 1 << index
+}
+
+// A Container is a reference to a read-write layer with metadata.
+type Container struct {
+	// ID is either one which was specified at create-time, or a random
+	// value which was generated by the library.
+	ID string `json:"id"`
+
+	// Names is an optional set of user-defined convenience values. The
+	// container can be referred to by its ID or any of its names. Names
+	// are unique among containers.
+	Names []string `json:"names,omitempty"`
+
+	// ImageID is the ID of the image which was used to create the container.
+	ImageID string `json:"image"`
+
+	// LayerID is the ID of the read-write layer for the container itself.
+	// It is assumed that the image's top layer is the parent of the container's
+	// read-write layer.
+	LayerID string `json:"layer"`
+
+	// Metadata is data we keep for the convenience of the caller. It is not
+	// expected to be large, since it is kept in memory.
+	Metadata string `json:"metadata,omitempty"`
+
+	// BigDataNames is a list of names of data items that we keep for the
+	// convenience of the caller. They can be large, and are only in
+	// memory when being read from or written to disk.
+	BigDataNames []string `json:"big-data-names,omitempty"`
+
+	// BigDataSizes maps the names in BigDataNames to the sizes of the data
+	// that has been stored, if they're known.
+	BigDataSizes map[string]int64 `json:"big-data-sizes,omitempty"`
+
+	// BigDataDigests maps the names in BigDataNames to the digests of the
+	// data that has been stored, if they're known.
+	BigDataDigests map[string]digest.Digest `json:"big-data-digests,omitempty"`
+
+	// Created is the datestamp for when this container was created. Older
+	// versions of the library did not track this information, so callers
+	// will likely want to use the IsZero() method to verify that a value
+	// is set before using it.
+	Created time.Time `json:"created,omitempty"`
+
+	// UIDMap and GIDMap are used for setting up a container's root
+	// filesystem for use inside of a user namespace where UID mapping is
+	// being used.
+	UIDMap []idtools.IDMap `json:"uidmap,omitempty"`
+	GIDMap []idtools.IDMap `json:"gidmap,omitempty"`
+
+	Flags map[string]interface{} `json:"flags,omitempty"`
+
+	// volatileStore is true if the container is from the volatile json file
+	volatileStore bool `json:"-"`
+}
+
+// rwContainerStore provides bookkeeping for information about Containers.
+type rwContainerStore interface {
+	metadataStore
+	containerBigDataStore
+	flaggableStore
+
+	// startWriting makes sure the store is fresh, and locks it for writing.
+	// If this succeeds, the caller MUST call stopWriting().
+	startWriting() error
+
+	// stopWriting releases locks obtained by startWriting.
+	stopWriting()
+
+	// startReading makes sure the store is fresh, and locks it for reading.
+	// If this succeeds, the caller MUST call stopReading().
+	startReading() error
+
+	// stopReading releases locks obtained by startReading.
+	stopReading()
+
+	// create creates a container that has a specified ID (or generates a
+	// random one if an empty value is supplied) and optional names,
+	// based on the specified image, using the specified layer as its
+	// read-write layer.
+	// The maps in the container's options structure are recorded for the
+	// convenience of the caller, nothing more.
+	create(id string, names []string, image, layer string, options *ContainerOptions) (*Container, error)
+
+	// updateNames modifies names associated with a container based on (op, names).
+	updateNames(id string, names []string, op updateNameOperation) error
+
+	// Get retrieves information about a container given an ID or name.
+	Get(id string) (*Container, error)
+
+	// Exists checks if there is a container with the given ID or name.
+	Exists(id string) bool
+
+	// Delete removes the record of the container.
+	Delete(id string) error
+
+	// Wipe removes records of all containers.
+	Wipe() error
+
+	// Lookup attempts to translate a name to an ID. Most methods do this
+	// implicitly.
+	Lookup(name string) (string, error)
+
+	// Containers returns a slice enumerating the known containers.
+	Containers() ([]Container, error)
+
+	// GarbageCollect cleans up unreferenced datadirs.
+	GarbageCollect() error
+}
+
+type containerStore struct {
+	// The following fields are only set when constructing containerStore, and must never be modified afterwards.
+	// They are safe to access without any other locking.
+	lockfile *lockfile.LockFile // Synchronizes readers vs. writers of the _filesystem data_, both cross-process and in-process.
+ dir string + jsonPath [numContainerLocationIndex]string + + inProcessLock sync.RWMutex // Can _only_ be obtained with lockfile held. + // The following fields can only be read/written with read/write ownership of inProcessLock, respectively. + // Almost all users should use startReading() or startWriting(). + lastWrite lockfile.LastWrite + containers []*Container + idindex *truncindex.TruncIndex + byid map[string]*Container + bylayer map[string]*Container + byname map[string]*Container +} + +func copyContainer(c *Container) *Container { + return &Container{ + ID: c.ID, + Names: copyStringSlice(c.Names), + ImageID: c.ImageID, + LayerID: c.LayerID, + Metadata: c.Metadata, + BigDataNames: copyStringSlice(c.BigDataNames), + BigDataSizes: copyStringInt64Map(c.BigDataSizes), + BigDataDigests: copyStringDigestMap(c.BigDataDigests), + Created: c.Created, + UIDMap: copyIDMap(c.UIDMap), + GIDMap: copyIDMap(c.GIDMap), + Flags: copyStringInterfaceMap(c.Flags), + volatileStore: c.volatileStore, + } +} + +func (c *Container) MountLabel() string { + if label, ok := c.Flags[mountLabelFlag].(string); ok { + return label + } + return "" +} + +func (c *Container) ProcessLabel() string { + if label, ok := c.Flags[processLabelFlag].(string); ok { + return label + } + return "" +} + +func (c *Container) MountOpts() []string { + switch value := c.Flags[mountOptsFlag].(type) { + case []string: + return value + case []interface{}: + var mountOpts []string + for _, v := range value { + if flag, ok := v.(string); ok { + mountOpts = append(mountOpts, flag) + } + } + return mountOpts + default: + return nil + } +} + +// The caller must hold r.inProcessLock for reading. +func containerLocation(c *Container) containerLocations { + if c.volatileStore { + return volatileContainerLocation + } + return stableContainerLocation +} + +// startWritingWithReload makes sure the store is fresh if canReload, and locks it for writing. +// If this succeeds, the caller MUST call stopWriting(). +// +// This is an internal implementation detail of containerStore construction, every other caller +// should use startWriting() instead. +func (r *containerStore) startWritingWithReload(canReload bool) error { + r.lockfile.Lock() + r.inProcessLock.Lock() + succeeded := false + defer func() { + if !succeeded { + r.inProcessLock.Unlock() + r.lockfile.Unlock() + } + }() + + if canReload { + if _, err := r.reloadIfChanged(true); err != nil { + return err + } + } + + succeeded = true + return nil +} + +// startWriting makes sure the store is fresh, and locks it for writing. +// If this succeeds, the caller MUST call stopWriting(). +func (r *containerStore) startWriting() error { + return r.startWritingWithReload(true) +} + +// stopWriting releases locks obtained by startWriting. +func (r *containerStore) stopWriting() { + r.inProcessLock.Unlock() + r.lockfile.Unlock() +} + +// startReading makes sure the store is fresh, and locks it for reading. +// If this succeeds, the caller MUST call stopReading(). +func (r *containerStore) startReading() error { + // inProcessLocked calls the nested function with r.inProcessLock held for writing. + inProcessLocked := func(fn func() error) error { + r.inProcessLock.Lock() + defer r.inProcessLock.Unlock() + return fn() + } + + r.lockfile.RLock() + unlockFn := r.lockfile.Unlock // A function to call to clean up, or nil. 
+ defer func() { + if unlockFn != nil { + unlockFn() + } + }() + r.inProcessLock.RLock() + unlockFn = r.stopReading + + // If we are lucky, we can just hold the read locks, check that we are fresh, and continue. + _, modified, err := r.modified() + if err != nil { + return err + } + if modified { + // We are unlucky, and need to reload. + // NOTE: Multiple goroutines can get to this place approximately simultaneously. + r.inProcessLock.RUnlock() + unlockFn = r.lockfile.Unlock + + // r.lastWrite can change at this point if another goroutine reloads the store before us. That’s why we don’t unconditionally + // trigger a load below; we (lock and) reloadIfChanged() again. + + // First try reloading with r.lockfile held for reading. + // r.inProcessLock will serialize all goroutines that got here; + // each will re-check on-disk state vs. r.lastWrite, and the first one will actually reload the data. + var tryLockedForWriting bool + if err := inProcessLocked(func() error { + // We could optimize this further: The r.lockfile.GetLastWrite() value shouldn’t change as long as we hold r.lockfile, + // so if r.lastWrite was already updated, we don’t need to actually read the on-filesystem lock. + var err error + tryLockedForWriting, err = r.reloadIfChanged(false) + return err + }); err != nil { + if !tryLockedForWriting { + return err + } + // Not good enough, we need r.lockfile held for writing. So, let’s do that. + unlockFn() + unlockFn = nil + + r.lockfile.Lock() + unlockFn = r.lockfile.Unlock + if err := inProcessLocked(func() error { + _, err := r.reloadIfChanged(true) + return err + }); err != nil { + return err + } + unlockFn() + unlockFn = nil + + r.lockfile.RLock() + unlockFn = r.lockfile.Unlock + // We need to check for a reload once more because the on-disk state could have been modified + // after we released the lock. + // If that, _again_, finds inconsistent state, just give up. + // We could, plausibly, retry a few times, but that inconsistent state (duplicate container names) + // shouldn’t be saved (by correct implementations) in the first place. + if err := inProcessLocked(func() error { + _, err := r.reloadIfChanged(false) + return err + }); err != nil { + return fmt.Errorf("(even after successfully cleaning up once:) %w", err) + } + } + + // NOTE that we hold neither a read nor write inProcessLock at this point. That’s fine in ordinary operation, because + // the on-filesystem r.lockfile should protect us against (cooperating) writers, and any use of r.inProcessLock + // protects us against in-process writers modifying data. + // In presence of non-cooperating writers, we just ensure that 1) the in-memory data is not clearly out-of-date + // and 2) access to the in-memory data is not racy; + // but we can’t protect against those out-of-process writers modifying _files_ while we are assuming they are in a consistent state. + + r.inProcessLock.RLock() + } + unlockFn = nil + return nil +} + +// stopReading releases locks obtained by startReading. +func (r *containerStore) stopReading() { + r.inProcessLock.RUnlock() + r.lockfile.Unlock() +} + +// modified returns true if the on-disk state has changed (i.e. if reloadIfChanged may need to modify the store), +// and a lockfile.LastWrite value for that update. +// +// The caller must hold r.lockfile for reading _or_ writing. +// The caller must hold r.inProcessLock for reading or writing. 
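The startReading path above is the heart of the store's concurrency story: a cross-process lock file plus an in-process RWMutex, with an escalate-reload-downgrade dance that only one goroutine performs when the on-disk state is stale. Below is a minimal sketch of that shape, with a plain RWMutex and an integer version standing in for pkg/lockfile and its LastWrite tokens (both stand-ins are assumptions for illustration, not the real API):

```go
package main

import (
	"fmt"
	"sync"
)

// toyStore stands in for containerStore: mu plays the role of the
// on-disk lock file, inProc of r.inProcessLock, and version/lastSeen
// of the LastWrite tokens.
type toyStore struct {
	mu       sync.RWMutex
	inProc   sync.RWMutex
	version  int // bumped by writers, like RecordWrite
	lastSeen int // like r.lastWrite
	data     []string
}

func (s *toyStore) startReading() {
	s.mu.RLock()
	s.inProc.RLock()
	if s.lastSeen == s.version {
		return // fresh: keep both read locks and proceed
	}
	// Stale: trade the in-process read lock for a write lock so that
	// exactly one goroutine reloads; the others re-check and skip.
	s.inProc.RUnlock()
	s.inProc.Lock()
	if s.lastSeen != s.version {
		s.lastSeen = s.version // a real store would reload from disk here
	}
	s.inProc.Unlock()
	s.inProc.RLock()
}

func (s *toyStore) stopReading() {
	s.inProc.RUnlock()
	s.mu.RUnlock()
}

func main() {
	s := &toyStore{}
	s.startReading()
	n := len(s.data)
	s.stopReading()
	fmt.Println("entries:", n)
}
```

The real code is more involved because the reload itself can require the file lock held for writing, which forces the full unlock-relock-recheck sequence seen above.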
+func (r *containerStore) modified() (lockfile.LastWrite, bool, error) {
+	return r.lockfile.ModifiedSince(r.lastWrite)
+}
+
+// reloadIfChanged reloads the contents of the store from disk if it is changed.
+//
+// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
+// if it is held for writing.
+//
+// The caller must hold r.inProcessLock for WRITING.
+//
+// If !lockedForWriting and this function fails, the return value indicates whether
+// reloadIfChanged() with lockedForWriting could succeed.
+func (r *containerStore) reloadIfChanged(lockedForWriting bool) (bool, error) {
+	lastWrite, modified, err := r.modified()
+	if err != nil {
+		return false, err
+	}
+	// We require callers to always hold r.inProcessLock for WRITING, even if they might not end up calling r.load()
+	// and modify no fields, to ensure they see fresh data:
+	// r.lockfile.Modified() only returns true once per change. Without an exclusive lock,
+	// one goroutine might see r.lockfile.Modified() == true and decide to load, and in the meanwhile another one could
+	// see r.lockfile.Modified() == false and proceed to use in-memory data without noticing it is stale.
+	if modified {
+		if tryLockedForWriting, err := r.load(lockedForWriting); err != nil {
+			return tryLockedForWriting, err // r.lastWrite is unchanged, so we will load the next time again.
+		}
+		r.lastWrite = lastWrite
+	}
+	return false, nil
+}
+
+// Requires startReading or startWriting.
+func (r *containerStore) Containers() ([]Container, error) {
+	containers := make([]Container, len(r.containers))
+	for i := range r.containers {
+		containers[i] = *copyContainer(r.containers[i])
+	}
+	return containers, nil
+}
+
+// GarbageCollect looks for datadirs in the store directory that are not referenced
+// by the json file and removes them. Such leftovers can appear after unclean
+// shutdowns or regular restarts in transient store mode.
+// Requires startReading.
+func (r *containerStore) GarbageCollect() error {
+	entries, err := os.ReadDir(r.dir)
+	if err != nil {
+		// Unexpected, don't try any GC
+		return err
+	}
+
+	for _, entry := range entries {
+		id := entry.Name()
+		// Does it look like a datadir directory?
+		if !entry.IsDir() || stringid.ValidateID(id) != nil {
+			continue
+		}
+
+		// Should the id be there?
+		if r.byid[id] != nil {
+			continue
+		}
+
+		// Otherwise remove datadir
+		logrus.Debugf("removing %q", filepath.Join(r.dir, id))
+		moreErr := os.RemoveAll(filepath.Join(r.dir, id))
+		// Propagate first error
+		if moreErr != nil && err == nil {
+			err = moreErr
+		}
+	}
+
+	return err
+}
+
+func (r *containerStore) datadir(id string) string {
+	return filepath.Join(r.dir, id)
+}
+
+func (r *containerStore) datapath(id, key string) string {
+	return filepath.Join(r.datadir(id), makeBigDataBaseName(key))
+}
+
+// load reloads the contents of the store from disk.
+//
+// Most callers should call reloadIfChanged() instead, to avoid overhead and to correctly
+// manage r.lastWrite.
+//
+// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
+// if it is held for writing.
+// The caller must hold r.inProcessLock for WRITING.
+//
+// If !lockedForWriting and this function fails, the return value indicates whether
+// retrying with lockedForWriting could succeed.
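The GarbageCollect sweep above follows a reusable pattern: enumerate the store directory, skip entries that are referenced or do not look like IDs, remove the rest, and report only the first failure while still finishing the sweep. A hedged, self-contained rendering of that loop (looksLikeID approximates stringid.ValidateID and is invented for this sketch):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// looksLikeID approximates stringid.ValidateID: 64 lowercase hex digits.
func looksLikeID(s string) bool {
	if len(s) != 64 {
		return false
	}
	for _, r := range s {
		if (r < '0' || r > '9') && (r < 'a' || r > 'f') {
			return false
		}
	}
	return true
}

// collectGarbage removes unreferenced, ID-shaped directories under dir,
// remembering the first removal error but continuing the sweep.
func collectGarbage(dir string, referenced map[string]bool) error {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return err // unexpected: don't attempt any GC
	}
	for _, e := range entries {
		if !e.IsDir() || !looksLikeID(e.Name()) || referenced[e.Name()] {
			continue
		}
		if rmErr := os.RemoveAll(filepath.Join(dir, e.Name())); rmErr != nil && err == nil {
			err = rmErr
		}
	}
	return err
}

func main() {
	fmt.Println(collectGarbage(os.TempDir(), map[string]bool{}))
}
```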
+func (r *containerStore) load(lockedForWriting bool) (bool, error) {
+	var modifiedLocations containerLocations
+	containers := []*Container{}
+
+	ids := make(map[string]*Container)
+
+	for locationIndex := 0; locationIndex < numContainerLocationIndex; locationIndex++ {
+		location := containerLocationFromIndex(locationIndex)
+		rpath := r.jsonPath[locationIndex]
+
+		data, err := os.ReadFile(rpath)
+		if err != nil && !os.IsNotExist(err) {
+			return false, err
+		}
+
+		locationContainers := []*Container{}
+		if len(data) != 0 {
+			if err := json.Unmarshal(data, &locationContainers); err != nil {
+				return false, fmt.Errorf("loading %q: %w", rpath, err)
+			}
+		}
+
+		for _, container := range locationContainers {
+			// There should be no duplicated ids between json files, but let's check to be sure
+			if ids[container.ID] != nil {
+				continue // skip invalid duplicated container
+			}
+			// Remember where the container came from
+			if location == volatileContainerLocation {
+				container.volatileStore = true
+			}
+			containers = append(containers, container)
+			ids[container.ID] = container
+		}
+	}
+
+	idlist := make([]string, 0, len(containers))
+	layers := make(map[string]*Container)
+	names := make(map[string]*Container)
+	var errorToResolveBySaving error // == nil
+	for n, container := range containers {
+		idlist = append(idlist, container.ID)
+		layers[container.LayerID] = containers[n]
+		for _, name := range container.Names {
+			if conflict, ok := names[name]; ok {
+				r.removeName(conflict, name)
+				errorToResolveBySaving = errors.New("container store is inconsistent and the current caller does not hold a write lock")
+				modifiedLocations |= containerLocation(container)
+			}
+			names[name] = containers[n]
+		}
+	}
+
+	r.containers = containers
+	r.idindex = truncindex.NewTruncIndex(idlist) // Invalid values in idlist are ignored: they are not a reason to refuse processing the whole store.
+	r.byid = ids
+	r.bylayer = layers
+	r.byname = names
+	if errorToResolveBySaving != nil {
+		if !lockedForWriting {
+			return true, errorToResolveBySaving
+		}
+		return false, r.save(modifiedLocations)
+	}
+	return false, nil
+}
+
+// save saves the contents of the store to disk.
+// The caller must hold r.lockfile locked for writing.
+// The caller must hold r.inProcessLock for reading (but usually holds it for writing in order to make the desired changes).
+func (r *containerStore) save(saveLocations containerLocations) error {
+	r.lockfile.AssertLockedForWriting()
+	// This must be done before we write the file, because the process could be terminated
+	// after the file is written but before the lock file is updated.
+ lw, err := r.lockfile.RecordWrite() + if err != nil { + return err + } + r.lastWrite = lw + for locationIndex := 0; locationIndex < numContainerLocationIndex; locationIndex++ { + location := containerLocationFromIndex(locationIndex) + if location&saveLocations == 0 { + continue + } + rpath := r.jsonPath[locationIndex] + if err := os.MkdirAll(filepath.Dir(rpath), 0o700); err != nil { + return err + } + subsetContainers := make([]*Container, 0, len(r.containers)) + for _, container := range r.containers { + if containerLocation(container) == location { + subsetContainers = append(subsetContainers, container) + } + } + + jdata, err := json.Marshal(&subsetContainers) + if err != nil { + return err + } + var opts *ioutils.AtomicFileWriterOptions + if location == volatileContainerLocation { + opts = &ioutils.AtomicFileWriterOptions{ + NoSync: true, + } + } + if err := ioutils.AtomicWriteFileWithOpts(rpath, jdata, 0o600, opts); err != nil { + return err + } + } + return nil +} + +// saveFor saves the contents of the store relevant for modifiedContainer to disk. +// The caller must hold r.lockfile locked for writing. +// The caller must hold r.inProcessLock for reading (but usually holds it for writing in order to make the desired changes). +func (r *containerStore) saveFor(modifiedContainer *Container) error { + return r.save(containerLocation(modifiedContainer)) +} + +func newContainerStore(dir string, runDir string, transient bool) (rwContainerStore, error) { + if err := os.MkdirAll(dir, 0o700); err != nil { + return nil, err + } + volatileDir := dir + if transient { + if err := os.MkdirAll(runDir, 0o700); err != nil { + return nil, err + } + volatileDir = runDir + } + lockfile, err := lockfile.GetLockFile(filepath.Join(volatileDir, "containers.lock")) + if err != nil { + return nil, err + } + cstore := containerStore{ + lockfile: lockfile, + dir: dir, + jsonPath: [numContainerLocationIndex]string{ + filepath.Join(dir, "containers.json"), + filepath.Join(volatileDir, "volatile-containers.json"), + }, + + containers: []*Container{}, + byid: make(map[string]*Container), + bylayer: make(map[string]*Container), + byname: make(map[string]*Container), + } + + if err := cstore.startWritingWithReload(false); err != nil { + return nil, err + } + cstore.lastWrite, err = cstore.lockfile.GetLastWrite() + if err != nil { + return nil, err + } + defer cstore.stopWriting() + if _, err := cstore.load(true); err != nil { + return nil, err + } + return &cstore, nil +} + +// Requires startReading or startWriting. +func (r *containerStore) lookup(id string) (*Container, bool) { + if container, ok := r.byid[id]; ok { + return container, ok + } else if container, ok := r.byname[id]; ok { + return container, ok + } else if container, ok := r.bylayer[id]; ok { + return container, ok + } else if longid, err := r.idindex.Get(id); err == nil { + if container, ok := r.byid[longid]; ok { + return container, ok + } + } + return nil, false +} + +// Requires startWriting. +func (r *containerStore) ClearFlag(id string, flag string) error { + container, ok := r.lookup(id) + if !ok { + return ErrContainerUnknown + } + delete(container.Flags, flag) + return r.saveFor(container) +} + +// Requires startWriting. 
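save() above relies on ioutils.AtomicWriteFileWithOpts, setting NoSync only for the volatile location. The usual shape of such an atomic write is temp file, optional fsync, then rename; here is a rough sketch under that assumption (this is not the vendored implementation):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// atomicWrite mimics the shape of an atomic file writer: write a
// temporary file next to the target, optionally fsync it, then rename
// it into place so readers only ever see old or new content.
func atomicWrite(target string, data []byte, doSync bool) error {
	tmp, err := os.CreateTemp(filepath.Dir(target), ".tmp-*")
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name()) // best-effort; a no-op once renamed
	if _, err := tmp.Write(data); err != nil {
		tmp.Close()
		return err
	}
	if doSync { // the volatile location skips this for speed
		if err := tmp.Sync(); err != nil {
			tmp.Close()
			return err
		}
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	return os.Rename(tmp.Name(), target)
}

func main() {
	path := filepath.Join(os.TempDir(), "containers.json")
	fmt.Println(atomicWrite(path, []byte("[]"), true))
}
```

Skipping the fsync is what makes the volatile file cheap to rewrite, at the cost of possibly losing it on an unclean shutdown, which the split-store design explicitly tolerates.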
+func (r *containerStore) SetFlag(id string, flag string, value interface{}) error { + container, ok := r.lookup(id) + if !ok { + return ErrContainerUnknown + } + if container.Flags == nil { + container.Flags = make(map[string]interface{}) + } + container.Flags[flag] = value + return r.saveFor(container) +} + +// Requires startWriting. +func (r *containerStore) create(id string, names []string, image, layer string, options *ContainerOptions) (container *Container, err error) { + if options == nil { + options = &ContainerOptions{} + } + if id == "" { + id = stringid.GenerateRandomID() + _, idInUse := r.byid[id] + for idInUse { + id = stringid.GenerateRandomID() + _, idInUse = r.byid[id] + } + } + if _, idInUse := r.byid[id]; idInUse { + return nil, ErrDuplicateID + } + names = dedupeStrings(names) + for _, name := range names { + if _, nameInUse := r.byname[name]; nameInUse { + return nil, fmt.Errorf("the container name %q is already in use by %s. You have to remove that container to be able to reuse that name: %w", name, r.byname[name].ID, ErrDuplicateName) + } + } + if err := hasOverlappingRanges(options.UIDMap); err != nil { + return nil, err + } + if err := hasOverlappingRanges(options.GIDMap); err != nil { + return nil, err + } + container = &Container{ + ID: id, + Names: names, + ImageID: image, + LayerID: layer, + Metadata: options.Metadata, + BigDataNames: []string{}, + BigDataSizes: make(map[string]int64), + BigDataDigests: make(map[string]digest.Digest), + Created: time.Now().UTC(), + Flags: copyStringInterfaceMap(options.Flags), + UIDMap: copyIDMap(options.UIDMap), + GIDMap: copyIDMap(options.GIDMap), + volatileStore: options.Volatile, + } + if options.MountOpts != nil { + container.Flags[mountOptsFlag] = append([]string{}, options.MountOpts...) + } + if options.Volatile { + container.Flags[volatileFlag] = true + } + r.containers = append(r.containers, container) + // This can only fail on duplicate IDs, which shouldn’t happen — and in + // that case the index is already in the desired state anyway. + // Implementing recovery from an unlikely and unimportant failure here + // would be too risky. + _ = r.idindex.Add(id) + r.byid[id] = container + r.bylayer[layer] = container + for _, name := range names { + r.byname[name] = container + } + defer func() { + if err != nil { + // now that the in-memory structures know about the new + // record, we can use regular Delete() to clean up if + // anything breaks from here on out + if e := r.Delete(id); e != nil { + logrus.Debugf("while cleaning up partially-created container %q we failed to create: %v", id, e) + } + } + }() + err = r.saveFor(container) + if err != nil { + return nil, err + } + for _, item := range options.BigData { + if err = r.SetBigData(id, item.Key, item.Data); err != nil { + return nil, err + } + } + container = copyContainer(container) + return container, err +} + +// Requires startReading or startWriting. +func (r *containerStore) Metadata(id string) (string, error) { + if container, ok := r.lookup(id); ok { + return container.Metadata, nil + } + return "", ErrContainerUnknown +} + +// Requires startWriting. +func (r *containerStore) SetMetadata(id, metadata string) error { + if container, ok := r.lookup(id); ok { + container.Metadata = metadata + return r.saveFor(container) + } + return ErrContainerUnknown +} + +// The caller must hold r.inProcessLock for writing. 
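create() above keeps drawing random IDs until it finds an unused one before running the duplicate-ID and duplicate-name checks. The same loop in isolation, with crypto/rand plus hex standing in for stringid.GenerateRandomID (an assumption made for this sketch):

```go
package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
)

// newID draws 256-bit hex IDs until one is not in use, mirroring the
// collision loop at the top of create().
func newID(inUse map[string]bool) string {
	for {
		buf := make([]byte, 32)
		if _, err := rand.Read(buf); err != nil {
			panic(err) // crypto/rand failing is unrecoverable here
		}
		if id := hex.EncodeToString(buf); !inUse[id] {
			return id
		}
	}
}

func main() {
	used := map[string]bool{}
	id := newID(used)
	used[id] = true
	fmt.Println(id, newID(used) != id)
}
```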
+func (r *containerStore) removeName(container *Container, name string) { + container.Names = stringSliceWithoutValue(container.Names, name) +} + +// Requires startWriting. +func (r *containerStore) updateNames(id string, names []string, op updateNameOperation) error { + container, ok := r.lookup(id) + if !ok { + return ErrContainerUnknown + } + oldNames := container.Names + names, err := applyNameOperation(oldNames, names, op) + if err != nil { + return err + } + for _, name := range oldNames { + delete(r.byname, name) + } + for _, name := range names { + if otherContainer, ok := r.byname[name]; ok { + r.removeName(otherContainer, name) + } + r.byname[name] = container + } + container.Names = names + return r.saveFor(container) +} + +// Requires startWriting. +func (r *containerStore) Delete(id string) error { + container, ok := r.lookup(id) + if !ok { + return ErrContainerUnknown + } + id = container.ID + toDeleteIndex := -1 + for i, candidate := range r.containers { + if candidate.ID == id { + toDeleteIndex = i + break + } + } + delete(r.byid, id) + // This can only fail if the ID is already missing, which shouldn’t happen — and in that case the index is already in the desired state anyway. + // The store’s Delete method is used on various paths to recover from failures, so this should be robust against partially missing data. + _ = r.idindex.Delete(id) + delete(r.bylayer, container.LayerID) + for _, name := range container.Names { + delete(r.byname, name) + } + if toDeleteIndex != -1 { + // delete the container at toDeleteIndex + if toDeleteIndex == len(r.containers)-1 { + r.containers = r.containers[:len(r.containers)-1] + } else { + r.containers = append(r.containers[:toDeleteIndex], r.containers[toDeleteIndex+1:]...) + } + } + if err := r.saveFor(container); err != nil { + return err + } + if err := os.RemoveAll(r.datadir(id)); err != nil { + return err + } + return nil +} + +// Requires startReading or startWriting. +func (r *containerStore) Get(id string) (*Container, error) { + if container, ok := r.lookup(id); ok { + return copyContainer(container), nil + } + return nil, ErrContainerUnknown +} + +// Requires startReading or startWriting. +func (r *containerStore) Lookup(name string) (id string, err error) { + if container, ok := r.lookup(name); ok { + return container.ID, nil + } + return "", ErrContainerUnknown +} + +// Requires startReading or startWriting. +func (r *containerStore) Exists(id string) bool { + _, ok := r.lookup(id) + return ok +} + +// Requires startReading or startWriting. +func (r *containerStore) BigData(id, key string) ([]byte, error) { + if key == "" { + return nil, fmt.Errorf("can't retrieve container big data value for empty name: %w", ErrInvalidBigDataName) + } + c, ok := r.lookup(id) + if !ok { + return nil, ErrContainerUnknown + } + return os.ReadFile(r.datapath(c.ID, key)) +} + +// Requires startWriting. Yes, really, WRITING (see SetBigData). +func (r *containerStore) BigDataSize(id, key string) (int64, error) { + if key == "" { + return -1, fmt.Errorf("can't retrieve size of container big data with empty name: %w", ErrInvalidBigDataName) + } + c, ok := r.lookup(id) + if !ok { + return -1, ErrContainerUnknown + } + if size, ok := c.BigDataSizes[key]; ok { // This is valid, and returns ok == false, for BigDataSizes == nil. 
+		return size, nil
+	}
+	if data, err := r.BigData(id, key); err == nil && data != nil {
+		if err = r.SetBigData(id, key, data); err == nil {
+			c, ok := r.lookup(id)
+			if !ok {
+				return -1, ErrContainerUnknown
+			}
+			if size, ok := c.BigDataSizes[key]; ok {
+				return size, nil
+			}
+		} else {
+			return -1, err
+		}
+	}
+	return -1, ErrSizeUnknown
+}
+
+// Requires startWriting. Yes, really, WRITING (see SetBigData).
+func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) {
+	if key == "" {
+		return "", fmt.Errorf("can't retrieve digest of container big data value with empty name: %w", ErrInvalidBigDataName)
+	}
+	c, ok := r.lookup(id)
+	if !ok {
+		return "", ErrContainerUnknown
+	}
+	if d, ok := c.BigDataDigests[key]; ok { // This is valid, and returns ok == false, for BigDataDigests == nil.
+		return d, nil
+	}
+	if data, err := r.BigData(id, key); err == nil && data != nil {
+		if err = r.SetBigData(id, key, data); err == nil {
+			c, ok := r.lookup(id)
+			if !ok {
+				return "", ErrContainerUnknown
+			}
+			if d, ok := c.BigDataDigests[key]; ok {
+				return d, nil
+			}
+		} else {
+			return "", err
+		}
+	}
+	return "", ErrDigestUnknown
+}
+
+// Requires startReading or startWriting.
+func (r *containerStore) BigDataNames(id string) ([]string, error) {
+	c, ok := r.lookup(id)
+	if !ok {
+		return nil, ErrContainerUnknown
+	}
+	return copyStringSlice(c.BigDataNames), nil
+}
+
+// Requires startWriting.
+func (r *containerStore) SetBigData(id, key string, data []byte) error {
+	if key == "" {
+		return fmt.Errorf("can't set empty name for container big data item: %w", ErrInvalidBigDataName)
+	}
+	c, ok := r.lookup(id)
+	if !ok {
+		return ErrContainerUnknown
+	}
+	if err := os.MkdirAll(r.datadir(c.ID), 0o700); err != nil {
+		return err
+	}
+	err := ioutils.AtomicWriteFile(r.datapath(c.ID, key), data, 0o600)
+	if err == nil {
+		save := false
+		if c.BigDataSizes == nil {
+			c.BigDataSizes = make(map[string]int64)
+		}
+		oldSize, sizeOk := c.BigDataSizes[key]
+		c.BigDataSizes[key] = int64(len(data))
+		if c.BigDataDigests == nil {
+			c.BigDataDigests = make(map[string]digest.Digest)
+		}
+		oldDigest, digestOk := c.BigDataDigests[key]
+		newDigest := digest.Canonical.FromBytes(data)
+		c.BigDataDigests[key] = newDigest
+		if !sizeOk || oldSize != c.BigDataSizes[key] || !digestOk || oldDigest != newDigest {
+			save = true
+		}
+		addName := true
+		for _, name := range c.BigDataNames {
+			if name == key {
+				addName = false
+				break
+			}
+		}
+		if addName {
+			c.BigDataNames = append(c.BigDataNames, key)
+			save = true
+		}
+		if save {
+			err = r.saveFor(c)
+		}
+	}
+	return err
+}
+
+// Requires startWriting.
+func (r *containerStore) Wipe() error {
+	ids := make([]string, 0, len(r.byid))
+	for id := range r.byid {
+		ids = append(ids, id)
+	}
+	for _, id := range ids {
+		if err := r.Delete(id); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/containers/storage/deprecated.go b/vendor/github.com/containers/storage/deprecated.go
new file mode 100644
index 0000000000..04972d8388
--- /dev/null
+++ b/vendor/github.com/containers/storage/deprecated.go
@@ -0,0 +1,216 @@
+package storage
+
+import (
+	"io"
+	"time"
+
+	drivers "github.com/containers/storage/drivers"
+	"github.com/containers/storage/pkg/archive"
+	digest "github.com/opencontainers/go-digest"
+)
+
+// The type definitions in this file exist ONLY to maintain formal API compatibility.
+// DO NOT ADD ANY NEW METHODS TO THESE INTERFACES.
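The stubs that follow rely on Go's documentation convention rather than any language feature: a doc-comment paragraph beginning with "Deprecated:" is what staticcheck, gopls, and pkg.go.dev key on, while interface embedding keeps the historical composed method sets compiling without restating them. A tiny, self-contained illustration of the pattern (all names invented):

```go
package main

import "fmt"

// roExample is the read-only half of a split API.
type roExample interface {
	Get(id string) (string, error)
}

// Deprecated: depend on roExample plus a separate write path; this
// composed interface is kept only so old code keeps compiling.
type rwExample interface {
	roExample
	Set(id, value string) error
}

type mem map[string]string

func (m mem) Get(id string) (string, error) { return m[id], nil }
func (m mem) Set(id, v string) error        { m[id] = v; return nil }

func main() {
	var s rwExample = mem{} // old callers still compile...
	_ = s.Set("a", "b")
	var r roExample = s // ...while new code can narrow to the RO view
	fmt.Println(r.Get("a"))
}
```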
+ +// ROFileBasedStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type ROFileBasedStore interface { + Locker + Load() error + ReloadIfChanged() error +} + +// RWFileBasedStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type RWFileBasedStore interface { + Save() error +} + +// FileBasedStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type FileBasedStore interface { + ROFileBasedStore + RWFileBasedStore +} + +// ROMetadataStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type ROMetadataStore interface { + Metadata(id string) (string, error) +} + +// RWMetadataStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type RWMetadataStore interface { + SetMetadata(id, metadata string) error +} + +// MetadataStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type MetadataStore interface { + ROMetadataStore + RWMetadataStore +} + +// ROBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type ROBigDataStore interface { + BigData(id, key string) ([]byte, error) + BigDataSize(id, key string) (int64, error) + BigDataDigest(id, key string) (digest.Digest, error) + BigDataNames(id string) ([]string, error) +} + +// RWImageBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type RWImageBigDataStore interface { + SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error +} + +// ContainerBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type ContainerBigDataStore interface { + ROBigDataStore + SetBigData(id, key string, data []byte) error +} + +// ROLayerBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type ROLayerBigDataStore interface { + BigData(id, key string) (io.ReadCloser, error) + BigDataNames(id string) ([]string, error) +} + +// RWLayerBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage. 
+// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type RWLayerBigDataStore interface { + SetBigData(id, key string, data io.Reader) error +} + +// LayerBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type LayerBigDataStore interface { + ROLayerBigDataStore + RWLayerBigDataStore +} + +// FlaggableStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type FlaggableStore interface { + ClearFlag(id string, flag string) error + SetFlag(id string, flag string, value interface{}) error +} + +// ContainerStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type ContainerStore interface { + FileBasedStore + MetadataStore + ContainerBigDataStore + FlaggableStore + Create(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) + SetNames(id string, names []string) error + AddNames(id string, names []string) error + RemoveNames(id string, names []string) error + Get(id string) (*Container, error) + Exists(id string) bool + Delete(id string) error + Wipe() error + Lookup(name string) (string, error) + Containers() ([]Container, error) +} + +// ROImageStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type ROImageStore interface { + ROFileBasedStore + ROMetadataStore + ROBigDataStore + Exists(id string) bool + Get(id string) (*Image, error) + Lookup(name string) (string, error) + Images() ([]Image, error) + ByDigest(d digest.Digest) ([]*Image, error) +} + +// ImageStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type ImageStore interface { + ROImageStore + RWFileBasedStore + RWMetadataStore + RWImageBigDataStore + FlaggableStore + Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (*Image, error) + SetNames(id string, names []string) error + AddNames(id string, names []string) error + RemoveNames(id string, names []string) error + Delete(id string) error + Wipe() error +} + +// ROLayerStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. 
+type ROLayerStore interface { + ROFileBasedStore + ROMetadataStore + ROLayerBigDataStore + Exists(id string) bool + Get(id string) (*Layer, error) + Status() ([][2]string, error) + Changes(from, to string) ([]archive.Change, error) + Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) + DiffSize(from, to string) (int64, error) + Size(name string) (int64, error) + Lookup(name string) (string, error) + LayersByCompressedDigest(d digest.Digest) ([]Layer, error) + LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) + Layers() ([]Layer, error) +} + +// LayerStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type LayerStore interface { + ROLayerStore + RWFileBasedStore + RWMetadataStore + FlaggableStore + RWLayerBigDataStore + Create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool) (*Layer, error) + CreateWithFlags(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}) (layer *Layer, err error) + Put(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error) + SetNames(id string, names []string) error + AddNames(id string, names []string) error + RemoveNames(id string, names []string) error + Delete(id string) error + Wipe() error + Mount(id string, options drivers.MountOpts) (string, error) + Unmount(id string, force bool) (bool, error) + Mounted(id string) (int, error) + ParentOwners(id string) (uids, gids []int, err error) + ApplyDiff(to string, diff io.Reader) (int64, error) + ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) + CleanupStagingDirectory(stagingDirectory string) error + ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error + DifferTarget(id string) (string, error) + LoadLocked() error + PutAdditionalLayer(id string, parentLayer *Layer, names []string, aLayer drivers.AdditionalLayer) (layer *Layer, err error) +} diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go new file mode 100644 index 0000000000..0b17662109 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/aufs/aufs.go @@ -0,0 +1,781 @@ +//go:build linux +// +build linux + +/* + +aufs driver directory structure + + . 
+  ├── layers // Metadata of layers
+  │   ├── 1
+  │   ├── 2
+  │   └── 3
+  ├── diff   // Content of the layer
+  │   ├── 1  // Contains layers that need to be mounted for the id
+  │   ├── 2
+  │   └── 3
+  └── mnt    // Mount points for the rw layers to be mounted
+      ├── 1
+      ├── 2
+      └── 3
+
+*/
+
+package aufs
+
+import (
+	"bufio"
+	"errors"
+	"fmt"
+	"io"
+	"io/fs"
+	"os"
+	"os/exec"
+	"path"
+	"path/filepath"
+	"strings"
+	"sync"
+	"time"
+
+	graphdriver "github.com/containers/storage/drivers"
+	"github.com/containers/storage/pkg/archive"
+	"github.com/containers/storage/pkg/chrootarchive"
+	"github.com/containers/storage/pkg/directory"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/locker"
+	mountpk "github.com/containers/storage/pkg/mount"
+	"github.com/containers/storage/pkg/parsers"
+	"github.com/containers/storage/pkg/system"
+	"github.com/containers/storage/pkg/unshare"
+	"github.com/opencontainers/selinux/go-selinux/label"
+	"github.com/sirupsen/logrus"
+	"github.com/vbatts/tar-split/tar/storage"
+	"golang.org/x/sys/unix"
+)
+
+var (
+	// ErrAufsNotSupported is returned if aufs is not supported by the host.
+	ErrAufsNotSupported = fmt.Errorf("aufs was not found in /proc/filesystems")
+	// ErrAufsNested means aufs cannot be used because we are in a user namespace
+	ErrAufsNested = fmt.Errorf("aufs cannot be used in non-init user namespace")
+	backingFs     = ""
+
+	enableDirpermLock sync.Once
+	enableDirperm     bool
+)
+
+const defaultPerms = os.FileMode(0o555)
+
+func init() {
+	graphdriver.MustRegister("aufs", Init)
+}
+
+// Driver contains information about the filesystem mounted.
+type Driver struct {
+	sync.Mutex
+	root          string
+	uidMaps       []idtools.IDMap
+	gidMaps       []idtools.IDMap
+	ctr           *graphdriver.RefCounter
+	pathCacheLock sync.Mutex
+	pathCache     map[string]string
+	naiveDiff     graphdriver.DiffDriver
+	locker        *locker.Locker
+	mountOptions  string
+}
+
+// Init returns a new AUFS driver.
+// An error is returned if AUFS is not supported.
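The graphdriver.MustRegister("aufs", Init) call in the init() above is the conventional Go plugin-registry idiom: constructors are installed into a package-level table at init time and looked up by name later. A toy version of that idiom follows (the table, names, and signature here are invented for illustration, not the real graphdriver API):

```go
package main

import "fmt"

type initFunc func(home string) (string, error)

var drivers = map[string]initFunc{}

// mustRegister panics on duplicates, mirroring the contract implied by
// a MustRegister-style helper.
func mustRegister(name string, fn initFunc) {
	if _, dup := drivers[name]; dup {
		panic(fmt.Sprintf("graphdriver: %q registered twice", name))
	}
	drivers[name] = fn
}

func init() {
	mustRegister("toy", func(home string) (string, error) {
		return "toy driver rooted at " + home, nil
	})
}

func main() {
	fmt.Println(drivers["toy"]("/var/lib/toy"))
}
```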
+func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) { + // Try to load the aufs kernel module + if err := supportsAufs(); err != nil { + return nil, fmt.Errorf("kernel does not support aufs: %w", graphdriver.ErrNotSupported) + } + + fsMagic, err := graphdriver.GetFSMagic(home) + if err != nil { + return nil, err + } + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFs = fsName + } + + switch fsMagic { + case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicEcryptfs: + logrus.Errorf("AUFS is not supported over %s", backingFs) + return nil, fmt.Errorf("aufs is not supported over %q: %w", backingFs, graphdriver.ErrIncompatibleFS) + } + + var mountOptions string + for _, option := range options.DriverOptions { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return nil, err + } + key = strings.ToLower(key) + switch key { + case "aufs.mountopt": + mountOptions = val + default: + return nil, fmt.Errorf("option %s not supported", option) + } + } + paths := []string{ + "mnt", + "diff", + "layers", + } + + a := &Driver{ + root: home, + uidMaps: options.UIDMaps, + gidMaps: options.GIDMaps, + pathCache: make(map[string]string), + ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs)), + locker: locker.New(), + mountOptions: mountOptions, + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) + if err != nil { + return nil, err + } + // Create the root aufs driver dir and return + // if it already exists + // If not populate the dir structure + if err := idtools.MkdirAllAs(home, 0o700, rootUID, rootGID); err != nil { + if os.IsExist(err) { + return a, nil + } + return nil, err + } + + if err := mountpk.MakePrivate(home); err != nil { + return nil, err + } + + // Populate the dir structure + for _, p := range paths { + if err := idtools.MkdirAllAs(path.Join(home, p), 0o700, rootUID, rootGID); err != nil { + return nil, err + } + } + logger := logrus.WithFields(logrus.Fields{ + "module": "graphdriver", + "driver": "aufs", + }) + + for _, path := range []string{"mnt", "diff"} { + p := filepath.Join(home, path) + entries, err := os.ReadDir(p) + if err != nil { + logger.WithError(err).WithField("dir", p).Error("error reading dir entries") + continue + } + for _, entry := range entries { + if !entry.IsDir() { + continue + } + if strings.HasSuffix(entry.Name(), "-removing") { + logger.WithField("dir", entry.Name()).Debug("Cleaning up stale layer dir") + if err := system.EnsureRemoveAll(filepath.Join(p, entry.Name())); err != nil { + logger.WithField("dir", entry.Name()).WithError(err).Error("Error removing stale layer dir") + } + } + } + } + + a.naiveDiff = graphdriver.NewNaiveDiffDriver(a, a) + return a, nil +} + +// Return a nil error if the kernel supports aufs +// We cannot modprobe because inside dind modprobe fails +// to run +func supportsAufs() error { + // We can try to modprobe aufs first before looking at + // proc/filesystems for when aufs is supported + exec.Command("modprobe", "aufs").Run() + + if unshare.IsRootless() { + return ErrAufsNested + } + + f, err := os.Open("/proc/filesystems") + if err != nil { + return err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if strings.Contains(s.Text(), "aufs") { + return nil + } + } + return ErrAufsNotSupported +} + +func (a *Driver) rootPath() string { + return a.root +} + +func (*Driver) String() string { + return "aufs" +} + +// Status returns current information about the 
filesystem such as root directory, number of directories mounted, etc. +func (a *Driver) Status() [][2]string { + ids, _ := loadIds(path.Join(a.rootPath(), "layers")) + return [][2]string{ + {"Root Dir", a.rootPath()}, + {"Backing Filesystem", backingFs}, + {"Dirs", fmt.Sprintf("%d", len(ids))}, + {"Dirperm1 Supported", fmt.Sprintf("%v", useDirperm())}, + } +} + +// Metadata not implemented +func (a *Driver) Metadata(id string) (map[string]string, error) { + return nil, nil //nolint: nilnil +} + +// Exists returns true if the given id is registered with +// this driver +func (a *Driver) Exists(id string) bool { + if _, err := os.Lstat(path.Join(a.rootPath(), "layers", id)); err != nil { + return false + } + return true +} + +// ListLayers() returns all of the layers known to the driver. +func (a *Driver) ListLayers() ([]string, error) { + diffsDir := filepath.Join(a.rootPath(), "diff") + entries, err := os.ReadDir(diffsDir) + if err != nil { + return nil, err + } + results := make([]string, 0, len(entries)) + for _, entry := range entries { + if !entry.IsDir() { + continue + } + results = append(results, entry.Name()) + } + return results, nil +} + +// AdditionalImageStores returns additional image stores supported by the driver +func (a *Driver) AdditionalImageStores() []string { + return nil +} + +// CreateFromTemplate creates a layer with the same contents and parent as another layer. +func (a *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error { + if opts == nil { + opts = &graphdriver.CreateOpts{} + } + return graphdriver.NaiveCreateFromTemplate(a, id, template, templateIDMappings, parent, parentIDMappings, opts, readWrite) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (a *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return a.Create(id, parent, opts) +} + +// Create three folders for each id +// mnt, layers, and diff +func (a *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + if opts != nil && len(opts.StorageOpt) != 0 { + return fmt.Errorf("--storage-opt is not supported for aufs") + } + + if err := a.createDirsFor(id, parent); err != nil { + return err + } + // Write the layers metadata + f, err := os.Create(path.Join(a.rootPath(), "layers", id)) + if err != nil { + return err + } + defer f.Close() + + if parent != "" { + ids, err := getParentIDs(a.rootPath(), parent) + if err != nil { + return err + } + + if _, err := fmt.Fprintln(f, parent); err != nil { + return err + } + for _, i := range ids { + if _, err := fmt.Fprintln(f, i); err != nil { + return err + } + } + } + + return nil +} + +// createDirsFor creates two directories for the given id. +// mnt and diff +func (a *Driver) createDirsFor(id, parent string) error { + paths := []string{ + "mnt", + "diff", + } + + // Directory permission is 0555. 
+	// The paths of the directories are <aufs_root_path>/mnt/<image_id>
+	// and <aufs_root_path>/diff/<image_id>
+	for _, p := range paths {
+		rootPair := idtools.NewIDMappingsFromMaps(a.uidMaps, a.gidMaps).RootPair()
+		rootPerms := defaultPerms
+		if parent != "" {
+			st, err := system.Stat(path.Join(a.rootPath(), p, parent))
+			if err != nil {
+				return err
+			}
+			rootPerms = os.FileMode(st.Mode())
+			rootPair.UID = int(st.UID())
+			rootPair.GID = int(st.GID())
+		}
+		if err := idtools.MkdirAllAndChownNew(path.Join(a.rootPath(), p, id), rootPerms, rootPair); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Remove will unmount and remove the given id.
+func (a *Driver) Remove(id string) error {
+	a.locker.Lock(id)
+	defer a.locker.Unlock(id)
+	a.pathCacheLock.Lock()
+	mountpoint, exists := a.pathCache[id]
+	a.pathCacheLock.Unlock()
+	if !exists {
+		mountpoint = a.getMountpoint(id)
+	}
+
+	logger := logrus.WithFields(logrus.Fields{
+		"module": "graphdriver",
+		"driver": "aufs",
+		"layer":  id,
+	})
+
+	var retries int
+	for {
+		mounted, err := a.mounted(mountpoint)
+		if err != nil {
+			if os.IsNotExist(err) {
+				break
+			}
+			return err
+		}
+		if !mounted {
+			break
+		}
+
+		err = a.unmount(mountpoint)
+		if err == nil {
+			break
+		}
+
+		if err != unix.EBUSY {
+			return fmt.Errorf("aufs: unmount error: %s: %w", mountpoint, err)
+		}
+		if retries >= 5 {
+			return fmt.Errorf("aufs: unmount error after retries: %s: %w", mountpoint, err)
+		}
+		// If unmount returns EBUSY, it could be a transient error. Sleep and retry.
+		retries++
+		logger.Warnf("unmount failed due to EBUSY: retry count: %d", retries)
+		time.Sleep(100 * time.Millisecond)
+	}
+
+	// Remove the layers file for the id
+	if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) {
+		return fmt.Errorf("removing layers dir for %s: %w", id, err)
+	}
+
+	if err := atomicRemove(a.getDiffPath(id)); err != nil {
+		return fmt.Errorf("could not remove diff path for id %s: %w", id, err)
+	}
+
+	// Atomically remove each directory in turn by first moving it out of the
+	// way (so that container runtime doesn't find it anymore) before doing removal of
+	// the whole tree.
+	if err := atomicRemove(mountpoint); err != nil {
+		if errors.Is(err, unix.EBUSY) {
+			logger.WithField("dir", mountpoint).WithError(err).Warn("error performing atomic remove due to EBUSY")
+		}
+		return fmt.Errorf("could not remove mountpoint for id %s: %w", id, err)
+	}
+
+	a.pathCacheLock.Lock()
+	delete(a.pathCache, id)
+	a.pathCacheLock.Unlock()
+	return nil
+}
+
+func atomicRemove(source string) error {
+	target := source + "-removing"
+
+	err := os.Rename(source, target)
+	switch {
+	case err == nil, os.IsNotExist(err):
+	case os.IsExist(err):
+		// Got error saying the target dir already exists, maybe the source doesn't exist due to a previous (failed) remove
+		if _, e := os.Stat(source); !os.IsNotExist(e) {
+			return fmt.Errorf("target rename dir '%s' exists but should not, this needs to be manually cleaned up: %w", target, err)
+		}
+	default:
+		return fmt.Errorf("preparing atomic delete: %w", err)
+	}
+
+	return system.EnsureRemoveAll(target)
+}
+
+// Get returns the rootfs path for the id.
+// This will mount the dir at its given path +func (a *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { + a.locker.Lock(id) + defer a.locker.Unlock(id) + parents, err := a.getParentLayerPaths(id) + if err != nil && !os.IsNotExist(err) { + return "", err + } + + a.pathCacheLock.Lock() + m, exists := a.pathCache[id] + a.pathCacheLock.Unlock() + + if !exists { + m = a.getDiffPath(id) + if len(parents) > 0 { + m = a.getMountpoint(id) + } + } + if count := a.ctr.Increment(m); count > 1 { + return m, nil + } + + // If a dir does not have a parent ( no layers )do not try to mount + // just return the diff path to the data + if len(parents) > 0 { + if err := a.mount(id, m, parents, options); err != nil { + return "", err + } + } + + a.pathCacheLock.Lock() + a.pathCache[id] = m + a.pathCacheLock.Unlock() + return m, nil +} + +// Put unmounts and updates list of active mounts. +func (a *Driver) Put(id string) error { + a.locker.Lock(id) + defer a.locker.Unlock(id) + a.pathCacheLock.Lock() + m, exists := a.pathCache[id] + if !exists { + m = a.getMountpoint(id) + a.pathCache[id] = m + } + a.pathCacheLock.Unlock() + if count := a.ctr.Decrement(m); count > 0 { + return nil + } + + err := a.unmount(m) + if err != nil { + logrus.Debugf("Failed to unmount %s aufs: %v", id, err) + } + return err +} + +// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID. +// For AUFS, it queries the mountpoint for this ID. +func (a *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) { + a.locker.Lock(id) + defer a.locker.Unlock(id) + a.pathCacheLock.Lock() + m, exists := a.pathCache[id] + if !exists { + m = a.getMountpoint(id) + a.pathCache[id] = m + } + a.pathCacheLock.Unlock() + return directory.Usage(m) +} + +// isParent returns if the passed in parent is the direct parent of the passed in layer +func (a *Driver) isParent(id, parent string) bool { + parents, _ := getParentIDs(a.rootPath(), id) + if parent == "" && len(parents) > 0 { + return false + } + return !(len(parents) > 0 && parent != parents[0]) +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (a *Driver) Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error) { + if !a.isParent(id, parent) { + return a.naiveDiff.Diff(id, idMappings, parent, parentMappings, mountLabel) + } + + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + + // AUFS doesn't need the parent layer to produce a diff. + return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ + Compression: archive.Uncompressed, + ExcludePatterns: []string{archive.WhiteoutMetaPrefix + "*", "!" + archive.WhiteoutOpaqueDir}, + UIDMaps: idMappings.UIDs(), + GIDMaps: idMappings.GIDs(), + }) +} + +type fileGetNilCloser struct { + storage.FileGetter +} + +func (f fileGetNilCloser) Close() error { + return nil +} + +// DiffGetter returns a FileGetCloser that can read files from the directory that +// contains files for the layer differences. Used for direct access for tar-split. 
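+// An illustrative use (hypothetical id and file; Get comes from tar-split's
+// storage.FileGetter interface):
+//
+//	fg, err := a.DiffGetter("layer-id")
+//	if err == nil {
+//		defer fg.Close()
+//		rc, _ := fg.Get("etc/hostname") // reads <root>/diff/layer-id/etc/hostname
+//	}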
+func (a *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { + p := path.Join(a.rootPath(), "diff", id) + return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil +} + +func (a *Driver) applyDiff(id string, idMappings *idtools.IDMappings, diff io.Reader) error { + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + return chrootarchive.UntarUncompressed(diff, path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ + UIDMaps: idMappings.UIDs(), + GIDMaps: idMappings.GIDs(), + }) +} + +// DiffSize calculates the changes between the specified id +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (a *Driver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) { + if !a.isParent(id, parent) { + return a.naiveDiff.DiffSize(id, idMappings, parent, parentMappings, mountLabel) + } + // AUFS doesn't need the parent layer to calculate the diff size. + return directory.Size(path.Join(a.rootPath(), "diff", id)) +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +func (a *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts) (size int64, err error) { + if !a.isParent(id, parent) { + return a.naiveDiff.ApplyDiff(id, parent, options) + } + + // AUFS doesn't need the parent id to apply the diff if it is the direct parent. + if err = a.applyDiff(id, options.Mappings, options.Diff); err != nil { + return + } + + return directory.Size(path.Join(a.rootPath(), "diff", id)) +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (a *Driver) Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) { + if !a.isParent(id, parent) { + return a.naiveDiff.Changes(id, idMappings, parent, parentMappings, mountLabel) + } + + // AUFS doesn't have snapshots, so we need to get changes from all parent + // layers. 
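+	// getParentLayerPaths returns the diff directories of every ancestor,
+	// e.g. (hypothetical ids) [".../diff/p1", ".../diff/p2"]; archive.Changes
+	// then treats them as the stacked read-only lower layers.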
+ layers, err := a.getParentLayerPaths(id) + if err != nil { + return nil, err + } + return archive.Changes(layers, path.Join(a.rootPath(), "diff", id)) +} + +func (a *Driver) getParentLayerPaths(id string) ([]string, error) { + parentIds, err := getParentIDs(a.rootPath(), id) + if err != nil { + return nil, err + } + layers := make([]string, len(parentIds)) + + // Get the diff paths for all the parent ids + for i, p := range parentIds { + layers[i] = path.Join(a.rootPath(), "diff", p) + } + return layers, nil +} + +func (a *Driver) mount(id string, target string, layers []string, options graphdriver.MountOpts) error { + a.Lock() + defer a.Unlock() + + // If the id is mounted or we get an error return + if mounted, err := a.mounted(target); err != nil || mounted { + return err + } + + rw := a.getDiffPath(id) + + if err := a.aufsMount(layers, rw, target, options); err != nil { + return fmt.Errorf("creating aufs mount to %s: %w", target, err) + } + return nil +} + +func (a *Driver) unmount(mountPath string) error { + a.Lock() + defer a.Unlock() + + if mounted, err := a.mounted(mountPath); err != nil || !mounted { + return err + } + if err := Unmount(mountPath); err != nil { + return err + } + return nil +} + +func (a *Driver) mounted(mountpoint string) (bool, error) { + return graphdriver.Mounted(graphdriver.FsMagicAufs, mountpoint) +} + +// Cleanup aufs and unmount all mountpoints +func (a *Driver) Cleanup() error { + var dirs []string + if err := filepath.WalkDir(a.mntPath(), func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if !d.IsDir() { + return nil + } + dirs = append(dirs, path) + return nil + }); err != nil { + return err + } + + for _, m := range dirs { + if err := a.unmount(m); err != nil { + logrus.Debugf("aufs error unmounting %s: %s", m, err) + } + } + return mountpk.Unmount(a.root) +} + +func (a *Driver) aufsMount(ro []string, rw, target string, options graphdriver.MountOpts) (err error) { + defer func() { + if err != nil { + Unmount(target) + } + }() + + // Mount options are clipped to page size(4096 bytes). If there are more + // layers then these are remounted individually using append. + + offset := 54 + if useDirperm() { + offset += len(",dirperm1") + } + b := make([]byte, unix.Getpagesize()-len(options.MountLabel)-offset) // room for xino & mountLabel + bp := copy(b, fmt.Sprintf("br:%s=rw", rw)) + + index := 0 + for ; index < len(ro); index++ { + layer := fmt.Sprintf(":%s=ro+wh", ro[index]) + if bp+len(layer) > len(b) { + break + } + bp += copy(b[bp:], layer) + } + + opts := "dio,xino=/dev/shm/aufs.xino" + mountOptions := a.mountOptions + if len(options.Options) > 0 { + mountOptions = strings.Join(options.Options, ",") + } + if mountOptions != "" { + opts += fmt.Sprintf(",%s", mountOptions) + } + + if useDirperm() { + opts += ",dirperm1" + } + data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), options.MountLabel) + if err = mount("none", target, "aufs", 0, data); err != nil { + return + } + + for ; index < len(ro); index++ { + layer := fmt.Sprintf(":%s=ro+wh", ro[index]) + data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), options.MountLabel) + if err = mount("none", target, "aufs", unix.MS_REMOUNT, data); err != nil { + return + } + } + + return +} + +// useDirperm checks dirperm1 mount option can be used with the current +// version of aufs. 
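+// The probe below mounts a throwaway aufs union with the dirperm1 option and
+// caches the outcome through enableDirpermLock (a sync.Once), so the check
+// runs at most once per process.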
+func useDirperm() bool { + enableDirpermLock.Do(func() { + base, err := os.MkdirTemp("", "storage-aufs-base") + if err != nil { + logrus.Errorf("Checking dirperm1: %v", err) + return + } + defer os.RemoveAll(base) + + union, err := os.MkdirTemp("", "storage-aufs-union") + if err != nil { + logrus.Errorf("Checking dirperm1: %v", err) + return + } + defer os.RemoveAll(union) + + opts := fmt.Sprintf("br:%s,dirperm1,xino=/dev/shm/aufs.xino", base) + if err := mount("none", union, "aufs", 0, opts); err != nil { + return + } + enableDirperm = true + if err := Unmount(union); err != nil { + logrus.Errorf("Checking dirperm1: failed to unmount %v", err) + } + }) + return enableDirperm +} + +// UpdateLayerIDMap updates ID mappings in a layer from matching the ones +// specified by toContainer to those specified by toHost. +func (a *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error { + return fmt.Errorf("aufs doesn't support changing ID mappings") +} + +// SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in an userNS +func (a *Driver) SupportsShifting() bool { + return false +} diff --git a/vendor/github.com/containers/storage/drivers/aufs/dirs.go b/vendor/github.com/containers/storage/drivers/aufs/dirs.go new file mode 100644 index 0000000000..27e6216331 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/aufs/dirs.go @@ -0,0 +1,64 @@ +//go:build linux +// +build linux + +package aufs + +import ( + "bufio" + "os" + "path" +) + +// Return all the directories +func loadIds(root string) ([]string, error) { + dirs, err := os.ReadDir(root) + if err != nil { + return nil, err + } + out := []string{} + for _, d := range dirs { + if !d.IsDir() { + out = append(out, d.Name()) + } + } + return out, nil +} + +// Read the layers file for the current id and return all the +// layers represented by new lines in the file +// +// If there are no lines in the file then the id has no parent +// and an empty slice is returned. +func getParentIDs(root, id string) ([]string, error) { + f, err := os.Open(path.Join(root, "layers", id)) + if err != nil { + return nil, err + } + defer f.Close() + + out := []string{} + s := bufio.NewScanner(f) + + for s.Scan() { + if t := s.Text(); t != "" { + out = append(out, s.Text()) + } + } + return out, s.Err() +} + +func (a *Driver) getMountpoint(id string) string { + return path.Join(a.mntPath(), id) +} + +func (a *Driver) mntPath() string { + return path.Join(a.rootPath(), "mnt") +} + +func (a *Driver) getDiffPath(id string) string { + return path.Join(a.diffPath(), id) +} + +func (a *Driver) diffPath() string { + return path.Join(a.rootPath(), "diff") +} diff --git a/vendor/github.com/containers/storage/drivers/aufs/mount.go b/vendor/github.com/containers/storage/drivers/aufs/mount.go new file mode 100644 index 0000000000..156f4a4f0e --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/aufs/mount.go @@ -0,0 +1,22 @@ +//go:build linux +// +build linux + +package aufs + +import ( + "os/exec" + + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// Unmount the target specified. 
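+// "auplink <target> flush" is attempted first so that aufs flushes
+// pseudo-links back to their branches; a failure there is only logged and
+// the unmount itself still proceeds.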
+func Unmount(target string) error { + if err := exec.Command("auplink", target, "flush").Run(); err != nil { + logrus.Warnf("Couldn't run auplink before unmount %s: %s", target, err) + } + if err := unix.Unmount(target, 0); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/containers/storage/drivers/aufs/mount_linux.go b/vendor/github.com/containers/storage/drivers/aufs/mount_linux.go new file mode 100644 index 0000000000..937104ba3f --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/aufs/mount_linux.go @@ -0,0 +1,7 @@ +package aufs + +import "golang.org/x/sys/unix" + +func mount(source string, target string, fstype string, flags uintptr, data string) error { + return unix.Mount(source, target, fstype, flags, data) +} diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go new file mode 100644 index 0000000000..fd93d4e84d --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go @@ -0,0 +1,695 @@ +//go:build linux && cgo +// +build linux,cgo + +package btrfs + +/* +#include +#include + +// keep struct field name compatible with btrfs-progs < 6.1. +#define max_referenced max_rfer +#include +#include + +static void set_name_btrfs_ioctl_vol_args_v2(struct btrfs_ioctl_vol_args_v2* btrfs_struct, const char* value) { + snprintf(btrfs_struct->name, BTRFS_SUBVOL_NAME_MAX, "%s", value); +} +*/ +import "C" + +import ( + "fmt" + "io/fs" + "math" + "os" + "path" + "path/filepath" + "strconv" + "strings" + "sync" + "unsafe" + + graphdriver "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/mount" + "github.com/containers/storage/pkg/parsers" + "github.com/containers/storage/pkg/system" + "github.com/docker/go-units" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +const defaultPerms = os.FileMode(0o555) + +func init() { + graphdriver.MustRegister("btrfs", Init) +} + +type btrfsOptions struct { + minSpace uint64 + size uint64 +} + +// Init returns a new BTRFS driver. +// An error is returned if BTRFS is not supported. 
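+// A hypothetical configuration enabling the one supported driver option
+// (see parseOptions below) could look like:
+//
+//	d, err := Init("/var/lib/storage/btrfs", graphdriver.Options{
+//		DriverOptions: []string{"btrfs.min_space=10G"},
+//	})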
+func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) { + fsMagic, err := graphdriver.GetFSMagic(home) + if err != nil { + return nil, err + } + + if fsMagic != graphdriver.FsMagicBtrfs { + return nil, fmt.Errorf("%q is not on a btrfs filesystem: %w", home, graphdriver.ErrPrerequisites) + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) + if err != nil { + return nil, err + } + if err := idtools.MkdirAllAs(filepath.Join(home, "subvolumes"), 0o700, rootUID, rootGID); err != nil { + return nil, err + } + + if err := mount.MakePrivate(home); err != nil { + return nil, err + } + + opt, userDiskQuota, err := parseOptions(options.DriverOptions) + if err != nil { + return nil, err + } + + driver := &Driver{ + home: home, + uidMaps: options.UIDMaps, + gidMaps: options.GIDMaps, + options: opt, + } + + if userDiskQuota { + if err := driver.enableQuota(); err != nil { + return nil, err + } + } + + return graphdriver.NewNaiveDiffDriver(driver, graphdriver.NewNaiveLayerIDMapUpdater(driver)), nil +} + +func parseOptions(opt []string) (btrfsOptions, bool, error) { + var options btrfsOptions + userDiskQuota := false + for _, option := range opt { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return options, userDiskQuota, err + } + key = strings.ToLower(key) + switch key { + case "btrfs.min_space": + minSpace, err := units.RAMInBytes(val) + if err != nil { + return options, userDiskQuota, err + } + userDiskQuota = true + options.minSpace = uint64(minSpace) + case "btrfs.mountopt": + return options, userDiskQuota, fmt.Errorf("btrfs driver does not support mount options") + default: + return options, userDiskQuota, fmt.Errorf("unknown option %s (%q)", key, option) + } + } + return options, userDiskQuota, nil +} + +// Driver contains information about the filesystem mounted. +type Driver struct { + // root of the file system + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + options btrfsOptions + quotaEnabled bool + once sync.Once +} + +// String prints the name of the driver (btrfs). +func (d *Driver) String() string { + return "btrfs" +} + +// Status returns current driver information in a two dimensional string array. +// Output contains "Build Version" and "Library Version" of the btrfs libraries used. +// Version information can be used to check compatibility with your kernel. +func (d *Driver) Status() [][2]string { + status := [][2]string{} + if bv := btrfsBuildVersion(); bv != "-" { + status = append(status, [2]string{"Build Version", bv}) + } + if lv := btrfsLibVersion(); lv != -1 { + status = append(status, [2]string{"Library Version", fmt.Sprintf("%d", lv)}) + } + return status +} + +// Metadata returns empty metadata for this driver. +func (d *Driver) Metadata(id string) (map[string]string, error) { + return nil, nil //nolint: nilnil +} + +// Cleanup unmounts the home directory. 
+func (d *Driver) Cleanup() error { + return mount.Unmount(d.home) +} + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func openDir(path string) (*C.DIR, error) { + Cpath := C.CString(path) + defer free(Cpath) + + dir := C.opendir(Cpath) + if dir == nil { + return nil, fmt.Errorf("can't open dir %s", path) + } + return dir, nil +} + +func closeDir(dir *C.DIR) { + if dir != nil { + C.closedir(dir) + } +} + +func getDirFd(dir *C.DIR) uintptr { + return uintptr(C.dirfd(dir)) +} + +func subvolCreate(path, name string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_vol_args + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("failed to create btrfs subvolume: %w", errno) + } + return nil +} + +func subvolSnapshot(src, dest, name string) error { + srcDir, err := openDir(src) + if err != nil { + return err + } + defer closeDir(srcDir) + + destDir, err := openDir(dest) + if err != nil { + return err + } + defer closeDir(destDir) + + var args C.struct_btrfs_ioctl_vol_args_v2 + args.fd = C.__s64(getDirFd(srcDir)) + + cs := C.CString(name) + C.set_name_btrfs_ioctl_vol_args_v2(&args, cs) + C.free(unsafe.Pointer(cs)) + + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("failed to create btrfs snapshot: %w", errno) + } + return nil +} + +func isSubvolume(p string) (bool, error) { + var bufStat unix.Stat_t + if err := unix.Lstat(p, &bufStat); err != nil { + return false, err + } + + // return true if it is a btrfs subvolume + return bufStat.Ino == C.BTRFS_FIRST_FREE_OBJECTID, nil +} + +func subvolDelete(dirpath, name string, quotaEnabled bool) error { + dir, err := openDir(dirpath) + if err != nil { + return err + } + defer closeDir(dir) + fullPath := path.Join(dirpath, name) + + var args C.struct_btrfs_ioctl_vol_args + + // walk the btrfs subvolumes + walkSubvolumes := func(p string, d fs.DirEntry, err error) error { + if err != nil { + if os.IsNotExist(err) && p != fullPath { + // missing most likely because the path was a subvolume that got removed in the previous iteration + // since it's gone anyway, we don't care + return nil + } + return fmt.Errorf("walking subvolumes: %w", err) + } + // we want to check children only so skip itself + // it will be removed after the filepath walk anyways + if d.IsDir() && p != fullPath { + sv, err := isSubvolume(p) + if err != nil { + return fmt.Errorf("failed to test if %s is a btrfs subvolume: %w", p, err) + } + if sv { + if err := subvolDelete(path.Dir(p), d.Name(), quotaEnabled); err != nil { + return fmt.Errorf("failed to destroy btrfs child subvolume (%s) of parent (%s): %w", p, dirpath, err) + } + } + } + return nil + } + if err := filepath.WalkDir(path.Join(dirpath, name), walkSubvolumes); err != nil { + return fmt.Errorf("recursively walking subvolumes for %s failed: %w", dirpath, err) + } + + if quotaEnabled { + if qgroupid, err := subvolLookupQgroup(fullPath); err == nil { + var args C.struct_btrfs_ioctl_qgroup_create_args + args.qgroupid = C.__u64(qgroupid) + + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_CREATE, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + logrus.Errorf("Failed to delete btrfs qgroup %v for %s: %v", qgroupid, fullPath, 
+					errno.Error())
+			}
+		} else {
+			logrus.Errorf("Failed to lookup btrfs qgroup for %s: %v", fullPath, err.Error())
+		}
+	}
+
+	// all subvolumes have been removed
+	// now remove the one originally passed in
+	for i, c := range []byte(name) {
+		args.name[i] = C.char(c)
+	}
+	_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY,
+		uintptr(unsafe.Pointer(&args)))
+	if errno != 0 {
+		return fmt.Errorf("failed to destroy btrfs snapshot %s for %s: %w", dirpath, name, errno)
+	}
+	return nil
+}
+
+func (d *Driver) updateQuotaStatus() {
+	d.once.Do(func() {
+		if !d.quotaEnabled {
+			// In case quotaEnabled is not set, check qgroup and update quotaEnabled as needed
+			if err := qgroupStatus(d.home); err != nil {
+				// quota is still not enabled
+				return
+			}
+			d.quotaEnabled = true
+		}
+	})
+}
+
+func (d *Driver) enableQuota() error {
+	d.updateQuotaStatus()
+
+	if d.quotaEnabled {
+		return nil
+	}
+
+	dir, err := openDir(d.home)
+	if err != nil {
+		return err
+	}
+	defer closeDir(dir)
+
+	var args C.struct_btrfs_ioctl_quota_ctl_args
+	args.cmd = C.BTRFS_QUOTA_CTL_ENABLE
+	_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL,
+		uintptr(unsafe.Pointer(&args)))
+	if errno != 0 {
+		return fmt.Errorf("failed to enable btrfs quota for %s: %w", dir, errno)
+	}
+
+	d.quotaEnabled = true
+
+	return nil
+}
+
+func (d *Driver) subvolRescanQuota() error {
+	d.updateQuotaStatus()
+
+	if !d.quotaEnabled {
+		return nil
+	}
+
+	dir, err := openDir(d.home)
+	if err != nil {
+		return err
+	}
+	defer closeDir(dir)
+
+	var args C.struct_btrfs_ioctl_quota_rescan_args
+	_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_RESCAN_WAIT,
+		uintptr(unsafe.Pointer(&args)))
+	if errno != 0 {
+		return fmt.Errorf("failed to rescan btrfs quota for %s: %w", dir, errno)
+	}
+
+	return nil
+}
+
+func subvolLimitQgroup(path string, size uint64) error {
+	dir, err := openDir(path)
+	if err != nil {
+		return err
+	}
+	defer closeDir(dir)
+
+	var args C.struct_btrfs_ioctl_qgroup_limit_args
+	args.lim.max_rfer = C.__u64(size)
+	args.lim.flags = C.BTRFS_QGROUP_LIMIT_MAX_RFER
+	_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT,
+		uintptr(unsafe.Pointer(&args)))
+	if errno != 0 {
+		return fmt.Errorf("failed to limit qgroup for %s: %w", dir, errno)
+	}
+
+	return nil
+}
+
+// qgroupStatus performs a BTRFS_IOC_TREE_SEARCH on the root path
+// with search key of BTRFS_QGROUP_STATUS_KEY.
+// In case qgroup is enabled, the returned key type will match BTRFS_QGROUP_STATUS_KEY.
+// For more details please see https://github.com/kdave/btrfs-progs/blob/v4.9/qgroup.c#L1035 +func qgroupStatus(path string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_search_args + args.key.tree_id = C.BTRFS_QUOTA_TREE_OBJECTID + args.key.min_type = C.BTRFS_QGROUP_STATUS_KEY + args.key.max_type = C.BTRFS_QGROUP_STATUS_KEY + args.key.max_objectid = C.__u64(math.MaxUint64) + args.key.max_offset = C.__u64(math.MaxUint64) + args.key.max_transid = C.__u64(math.MaxUint64) + args.key.nr_items = 4096 + + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_TREE_SEARCH, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("failed to search qgroup for %s: %w", path, errno) + } + sh := (*C.struct_btrfs_ioctl_search_header)(unsafe.Pointer(&args.buf)) + if sh._type != C.BTRFS_QGROUP_STATUS_KEY { + return fmt.Errorf("invalid qgroup search header type for %s: %v", path, sh._type) + } + return nil +} + +func subvolLookupQgroup(path string) (uint64, error) { + dir, err := openDir(path) + if err != nil { + return 0, err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_ino_lookup_args + args.objectid = C.BTRFS_FIRST_FREE_OBJECTID + + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_INO_LOOKUP, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return 0, fmt.Errorf("failed to lookup qgroup for %s: %w", dir, errno) + } + if args.treeid == 0 { + return 0, fmt.Errorf("invalid qgroup id for %s: 0", dir) + } + + return uint64(args.treeid), nil +} + +func (d *Driver) subvolumesDir() string { + return path.Join(d.home, "subvolumes") +} + +func (d *Driver) subvolumesDirID(id string) string { + return path.Join(d.subvolumesDir(), id) +} + +func (d *Driver) quotasDir() string { + return path.Join(d.home, "quotas") +} + +func (d *Driver) quotasDirID(id string) string { + return path.Join(d.quotasDir(), id) +} + +// CreateFromTemplate creates a layer with the same contents and parent as another layer. +func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error { + return d.Create(id, template, opts) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create the filesystem with given id. 
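+// When opts carries a "size" storage option, the new subvolume gets a qgroup
+// limit; e.g. (hypothetical values):
+//
+//	err := d.Create("layer-id", "", &graphdriver.CreateOpts{
+//		StorageOpt: map[string]string{"size": "120G"},
+//	})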
+func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + quotas := d.quotasDir() + subvolumes := d.subvolumesDir() + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(subvolumes, 0o700, rootUID, rootGID); err != nil { + return err + } + if parent == "" { + if err := subvolCreate(subvolumes, id); err != nil { + return err + } + if err := os.Chmod(path.Join(subvolumes, id), defaultPerms); err != nil { + return err + } + } else { + parentDir := d.subvolumesDirID(parent) + st, err := os.Stat(parentDir) + if err != nil { + return err + } + if !st.IsDir() { + return fmt.Errorf("%s: not a directory", parentDir) + } + if err := subvolSnapshot(parentDir, subvolumes, id); err != nil { + return err + } + } + + var storageOpt map[string]string + if opts != nil { + storageOpt = opts.StorageOpt + } + + if _, ok := storageOpt["size"]; ok { + driver := &Driver{} + if err := d.parseStorageOpt(storageOpt, driver); err != nil { + return err + } + + if err := d.setStorageSize(path.Join(subvolumes, id), driver); err != nil { + return err + } + if err := idtools.MkdirAllAs(quotas, 0o700, rootUID, rootGID); err != nil { + return err + } + if err := os.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0o644); err != nil { + return err + } + } + + // if we have a remapped root (user namespaces enabled), change the created snapshot + // dir ownership to match + if rootUID != 0 || rootGID != 0 { + if err := os.Chown(path.Join(subvolumes, id), rootUID, rootGID); err != nil { + return err + } + } + + mountLabel := "" + if opts != nil { + mountLabel = opts.MountLabel + } + + return label.Relabel(path.Join(subvolumes, id), mountLabel, false) +} + +// Parse btrfs storage options +func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { + // Read size to change the subvolume disk quota per container + for key, val := range storageOpt { + key := strings.ToLower(key) + switch key { + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return err + } + driver.options.size = uint64(size) + default: + return fmt.Errorf("unknown option %s (%q)", key, storageOpt) + } + } + + return nil +} + +// Set btrfs storage size +func (d *Driver) setStorageSize(dir string, driver *Driver) error { + if driver.options.size <= 0 { + return fmt.Errorf("btrfs: invalid storage size: %s", units.HumanSize(float64(driver.options.size))) + } + if d.options.minSpace > 0 && driver.options.size < d.options.minSpace { + return fmt.Errorf("btrfs: storage size cannot be less than %s", units.HumanSize(float64(d.options.minSpace))) + } + + if err := d.enableQuota(); err != nil { + return err + } + + if err := subvolLimitQgroup(dir, driver.options.size); err != nil { + return err + } + + return nil +} + +// Remove the filesystem with given id. +func (d *Driver) Remove(id string) error { + dir := d.subvolumesDirID(id) + if _, err := os.Stat(dir); err != nil { + return err + } + quotasDir := d.quotasDirID(id) + if _, err := os.Stat(quotasDir); err == nil { + if err := os.Remove(quotasDir); err != nil { + return err + } + } else if !os.IsNotExist(err) { + return err + } + + // Call updateQuotaStatus() to invoke status update + d.updateQuotaStatus() + + if err := subvolDelete(d.subvolumesDir(), id, d.quotaEnabled); err != nil { + if d.quotaEnabled { + return err + } + // If quota is not enabled, fallback to rmdir syscall to delete subvolumes. 
+ // This would allow unprivileged user to delete their owned subvolumes + // in kernel >= 4.18 without user_subvol_rm_alowed mount option. + } + if err := system.EnsureRemoveAll(dir); err != nil { + return err + } + if err := d.subvolRescanQuota(); err != nil { + return err + } + return nil +} + +// Get the requested filesystem id. +func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { + dir := d.subvolumesDirID(id) + st, err := os.Stat(dir) + if err != nil { + return "", err + } + for _, opt := range options.Options { + if opt == "ro" { + // ignore "ro" option + continue + } + return "", fmt.Errorf("btrfs driver does not support mount options") + } + if !st.IsDir() { + return "", fmt.Errorf("%s: not a directory", dir) + } + + if quota, err := os.ReadFile(d.quotasDirID(id)); err == nil { + if size, err := strconv.ParseUint(string(quota), 10, 64); err == nil && size >= d.options.minSpace { + if err := d.enableQuota(); err != nil { + return "", err + } + if err := subvolLimitQgroup(dir, size); err != nil { + return "", err + } + } + } + + return dir, nil +} + +// Put is not implemented for BTRFS as there is no cleanup required for the id. +func (d *Driver) Put(id string) error { + // Get() creates no runtime resources (like e.g. mounts) + // so this doesn't need to do anything. + return nil +} + +// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID. +// For BTRFS, it queries the subvolumes path for this ID. +func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) { + return directory.Usage(d.subvolumesDirID(id)) +} + +// Exists checks if the id exists in the filesystem. +func (d *Driver) Exists(id string) bool { + dir := d.subvolumesDirID(id) + _, err := os.Stat(dir) + return err == nil +} + +// List all of the layers known to the driver. 
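+// Layer ids are simply the directory names found under <home>/subvolumes.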
+func (d *Driver) ListLayers() ([]string, error) { + entries, err := os.ReadDir(d.subvolumesDir()) + if err != nil { + return nil, err + } + results := make([]string, 0, len(entries)) + for _, entry := range entries { + if !entry.IsDir() { + continue + } + results = append(results, entry.Name()) + } + return results, nil +} + +// AdditionalImageStores returns additional image stores supported by the driver +func (d *Driver) AdditionalImageStores() []string { + return nil +} diff --git a/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go b/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go new file mode 100644 index 0000000000..c7d9d3b845 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go @@ -0,0 +1,4 @@ +//go:build !linux || !cgo +// +build !linux !cgo + +package btrfs diff --git a/vendor/github.com/containers/storage/drivers/btrfs/version.go b/vendor/github.com/containers/storage/drivers/btrfs/version.go new file mode 100644 index 0000000000..5816139f35 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/btrfs/version.go @@ -0,0 +1,27 @@ +//go:build linux && !btrfs_noversion && cgo +// +build linux,!btrfs_noversion,cgo + +package btrfs + +/* +#include + +// around version 3.16, they did not define lib version yet +#ifndef BTRFS_LIB_VERSION +#define BTRFS_LIB_VERSION -1 +#endif + +// upstream had removed it, but now it will be coming back +#ifndef BTRFS_BUILD_VERSION +#define BTRFS_BUILD_VERSION "-" +#endif +*/ +import "C" + +func btrfsBuildVersion() string { + return string(C.BTRFS_BUILD_VERSION) +} + +func btrfsLibVersion() int { + return int(C.BTRFS_LIB_VERSION) +} diff --git a/vendor/github.com/containers/storage/drivers/btrfs/version_none.go b/vendor/github.com/containers/storage/drivers/btrfs/version_none.go new file mode 100644 index 0000000000..a61d8fbd9a --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/btrfs/version_none.go @@ -0,0 +1,15 @@ +//go:build linux && btrfs_noversion && cgo +// +build linux,btrfs_noversion,cgo + +package btrfs + +// TODO(vbatts) remove this work-around once supported linux distros are on +// btrfs utilities of >= 3.16.1 + +func btrfsBuildVersion() string { + return "-" +} + +func btrfsLibVersion() int { + return -1 +} diff --git a/vendor/github.com/containers/storage/drivers/chown.go b/vendor/github.com/containers/storage/drivers/chown.go new file mode 100644 index 0000000000..ca43c3f057 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/chown.go @@ -0,0 +1,135 @@ +package graphdriver + +import ( + "bytes" + "errors" + "fmt" + "os" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/reexec" + "github.com/opencontainers/selinux/pkg/pwalk" +) + +const ( + chownByMapsCmd = "storage-chown-by-maps" +) + +func init() { + reexec.Register(chownByMapsCmd, chownByMapsMain) +} + +func chownByMapsMain() { + if len(os.Args) < 2 { + fmt.Fprintf(os.Stderr, "requires mapping configuration on stdin and directory path") + os.Exit(1) + } + // Read and decode our configuration. 
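+	// The payload is what ChownPathByMaps (below) marshals: a 4-element JSON
+	// array holding the toContainer UIDs, toContainer GIDs, toHost UIDs and
+	// toHost GIDs, e.g. (hypothetical 1:1 mapping, assuming idtools.IDMap's
+	// container_id/host_id/size JSON tags):
+	//
+	//	[[{"container_id":0,"host_id":100000,"size":65536}], [], [], []]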
+ discreteMaps := [4][]idtools.IDMap{} + config := bytes.Buffer{} + if _, err := config.ReadFrom(os.Stdin); err != nil { + fmt.Fprintf(os.Stderr, "error reading configuration: %v", err) + os.Exit(1) + } + if err := json.Unmarshal(config.Bytes(), &discreteMaps); err != nil { + fmt.Fprintf(os.Stderr, "error decoding configuration: %v", err) + os.Exit(1) + } + // Try to chroot. This may not be possible, and on some systems that + // means we just Chdir() to the directory, so from here on we should be + // using relative paths. + if err := chrootOrChdir(os.Args[1]); err != nil { + fmt.Fprintf(os.Stderr, "error chrooting to %q: %v", os.Args[1], err) + os.Exit(1) + } + // Build the mapping objects. + toContainer := idtools.NewIDMappingsFromMaps(discreteMaps[0], discreteMaps[1]) + if len(toContainer.UIDs()) == 0 && len(toContainer.GIDs()) == 0 { + toContainer = nil + } + toHost := idtools.NewIDMappingsFromMaps(discreteMaps[2], discreteMaps[3]) + if len(toHost.UIDs()) == 0 && len(toHost.GIDs()) == 0 { + toHost = nil + } + + chowner := newLChowner() + + chown := func(path string, info os.FileInfo, _ error) error { + if path == "." { + return nil + } + return chowner.LChown(path, info, toHost, toContainer) + } + if err := pwalk.Walk(".", chown); err != nil { + fmt.Fprintf(os.Stderr, "error during chown: %v", err) + os.Exit(1) + } + os.Exit(0) +} + +// ChownPathByMaps walks the filesystem tree, changing the ownership +// information using the toContainer and toHost mappings, using them to replace +// on-disk owner UIDs and GIDs which are "host" values in the first map with +// UIDs and GIDs for "host" values from the second map which correspond to the +// same "container" IDs. +func ChownPathByMaps(path string, toContainer, toHost *idtools.IDMappings) error { + if toContainer == nil { + toContainer = &idtools.IDMappings{} + } + if toHost == nil { + toHost = &idtools.IDMappings{} + } + + config, err := json.Marshal([4][]idtools.IDMap{toContainer.UIDs(), toContainer.GIDs(), toHost.UIDs(), toHost.GIDs()}) + if err != nil { + return err + } + cmd := reexec.Command(chownByMapsCmd, path) + cmd.Stdin = bytes.NewReader(config) + output, err := cmd.CombinedOutput() + if len(output) > 0 && err != nil { + return fmt.Errorf("%s: %w", string(output), err) + } + if err != nil { + return err + } + if len(output) > 0 { + return errors.New(string(output)) + } + + return nil +} + +type naiveLayerIDMapUpdater struct { + ProtoDriver +} + +// NewNaiveLayerIDMapUpdater wraps the ProtoDriver in a LayerIDMapUpdater that +// uses ChownPathByMaps to update the ownerships in a layer's filesystem tree. +func NewNaiveLayerIDMapUpdater(driver ProtoDriver) LayerIDMapUpdater { + return &naiveLayerIDMapUpdater{ProtoDriver: driver} +} + +// UpdateLayerIDMap walks the layer's filesystem tree, changing the ownership +// information using the toContainer and toHost mappings, using them to replace +// on-disk owner UIDs and GIDs which are "host" values in the first map with +// UIDs and GIDs for "host" values from the second map which correspond to the +// same "container" IDs. 
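+// For example (hypothetical single-entry maps): with toContainer taking host
+// id 100000 to container id 0, and toHost taking container id 0 to host id
+// 200000, a file owned by 100000 on disk ends up owned by 200000.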
+func (n *naiveLayerIDMapUpdater) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) (retErr error) { + driver := n.ProtoDriver + options := MountOpts{ + MountLabel: mountLabel, + } + layerFs, err := driver.Get(id, options) + if err != nil { + return err + } + defer driverPut(driver, id, &retErr) + + return ChownPathByMaps(layerFs, toContainer, toHost) +} + +// SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in an userNS +func (n *naiveLayerIDMapUpdater) SupportsShifting() bool { + return false +} diff --git a/vendor/github.com/containers/storage/drivers/chown_darwin.go b/vendor/github.com/containers/storage/drivers/chown_darwin.go new file mode 100644 index 0000000000..d6150ceeee --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/chown_darwin.go @@ -0,0 +1,109 @@ +//go:build darwin +// +build darwin + +package graphdriver + +import ( + "errors" + "fmt" + "os" + "sync" + "syscall" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" +) + +type inode struct { + Dev uint64 + Ino uint64 +} + +type platformChowner struct { + mutex sync.Mutex + inodes map[inode]bool +} + +func newLChowner() *platformChowner { + return &platformChowner{ + inodes: make(map[inode]bool), + } +} + +func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error { + st, ok := info.Sys().(*syscall.Stat_t) + if !ok { + return nil + } + + i := inode{ + Dev: uint64(st.Dev), + Ino: uint64(st.Ino), + } + c.mutex.Lock() + _, found := c.inodes[i] + if !found { + c.inodes[i] = true + } + c.mutex.Unlock() + + if found { + return nil + } + + // Map an on-disk UID/GID pair from host to container + // using the first map, then back to the host using the + // second map. Skip that first step if they're 0, to + // compensate for cases where a parent layer should + // have had a mapped value, but didn't. + uid, gid := int(st.Uid), int(st.Gid) + if toContainer != nil { + pair := idtools.IDPair{ + UID: uid, + GID: gid, + } + mappedUID, mappedGID, err := toContainer.ToContainer(pair) + if err != nil { + if (uid != 0) || (gid != 0) { + return fmt.Errorf("mapping host ID pair %#v for %q to container: %w", pair, path, err) + } + mappedUID, mappedGID = uid, gid + } + uid, gid = mappedUID, mappedGID + } + if toHost != nil { + pair := idtools.IDPair{ + UID: uid, + GID: gid, + } + mappedPair, err := toHost.ToHostOverflow(pair) + if err != nil { + return fmt.Errorf("mapping container ID pair %#v for %q to host: %w", pair, path, err) + } + uid, gid = mappedPair.UID, mappedPair.GID + } + if uid != int(st.Uid) || gid != int(st.Gid) { + capability, err := system.Lgetxattr(path, "security.capability") + if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform { + return fmt.Errorf("%s: %w", os.Args[0], err) + } + + // Make the change. + if err := system.Lchown(path, uid, gid); err != nil { + return fmt.Errorf("%s: %w", os.Args[0], err) + } + // Restore the SUID and SGID bits if they were originally set. 
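+		// (chown typically clears setuid/setgid on regular files, hence the
+		// re-chmod; symlinks are skipped because there is no lchmod.)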
+ if (info.Mode()&os.ModeSymlink == 0) && info.Mode()&(os.ModeSetuid|os.ModeSetgid) != 0 { + if err := system.Chmod(path, info.Mode()); err != nil { + return fmt.Errorf("%s: %w", os.Args[0], err) + } + } + if capability != nil { + if err := system.Lsetxattr(path, "security.capability", capability, 0); err != nil { + return fmt.Errorf("%s: %w", os.Args[0], err) + } + } + + } + return nil +} diff --git a/vendor/github.com/containers/storage/drivers/chown_unix.go b/vendor/github.com/containers/storage/drivers/chown_unix.go new file mode 100644 index 0000000000..42c12c6279 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/chown_unix.go @@ -0,0 +1,127 @@ +//go:build !windows && !darwin +// +build !windows,!darwin + +package graphdriver + +import ( + "errors" + "fmt" + "os" + "sync" + "syscall" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" +) + +type inode struct { + Dev uint64 + Ino uint64 +} + +type platformChowner struct { + mutex sync.Mutex + inodes map[inode]string +} + +func newLChowner() *platformChowner { + return &platformChowner{ + inodes: make(map[inode]string), + } +} + +func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error { + st, ok := info.Sys().(*syscall.Stat_t) + if !ok { + return nil + } + + i := inode{ + Dev: uint64(st.Dev), + Ino: uint64(st.Ino), + } + + c.mutex.Lock() + + oldTarget, found := c.inodes[i] + if !found { + c.inodes[i] = path + } + + // If we are dealing with a file with multiple links then keep the lock until the file is + // chowned to avoid a race where we link to the old version if the file is copied up. + if found || st.Nlink > 1 { + defer c.mutex.Unlock() + } else { + c.mutex.Unlock() + } + + if found { + // If the dev/inode was already chowned then create a link to the old target instead + // of chowning it again. This is necessary when the underlying file system breaks + // inodes on copy-up (as it is with overlay with index=off) to maintain the original + // link and correct file ownership. + + // The target already exists so remove it before creating the link to the new target. + if err := os.Remove(path); err != nil { + return err + } + return os.Link(oldTarget, path) + } + + // Map an on-disk UID/GID pair from host to container + // using the first map, then back to the host using the + // second map. Skip that first step if they're 0, to + // compensate for cases where a parent layer should + // have had a mapped value, but didn't. 
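+	// e.g. (hypothetical maps): on-disk uid 100000 -> container uid 0 ->
+	// new host uid 200000; a failed lookup is fatal unless the ids are 0:0,
+	// which fall through unchanged.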
+ uid, gid := int(st.Uid), int(st.Gid) + if toContainer != nil { + pair := idtools.IDPair{ + UID: uid, + GID: gid, + } + mappedUID, mappedGID, err := toContainer.ToContainer(pair) + if err != nil { + if (uid != 0) || (gid != 0) { + return fmt.Errorf("mapping host ID pair %#v for %q to container: %w", pair, path, err) + } + mappedUID, mappedGID = uid, gid + } + uid, gid = mappedUID, mappedGID + } + if toHost != nil { + pair := idtools.IDPair{ + UID: uid, + GID: gid, + } + mappedPair, err := toHost.ToHostOverflow(pair) + if err != nil { + return fmt.Errorf("mapping container ID pair %#v for %q to host: %w", pair, path, err) + } + uid, gid = mappedPair.UID, mappedPair.GID + } + if uid != int(st.Uid) || gid != int(st.Gid) { + cap, err := system.Lgetxattr(path, "security.capability") + if err != nil && !errors.Is(err, system.EOPNOTSUPP) && !errors.Is(err, system.EOVERFLOW) && err != system.ErrNotSupportedPlatform { + return fmt.Errorf("%s: %w", os.Args[0], err) + } + + // Make the change. + if err := system.Lchown(path, uid, gid); err != nil { + return fmt.Errorf("%s: %w", os.Args[0], err) + } + // Restore the SUID and SGID bits if they were originally set. + if (info.Mode()&os.ModeSymlink == 0) && info.Mode()&(os.ModeSetuid|os.ModeSetgid) != 0 { + if err := system.Chmod(path, info.Mode()); err != nil { + return fmt.Errorf("%s: %w", os.Args[0], err) + } + } + if cap != nil { + if err := system.Lsetxattr(path, "security.capability", cap, 0); err != nil { + return fmt.Errorf("%s: %w", os.Args[0], err) + } + } + + } + return nil +} diff --git a/vendor/github.com/containers/storage/drivers/chown_windows.go b/vendor/github.com/containers/storage/drivers/chown_windows.go new file mode 100644 index 0000000000..06ccf9fa47 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/chown_windows.go @@ -0,0 +1,21 @@ +//go:build windows +// +build windows + +package graphdriver + +import ( + "os" + "syscall" + + "github.com/containers/storage/pkg/idtools" +) + +type platformChowner struct{} + +func newLChowner() *platformChowner { + return &platformChowner{} +} + +func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error { + return &os.PathError{"lchown", path, syscall.EWINDOWS} +} diff --git a/vendor/github.com/containers/storage/drivers/chroot_unix.go b/vendor/github.com/containers/storage/drivers/chroot_unix.go new file mode 100644 index 0000000000..9a1c6751f8 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/chroot_unix.go @@ -0,0 +1,22 @@ +//go:build linux || darwin || freebsd || solaris +// +build linux darwin freebsd solaris + +package graphdriver + +import ( + "fmt" + "os" + "syscall" +) + +// chrootOrChdir() is either a chdir() to the specified path, or a chroot() to the +// specified path followed by chdir() to the new root directory +func chrootOrChdir(path string) error { + if err := syscall.Chroot(path); err != nil { + return fmt.Errorf("chrooting to %q: %w", path, err) + } + if err := syscall.Chdir(string(os.PathSeparator)); err != nil { + return fmt.Errorf("changing to %q: %w", path, err) + } + return nil +} diff --git a/vendor/github.com/containers/storage/drivers/chroot_windows.go b/vendor/github.com/containers/storage/drivers/chroot_windows.go new file mode 100644 index 0000000000..d5eb5ed983 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/chroot_windows.go @@ -0,0 +1,15 @@ +package graphdriver + +import ( + "fmt" + 
"syscall" +) + +// chrootOrChdir() is either a chdir() to the specified path, or a chroot() to the +// specified path followed by chdir() to the new root directory +func chrootOrChdir(path string) error { + if err := syscall.Chdir(path); err != nil { + return fmt.Errorf("changing to %q: %w", path, err) + } + return nil +} diff --git a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go new file mode 100644 index 0000000000..9c3d7c668f --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go @@ -0,0 +1,307 @@ +//go:build cgo +// +build cgo + +package copy + +/* +#include + +#ifndef FICLONE +#define FICLONE _IOW(0x94, 9, int) +#endif +*/ +import "C" + +import ( + "container/list" + "errors" + "fmt" + "io" + "net" + "os" + "path/filepath" + "strings" + "syscall" + "time" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/pools" + "github.com/containers/storage/pkg/system" + "github.com/containers/storage/pkg/unshare" + "golang.org/x/sys/unix" +) + +// Mode indicates whether to use hardlink or copy content +type Mode int + +const ( + // Content creates a new file, and copies the content of the file + Content Mode = iota + // Hardlink creates a new hardlink to the existing file + Hardlink +) + +// CopyRegularToFile copies the content of a file to another +func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { // nolint: revive,golint + srcFile, err := os.Open(srcPath) + if err != nil { + return err + } + defer srcFile.Close() + + if *copyWithFileClone { + _, _, err = unix.Syscall(unix.SYS_IOCTL, dstFile.Fd(), C.FICLONE, srcFile.Fd()) + if err == nil { + return nil + } + + *copyWithFileClone = false + if err == unix.EXDEV { + *copyWithFileRange = false + } + } + if *copyWithFileRange { + err = doCopyWithFileRange(srcFile, dstFile, fileinfo) + // Trying the file_clone may not have caught the exdev case + // as the ioctl may not have been available (therefore EINVAL) + if err == unix.EXDEV || err == unix.ENOSYS { + *copyWithFileRange = false + } else { + return err + } + } + return legacyCopy(srcFile, dstFile) +} + +// CopyRegular copies the content of a file to another +func CopyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { // nolint: revive,golint + // If the destination file already exists, we shouldn't blow it away + dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, fileinfo.Mode()) + if err != nil { + return err + } + defer dstFile.Close() + + return CopyRegularToFile(srcPath, dstFile, fileinfo, copyWithFileRange, copyWithFileClone) +} + +func doCopyWithFileRange(srcFile, dstFile *os.File, fileinfo os.FileInfo) error { + amountLeftToCopy := fileinfo.Size() + + for amountLeftToCopy > 0 { + n, err := unix.CopyFileRange(int(srcFile.Fd()), nil, int(dstFile.Fd()), nil, int(amountLeftToCopy), 0) + if err != nil { + return err + } + + amountLeftToCopy = amountLeftToCopy - int64(n) + } + + return nil +} + +func legacyCopy(srcFile io.Reader, dstFile io.Writer) error { + _, err := pools.Copy(dstFile, srcFile) + + return err +} + +func copyXattr(srcPath, dstPath, attr string) error { + data, err := system.Lgetxattr(srcPath, attr) + if err != nil && !errors.Is(err, unix.EOPNOTSUPP) { + return err + } + if data != nil { + if err := system.Lsetxattr(dstPath, 
attr, data, 0); err != nil { + return err + } + } + return nil +} + +type fileID struct { + dev uint64 + ino uint64 +} + +type dirMtimeInfo struct { + dstPath *string + stat *syscall.Stat_t +} + +// DirCopy copies or hardlinks the contents of one directory to another, +// properly handling xattrs, and soft links +// +// Copying xattrs can be opted out of by passing false for copyXattrs. +func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error { + copyWithFileRange := true + copyWithFileClone := true + + // This is a map of source file inodes to dst file paths + copiedFiles := make(map[fileID]string) + + dirsToSetMtimes := list.New() + err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(srcDir, srcPath) + if err != nil { + return err + } + + dstPath := filepath.Join(dstDir, relPath) + stat, ok := f.Sys().(*syscall.Stat_t) + if !ok { + return fmt.Errorf("unable to get raw syscall.Stat_t data for %s", srcPath) + } + + isHardlink := false + + switch mode := f.Mode(); { + case mode.IsRegular(): + id := fileID{dev: uint64(stat.Dev), ino: stat.Ino} + if copyMode == Hardlink { + isHardlink = true + if err2 := os.Link(srcPath, dstPath); err2 != nil { + return err2 + } + } else if hardLinkDstPath, ok := copiedFiles[id]; ok { + if err2 := os.Link(hardLinkDstPath, dstPath); err2 != nil { + return err2 + } + } else { + if err2 := CopyRegular(srcPath, dstPath, f, ©WithFileRange, ©WithFileClone); err2 != nil { + return err2 + } + copiedFiles[id] = dstPath + } + + case mode.IsDir(): + if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) { + return err + } + + case mode&os.ModeSymlink != 0: + link, err := os.Readlink(srcPath) + if err != nil { + return err + } + + if err := os.Symlink(link, dstPath); err != nil { + return err + } + + case mode&os.ModeNamedPipe != 0: + if err := unix.Mkfifo(dstPath, stat.Mode); err != nil { + return err + } + + case mode&os.ModeSocket != 0: + s, err := net.Listen("unix", dstPath) + if err != nil { + return err + } + s.Close() + + case mode&os.ModeDevice != 0: + if unshare.IsRootless() { + // cannot create a device if running in user namespace + return nil + } + if err := unix.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil { + return err + } + + default: + return fmt.Errorf("unknown file type with mode %v for %s", mode, srcPath) + } + + // Everything below is copying metadata from src to dst. All this metadata + // already shares an inode for hardlinks. + if isHardlink { + return nil + } + + if err := idtools.SafeLchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil { + return err + } + + if copyXattrs { + if err := doCopyXattrs(srcPath, dstPath); err != nil { + return err + } + } + + isSymlink := f.Mode()&os.ModeSymlink != 0 + + // There is no LChmod, so ignore mode for symlink. 
Also, this + // must happen after chown, as that can modify the file mode + if !isSymlink { + if err := os.Chmod(dstPath, f.Mode()); err != nil { + return err + } + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + // nolint: unconvert + if f.IsDir() { + dirsToSetMtimes.PushFront(&dirMtimeInfo{dstPath: &dstPath, stat: stat}) + } else if !isSymlink { + aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + mTime := time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec)) + if err := system.Chtimes(dstPath, aTime, mTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{stat.Atim, stat.Mtim} + if err := system.LUtimesNano(dstPath, ts); err != nil { + return err + } + } + return nil + }) + if err != nil { + return err + } + for e := dirsToSetMtimes.Front(); e != nil; e = e.Next() { + mtimeInfo := e.Value.(*dirMtimeInfo) + ts := []syscall.Timespec{mtimeInfo.stat.Atim, mtimeInfo.stat.Mtim} + if err := system.LUtimesNano(*mtimeInfo.dstPath, ts); err != nil { + return err + } + } + + return nil +} + +func doCopyXattrs(srcPath, dstPath string) error { + if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil { + return err + } + + xattrs, err := system.Llistxattr(srcPath) + if err != nil && !errors.Is(err, unix.EOPNOTSUPP) { + return err + } + + for _, key := range xattrs { + if strings.HasPrefix(key, "user.") { + if err := copyXattr(srcPath, dstPath, key); err != nil { + return err + } + } + } + + if unshare.IsRootless() { + return nil + } + + // We need to copy this attribute if it appears in an overlay upper layer, as + // this function is used to copy those. It is set by overlay if a directory + // is removed and then re-created and should not inherit anything from the + // same dir in the lower dir. 
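+	// (overlayfs records that marker as trusted.overlay.opaque="y", so the
+	// xattr is copied verbatim here.)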
+ return copyXattr(srcPath, dstPath, "trusted.overlay.opaque") +} diff --git a/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go b/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go new file mode 100644 index 0000000000..5a4629b745 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go @@ -0,0 +1,41 @@ +//go:build !linux || !cgo +// +build !linux !cgo + +package copy //nolint: predeclared + +import ( + "io" + "os" + + "github.com/containers/storage/pkg/chrootarchive" +) + +// Mode indicates whether to use hardlink or copy content +type Mode int + +const ( + // Content creates a new file, and copies the content of the file + Content Mode = iota +) + +// DirCopy copies or hardlinks the contents of one directory to another, +// properly handling soft links +func DirCopy(srcDir, dstDir string, _ Mode, _ bool) error { + return chrootarchive.NewArchiver(nil).CopyWithTar(srcDir, dstDir) +} + +// CopyRegularToFile copies the content of a file to another +func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { //nolint: revive,golint // "func name will be used as copy.CopyRegularToFile by other packages, and that stutters" + f, err := os.Open(srcPath) + if err != nil { + return err + } + defer f.Close() + _, err = io.Copy(dstFile, f) + return err +} + +// CopyRegular copies the content of a file to another +func CopyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { //nolint:revive,golint // "func name will be used as copy.CopyRegular by other packages, and that stutters" + return chrootarchive.NewArchiver(nil).CopyWithTar(srcPath, dstPath) +} diff --git a/vendor/github.com/containers/storage/drivers/counter.go b/vendor/github.com/containers/storage/drivers/counter.go new file mode 100644 index 0000000000..964dcaf2fd --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/counter.go @@ -0,0 +1,68 @@ +package graphdriver + +import "sync" + +type minfo struct { + check bool + count int +} + +// RefCounter is a generic counter for use by graphdriver Get/Put calls +type RefCounter struct { + counts map[string]*minfo + mu sync.Mutex + checker Checker +} + +// NewRefCounter returns a new RefCounter +func NewRefCounter(c Checker) *RefCounter { + return &RefCounter{ + checker: c, + counts: make(map[string]*minfo), + } +} + +// Increment increases the ref count for the given id and returns the current count +func (c *RefCounter) Increment(path string) int { + return c.incdec(path, func(minfo *minfo) { + minfo.count++ + }) +} + +// Decrement decreases the ref count for the given id and returns the current count +func (c *RefCounter) Decrement(path string) int { + return c.incdec(path, func(minfo *minfo) { + minfo.count-- + }) +} + +func (c *RefCounter) incdec(path string, infoOp func(minfo *minfo)) int { + c.mu.Lock() + m := c.counts[path] + if m == nil { + m = &minfo{} + c.counts[path] = m + } + // if we are checking this path for the first time check to make sure + // if it was already mounted on the system and make sure we have a correct ref + // count if it is mounted as it is in use. + if !m.check { + m.check = true + if c.checker.IsMounted(path) { + m.count++ + } + } else if !c.checker.IsMounted(path) { + // if the unmount was performed outside of this process (e.g. conmon cleanup) + // the ref counter would lose track of it. 
+		// Check if it is still mounted.
+		m.count = 0
+	}
+	infoOp(m)
+	count := m.count
+	if count <= 0 {
+		// If the mounted path has been decremented enough to have no references,
+		// then its entry can be removed.
+		delete(c.counts, path)
+	}
+	c.mu.Unlock()
+	return count
+}
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go
new file mode 100644
index 0000000000..388602b634
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go
@@ -0,0 +1,254 @@
+//go:build linux && cgo
+// +build linux,cgo
+
+package devmapper
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+
+	"github.com/sirupsen/logrus"
+)
+
+type directLVMConfig struct {
+	Device              string
+	ThinpPercent        uint64
+	ThinpMetaPercent    uint64
+	AutoExtendPercent   uint64
+	AutoExtendThreshold uint64
+	MetaDataSize        string
+}
+
+var (
+	errThinpPercentMissing = errors.New("must set both `dm.thinp_percent` and `dm.thinp_metapercent` if either is specified")
+	errThinpPercentTooBig  = errors.New("combined `dm.thinp_percent` and `dm.thinp_metapercent` must not be greater than 100")
+	errMissingSetupDevice  = errors.New("must provide device path in `dm.directlvm_device` in order to configure direct-lvm")
+)
+
+func validateLVMConfig(cfg directLVMConfig) error {
+	if cfg.Device == "" {
+		return errMissingSetupDevice
+	}
+	if (cfg.ThinpPercent > 0 && cfg.ThinpMetaPercent == 0) || cfg.ThinpMetaPercent > 0 && cfg.ThinpPercent == 0 {
+		return errThinpPercentMissing
+	}
+
+	if cfg.ThinpPercent+cfg.ThinpMetaPercent > 100 {
+		return errThinpPercentTooBig
+	}
+	return nil
+}
+
+func checkDevAvailable(dev string) error {
+	lvmScan, err := exec.LookPath("lvmdiskscan")
+	if err != nil {
+		logrus.Debugf("could not find lvmdiskscan: %v", err)
+		return nil
+	}
+
+	out, err := exec.Command(lvmScan).CombinedOutput()
+	if err != nil {
+		logrus.WithError(err).Error(string(out))
+		return nil
+	}
+
+	if !bytes.Contains(out, []byte(dev)) {
+		return fmt.Errorf("%s is not available for use with devicemapper", dev)
+	}
+	return nil
+}
+
+func checkDevInVG(dev string) error {
+	pvDisplay, err := exec.LookPath("pvdisplay")
+	if err != nil {
+		logrus.Debugf("could not find pvdisplay: %v", err)
+		return nil
+	}
+
+	out, err := exec.Command(pvDisplay, dev).CombinedOutput()
+	if err != nil {
+		logrus.WithError(err).Error(string(out))
+		return nil
+	}
+
+	scanner := bufio.NewScanner(bytes.NewReader(bytes.TrimSpace(out)))
+	for scanner.Scan() {
+		fields := strings.SplitAfter(strings.TrimSpace(scanner.Text()), "VG Name")
+		if len(fields) > 1 {
+			// got "VG Name" line
+			vg := strings.TrimSpace(fields[1])
+			if len(vg) > 0 {
+				return fmt.Errorf("%s is already part of a volume group %q: must remove this device from any volume group or provide a different device", dev, vg)
+			}
+			logrus.Error(fields)
+			break
+		}
+	}
+	return nil
+}
+
+func checkDevHasFS(dev string) error {
+	blkid, err := exec.LookPath("blkid")
+	if err != nil {
+		logrus.Debugf("could not find blkid: %v", err)
+		return nil
+	}
+
+	out, err := exec.Command(blkid, dev).CombinedOutput()
+	if err != nil {
+		logrus.WithError(err).Error(string(out))
+		return nil
+	}
+
+	fields := bytes.Fields(out)
+	for _, f := range fields {
+		kv := bytes.Split(f, []byte{'='})
+		if bytes.Equal(kv[0], []byte("TYPE")) {
+			v := bytes.Trim(kv[1], "\"")
+			if len(v) > 0 {
+				return fmt.Errorf("%s has a filesystem already, use dm.directlvm_device_force=true if you want to wipe the device", dev)
+			}
+			return nil
+		}
+	}
+	return nil
+}
+
+func verifyBlockDevice(dev string, force bool) error {
+	absPath, err := filepath.Abs(dev)
+	if err != nil {
+		return fmt.Errorf("unable to get absolute path for %s: %s", dev, err)
+	}
+	realPath, err := filepath.EvalSymlinks(absPath)
+	if err != nil {
+		return fmt.Errorf("failed to canonicalise path for %s: %s", dev, err)
+	}
+	if err := checkDevAvailable(absPath); err != nil {
+		logrus.Infof("block device '%s' not available, checking '%s'", absPath, realPath)
+		if err := checkDevAvailable(realPath); err != nil {
+			return fmt.Errorf("neither '%s' nor '%s' are in the output of lvmdiskscan, can't use device", absPath, realPath)
+		}
+	}
+	if err := checkDevInVG(realPath); err != nil {
+		return err
+	}
+
+	if force {
+		return nil
+	}
+
+	if err := checkDevHasFS(realPath); err != nil {
+		return err
+	}
+	return nil
+}
+
+func readLVMConfig(root string) (directLVMConfig, error) {
+	var cfg directLVMConfig
+
+	p := filepath.Join(root, "setup-config.json")
+	b, err := os.ReadFile(p)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return cfg, nil
+		}
+		return cfg, fmt.Errorf("reading existing setup config: %w", err)
+	}
+
+	// check if this is just an empty file, no need to produce a json error later if so
+	if len(b) == 0 {
+		return cfg, nil
+	}
+	if err := json.Unmarshal(b, &cfg); err != nil {
+		return cfg, fmt.Errorf("unmarshaling previous device setup config: %w", err)
+	}
+	return cfg, nil
+}
+
+func writeLVMConfig(root string, cfg directLVMConfig) error {
+	p := filepath.Join(root, "setup-config.json")
+	b, err := json.Marshal(cfg)
+	if err != nil {
+		return fmt.Errorf("marshalling direct lvm config: %w", err)
+	}
+	if err := os.WriteFile(p, b, 0o600); err != nil {
+		return fmt.Errorf("writing direct lvm config to file: %w", err)
+	}
+	return nil
+}
+
+func setupDirectLVM(cfg directLVMConfig) error {
+	lvmProfileDir := "/etc/lvm/profile"
+	binaries := []string{"pvcreate", "vgcreate", "lvcreate", "lvconvert", "lvchange", "thin_check"}
+
+	for _, bin := range binaries {
+		if _, err := exec.LookPath(bin); err != nil {
+			return fmt.Errorf("looking up command `"+bin+"` while setting up direct lvm: %w", err)
+		}
+	}
+
+	err := os.MkdirAll(lvmProfileDir, 0o755)
+	if err != nil {
+		return fmt.Errorf("creating lvm profile directory: %w", err)
+	}
+
+	if cfg.AutoExtendPercent == 0 {
+		cfg.AutoExtendPercent = 20
+	}
+
+	if cfg.AutoExtendThreshold == 0 {
+		cfg.AutoExtendThreshold = 80
+	}
+
+	if cfg.ThinpPercent == 0 {
+		cfg.ThinpPercent = 95
+	}
+	if cfg.ThinpMetaPercent == 0 {
+		cfg.ThinpMetaPercent = 1
+	}
+	if cfg.MetaDataSize == "" {
+		cfg.MetaDataSize = "128k"
+	}
+
+	out, err := exec.Command("pvcreate", "--metadatasize", cfg.MetaDataSize, "-f", cfg.Device).CombinedOutput()
+	if err != nil {
+		return fmt.Errorf("%v: %w", string(out), err)
+	}
+
+	out, err = exec.Command("vgcreate", "storage", cfg.Device).CombinedOutput()
+	if err != nil {
+		return fmt.Errorf("%v: %w", string(out), err)
+	}
+
+	out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpool", "storage", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpPercent)).CombinedOutput()
+	if err != nil {
+		return fmt.Errorf("%v: %w", string(out), err)
+	}
+	out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpoolmeta", "storage", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpMetaPercent)).CombinedOutput()
+	if err != nil {
+		return fmt.Errorf("%v: %w", string(out), err)
+	}
+
+	out, err =
exec.Command("lvconvert", "-y", "--zero", "n", "-c", "512K", "--thinpool", "storage/thinpool", "--poolmetadata", "storage/thinpoolmeta").CombinedOutput() + if err != nil { + return fmt.Errorf("%v: %w", string(out), err) + } + + profile := fmt.Sprintf("activation{\nthin_pool_autoextend_threshold=%d\nthin_pool_autoextend_percent=%d\n}", cfg.AutoExtendThreshold, cfg.AutoExtendPercent) + err = os.WriteFile(lvmProfileDir+"/storage-thinpool.profile", []byte(profile), 0o600) + if err != nil { + return fmt.Errorf("writing storage thinp autoextend profile: %w", err) + } + + out, err = exec.Command("lvchange", "--metadataprofile", "storage-thinpool", "storage/thinpool").CombinedOutput() + if err != nil { + return fmt.Errorf("%s: %w", string(out), err) + } + return nil +} diff --git a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go new file mode 100644 index 0000000000..5d8df8a78e --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go @@ -0,0 +1,2888 @@ +//go:build linux && cgo +// +build linux,cgo + +package devmapper + +import ( + "bufio" + "errors" + "fmt" + "io" + "io/fs" + "os" + "os/exec" + "path" + "path/filepath" + "reflect" + "strconv" + "strings" + "sync" + "time" + + graphdriver "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/devicemapper" + "github.com/containers/storage/pkg/dmesg" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/loopback" + "github.com/containers/storage/pkg/mount" + "github.com/containers/storage/pkg/parsers" + "github.com/containers/storage/pkg/parsers/kernel" + units "github.com/docker/go-units" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +var ( + defaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024 + defaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024 + defaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024 + defaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors + defaultUdevSyncOverride = false + maxDeviceID = 0xffffff // 24 bit, pool limit + deviceIDMapSz = (maxDeviceID + 1) / 8 + driverDeferredRemovalSupport = false + enableDeferredRemoval = false + enableDeferredDeletion = false + userBaseSize = false + defaultMinFreeSpacePercent uint32 = 10 + lvmSetupConfigForce bool +) + +const ( + deviceSetMetaFile = "deviceset-metadata" + transactionMetaFile = "transaction-metadata" + xfs = "xfs" + ext4 = "ext4" + base = "base" +) + +type transaction struct { + OpenTransactionID uint64 `json:"open_transaction_id"` + DeviceIDHash string `json:"device_hash"` + DeviceID int `json:"device_id"` +} + +type devInfo struct { + Hash string `json:"-"` + DeviceID int `json:"device_id"` + Size uint64 `json:"size"` + TransactionID uint64 `json:"transaction_id"` + Initialized bool `json:"initialized"` + Deleted bool `json:"deleted"` + devices *DeviceSet + + // The global DeviceSet lock guarantees that we serialize all + // the calls to libdevmapper (which is not threadsafe), but we + // sometimes release that lock while sleeping. In that case + // this per-device lock is still held, protecting against + // other accesses to the device that we're doing the wait on. 
+ // + // WARNING: In order to avoid AB-BA deadlocks when releasing + // the global lock while holding the per-device locks all + // device locks must be acquired *before* the device lock, and + // multiple device locks should be acquired parent before child. + lock sync.Mutex +} + +type metaData struct { + Devices map[string]*devInfo `json:"Devices"` +} + +// DeviceSet holds information about list of devices +type DeviceSet struct { + metaData `json:"-"` + sync.Mutex `json:"-"` // Protects all fields of DeviceSet and serializes calls into libdevmapper + root string + devicePrefix string + TransactionID uint64 `json:"-"` + NextDeviceID int `json:"next_device_id"` + deviceIDMap []byte + + // Options + dataLoopbackSize int64 + metaDataSize string + metaDataLoopbackSize int64 + baseFsSize uint64 + filesystem string + mountOptions string + mkfsArgs []string + dataDevice string // block or loop dev + dataLoopFile string // loopback file, if used + metadataDevice string // block or loop dev + metadataLoopFile string // loopback file, if used + doBlkDiscard bool + thinpBlockSize uint32 + thinPoolDevice string + transaction `json:"-"` + overrideUdevSyncCheck bool + deferredRemove bool // use deferred removal + deferredDelete bool // use deferred deletion + BaseDeviceUUID string // save UUID of base device + BaseDeviceFilesystem string // save filesystem of base device + nrDeletedDevices uint // number of deleted devices + deletionWorkerTicker *time.Ticker + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + minFreeSpacePercent uint32 // min free space percentage in thinpool + xfsNospaceRetries string // max retries when xfs receives ENOSPC + lvmSetupConfig directLVMConfig +} + +// DiskUsage contains information about disk usage and is used when reporting Status of a device. +type DiskUsage struct { + // Used bytes on the disk. + Used uint64 + // Total bytes on the disk. + Total uint64 + // Available bytes on the disk. + Available uint64 +} + +// Status returns the information about the device. +type Status struct { + // PoolName is the name of the data pool. + PoolName string + // DataFile is the actual block device for data. + DataFile string + // DataLoopback loopback file, if used. + DataLoopback string + // MetadataFile is the actual block device for metadata. + MetadataFile string + // MetadataLoopback is the loopback file, if used. + MetadataLoopback string + // Data is the disk used for data. + Data DiskUsage + // Metadata is the disk used for meta data. + Metadata DiskUsage + // BaseDeviceSize is base size of container and image + BaseDeviceSize uint64 + // BaseDeviceFS is backing filesystem. + BaseDeviceFS string + // SectorSize size of the vector. + SectorSize uint64 + // UdevSyncSupported is true if sync is supported. + UdevSyncSupported bool + // DeferredRemoveEnabled is true then the device is not unmounted. + DeferredRemoveEnabled bool + // True if deferred deletion is enabled. This is different from + // deferred removal. "removal" means that device mapper device is + // deactivated. Thin device is still in thin pool and can be activated + // again. But "deletion" means that thin device will be deleted from + // thin pool and it can't be activated again. + DeferredDeleteEnabled bool + DeferredDeletedDeviceCount uint + MinFreeSpace uint64 +} + +// Structure used to export image/container metadata in inspect. 
+type deviceMetadata struct { + deviceID int + deviceSize uint64 // size in bytes + deviceName string // Device name as used during activation +} + +// DevStatus returns information about device mounted containing its id, size and sector information. +type DevStatus struct { + // DeviceID is the id of the device. + DeviceID int + // Size is the size of the filesystem. + Size uint64 + // TransactionID is a unique integer per device set used to identify an operation on the file system, this number is incremental. + TransactionID uint64 + // SizeInSectors indicates the size of the sectors allocated. + SizeInSectors uint64 + // MappedSectors indicates number of mapped sectors. + MappedSectors uint64 + // HighestMappedSector is the pointer to the highest mapped sector. + HighestMappedSector uint64 +} + +func getDevName(name string) string { + return "/dev/mapper/" + name +} + +func (info *devInfo) Name() string { + hash := info.Hash + if hash == "" { + hash = base + } + return fmt.Sprintf("%s-%s", info.devices.devicePrefix, hash) +} + +func (info *devInfo) DevName() string { + return getDevName(info.Name()) +} + +func (devices *DeviceSet) loopbackDir() string { + return path.Join(devices.root, "devicemapper") +} + +func (devices *DeviceSet) metadataDir() string { + return path.Join(devices.root, "metadata") +} + +func (devices *DeviceSet) metadataFile(info *devInfo) string { + file := info.Hash + if file == "" { + file = base + } + return path.Join(devices.metadataDir(), file) +} + +func (devices *DeviceSet) transactionMetaFile() string { + return path.Join(devices.metadataDir(), transactionMetaFile) +} + +func (devices *DeviceSet) deviceSetMetaFile() string { + return path.Join(devices.metadataDir(), deviceSetMetaFile) +} + +func (devices *DeviceSet) oldMetadataFile() string { + return path.Join(devices.loopbackDir(), "json") +} + +func (devices *DeviceSet) getPoolName() string { + if devices.thinPoolDevice == "" { + return devices.devicePrefix + "-pool" + } + return devices.thinPoolDevice +} + +func (devices *DeviceSet) getPoolDevName() string { + return getDevName(devices.getPoolName()) +} + +func (devices *DeviceSet) hasImage(name string) bool { + dirname := devices.loopbackDir() + filename := path.Join(dirname, name) + + _, err := os.Stat(filename) + return err == nil +} + +// ensureImage creates a sparse file of bytes at the path +// /devicemapper/. +// If the file already exists and new size is larger than its current size, it grows to the new size. +// Either way it returns the full path. 
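Editor's note: ensureImage below relies on sparse-file semantics: Truncate only extends the file's logical size, so a 100 GiB loopback "image" consumes almost no disk space until blocks are actually written. A minimal standalone illustration of that behavior (hypothetical path, not code from this package):

package main

import (
	"fmt"
	"os"
)

// ensureSparseFile creates path if needed and grows it to size bytes
// without allocating blocks; it never shrinks, mirroring ensureImage.
func ensureSparseFile(path string, size int64) error {
	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0o600)
	if err != nil {
		return err
	}
	defer f.Close()
	fi, err := f.Stat()
	if err != nil {
		return err
	}
	if fi.Size() >= size {
		return nil // never shrink an existing image
	}
	return f.Truncate(size)
}

func main() {
	if err := ensureSparseFile("/tmp/data.img", 100<<30); err != nil {
		fmt.Println(err)
	}
}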
+func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { + dirname := devices.loopbackDir() + filename := path.Join(dirname, name) + + uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps) + if err != nil { + return "", err + } + if err := idtools.MkdirAllAs(dirname, 0o700, uid, gid); err != nil { + return "", err + } + + if fi, err := os.Stat(filename); err != nil { + if !os.IsNotExist(err) { + return "", err + } + logrus.Debugf("devmapper: Creating loopback file %s for device-manage use", filename) + file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0o600) + if err != nil { + return "", err + } + defer file.Close() + + if err := file.Truncate(size); err != nil { + return "", err + } + } else { + if fi.Size() < size { + file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0o600) + if err != nil { + return "", err + } + defer file.Close() + if err := file.Truncate(size); err != nil { + return "", fmt.Errorf("devmapper: Unable to grow loopback file %s: %w", filename, err) + } + } else if fi.Size() > size { + logrus.Warnf("devmapper: Can't shrink loopback file %s", filename) + } + } + return filename, nil +} + +func (devices *DeviceSet) allocateTransactionID() uint64 { + devices.OpenTransactionID = devices.TransactionID + 1 + return devices.OpenTransactionID +} + +func (devices *DeviceSet) updatePoolTransactionID() error { + if err := devicemapper.SetTransactionID(devices.getPoolDevName(), devices.TransactionID, devices.OpenTransactionID); err != nil { + return fmt.Errorf("devmapper: Error setting devmapper transaction ID: %s", err) + } + devices.TransactionID = devices.OpenTransactionID + return nil +} + +func (devices *DeviceSet) removeMetadata(info *devInfo) error { + if err := os.RemoveAll(devices.metadataFile(info)); err != nil { + return fmt.Errorf("devmapper: Error removing metadata file %s: %s", devices.metadataFile(info), err) + } + return nil +} + +// Given json data and file path, write it to disk +func (devices *DeviceSet) writeMetaFile(jsonData []byte, filePath string) error { + tmpFile, err := os.CreateTemp(devices.metadataDir(), ".tmp") + if err != nil { + return fmt.Errorf("devmapper: Error creating metadata file: %s", err) + } + + n, err := tmpFile.Write(jsonData) + if err != nil { + return fmt.Errorf("devmapper: Error writing metadata to %s: %s", tmpFile.Name(), err) + } + if n < len(jsonData) { + return io.ErrShortWrite + } + if err := tmpFile.Sync(); err != nil { + return fmt.Errorf("devmapper: Error syncing metadata file %s: %s", tmpFile.Name(), err) + } + if err := tmpFile.Close(); err != nil { + return fmt.Errorf("devmapper: Error closing metadata file %s: %s", tmpFile.Name(), err) + } + if err := os.Rename(tmpFile.Name(), filePath); err != nil { + return fmt.Errorf("devmapper: Error committing metadata file %s: %s", tmpFile.Name(), err) + } + + return nil +} + +func (devices *DeviceSet) saveMetadata(info *devInfo) error { + jsonData, err := json.Marshal(info) + if err != nil { + return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) + } + if err := devices.writeMetaFile(jsonData, devices.metadataFile(info)); err != nil { + return err + } + return nil +} + +func (devices *DeviceSet) markDeviceIDUsed(deviceID int) { + var mask byte + i := deviceID % 8 + mask = 1 << uint(i) + devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] | mask +} + +func (devices *DeviceSet) markDeviceIDFree(deviceID int) { + var mask byte + i := deviceID % 8 + mask = ^(1 << uint(i)) + 
devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] & mask +} + +func (devices *DeviceSet) isDeviceIDFree(deviceID int) bool { + var mask byte + i := deviceID % 8 + mask = (1 << uint(i)) + return (devices.deviceIDMap[deviceID/8] & mask) == 0 +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) lookupDevice(hash string) (*devInfo, error) { + info := devices.Devices[hash] + if info == nil { + info = devices.loadMetadata(hash) + if info == nil { + return nil, fmt.Errorf("devmapper: Unknown device %s", hash) + } + + devices.Devices[hash] = info + } + return info, nil +} + +func (devices *DeviceSet) lookupDeviceWithLock(hash string) (*devInfo, error) { + devices.Lock() + defer devices.Unlock() + info, err := devices.lookupDevice(hash) + return info, err +} + +// This function relies on that device hash map has been loaded in advance. +// Should be called with devices.Lock() held. +func (devices *DeviceSet) constructDeviceIDMap() { + logrus.Debug("devmapper: constructDeviceIDMap()") + defer logrus.Debug("devmapper: constructDeviceIDMap() END") + + for _, info := range devices.Devices { + devices.markDeviceIDUsed(info.DeviceID) + logrus.Debugf("devmapper: Added deviceId=%d to DeviceIdMap", info.DeviceID) + } +} + +func (devices *DeviceSet) deviceFileWalkFunction(path string, name string) error { + // Skip some of the meta files which are not device files. + if strings.HasSuffix(name, ".migrated") { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if strings.HasPrefix(name, ".") { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if name == deviceSetMetaFile { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if name == transactionMetaFile { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + logrus.Debugf("devmapper: Loading data for file %s", path) + + // Include deleted devices also as cleanup delete device logic + // will go through it and see if there are any deleted devices. + if _, err := devices.lookupDevice(name); err != nil { + return fmt.Errorf("devmapper: Error looking up device %s:%w", name, err) + } + + return nil +} + +func (devices *DeviceSet) loadDeviceFilesOnStart() error { + logrus.Debug("devmapper: loadDeviceFilesOnStart()") + defer logrus.Debug("devmapper: loadDeviceFilesOnStart() END") + + scan := func(path string, d fs.DirEntry, err error) error { + if err != nil { + logrus.Debugf("devmapper: Can't walk the file %s: %v", path, err) + return nil + } + + // Skip any directories + if d.IsDir() { + return nil + } + + return devices.deviceFileWalkFunction(path, d.Name()) + } + + return filepath.WalkDir(devices.metadataDir(), scan) +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) unregisterDevice(hash string) error { + logrus.Debugf("devmapper: unregisterDevice(%v)", hash) + info := &devInfo{ + Hash: hash, + } + + delete(devices.Devices, hash) + + if err := devices.removeMetadata(info); err != nil { + logrus.Debugf("devmapper: Error removing metadata: %s", err) + return err + } + + return nil +} + +// Should be called with devices.Lock() held. 
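Editor's note: the deviceIDMap manipulated by markDeviceIDUsed, markDeviceIDFree and isDeviceIDFree above is a plain bitmap with one bit per 24-bit device ID (8 IDs per byte). A self-contained restatement with illustrative names:

package main

import "fmt"

type bitmap []byte

func (b bitmap) set(id int)         { b[id/8] |= 1 << uint(id%8) }
func (b bitmap) clear(id int)       { b[id/8] &^= 1 << uint(id%8) }
func (b bitmap) isFree(id int) bool { return b[id/8]&(1<<uint(id%8)) == 0 }

func main() {
	m := make(bitmap, (0xffffff+1)/8) // same sizing as deviceIDMapSz above
	m.set(42)
	fmt.Println(m.isFree(42), m.isFree(43)) // false true
	m.clear(42)
	fmt.Println(m.isFree(42)) // true
}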
+func (devices *DeviceSet) registerDevice(id int, hash string, size uint64, transactionID uint64) (*devInfo, error) {
+	logrus.Debugf("devmapper: registerDevice(%v, %v)", id, hash)
+	info := &devInfo{
+		Hash:          hash,
+		DeviceID:      id,
+		Size:          size,
+		TransactionID: transactionID,
+		Initialized:   false,
+		devices:       devices,
+	}
+
+	devices.Devices[hash] = info
+
+	if err := devices.saveMetadata(info); err != nil {
+		// Try to remove unused device
+		delete(devices.Devices, hash)
+		return nil, err
+	}
+
+	return info, nil
+}
+
+func (devices *DeviceSet) activateDeviceIfNeeded(info *devInfo, ignoreDeleted bool) error {
+	logrus.Debugf("devmapper: activateDeviceIfNeeded(%v)", info.Hash)
+
+	if info.Deleted && !ignoreDeleted {
+		return fmt.Errorf("devmapper: Can't activate device %v as it is marked for deletion", info.Hash)
+	}
+
+	// Make sure deferred removal on device is canceled, if one was
+	// scheduled.
+	if err := devices.cancelDeferredRemovalIfNeeded(info); err != nil {
+		return fmt.Errorf("devmapper: Device Deferred Removal Cancellation Failed: %s", err)
+	}
+
+	if devinfo, _ := devicemapper.GetInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 {
+		return nil
+	}
+
+	return devicemapper.ActivateDevice(devices.getPoolDevName(), info.Name(), info.DeviceID, info.Size)
+}
+
+// xfsSupported checks if xfs is supported, returns nil if it is, otherwise an error
+func xfsSupported() error {
+	// Make sure mkfs.xfs is available
+	if _, err := exec.LookPath("mkfs.xfs"); err != nil {
+		return err // error text is descriptive enough
+	}
+
+	// Check if kernel supports xfs filesystem or not.
+	exec.Command("modprobe", xfs).Run()
+
+	f, err := os.Open("/proc/filesystems")
+	if err != nil {
+		return fmt.Errorf("checking for xfs support: %w", err)
+	}
+	defer f.Close()
+
+	s := bufio.NewScanner(f)
+	for s.Scan() {
+		if strings.HasSuffix(s.Text(), "\txfs") {
+			return nil
+		}
+	}
+
+	if err := s.Err(); err != nil {
+		return fmt.Errorf("checking for xfs support: %w", err)
+	}
+
+	return errors.New(`kernel does not support xfs, or "modprobe xfs" failed`)
+}
+
+func determineDefaultFS() string {
+	err := xfsSupported()
+	if err == nil {
+		return xfs
+	}
+
+	logrus.Warnf("devmapper: XFS is not supported in your system (%v). Defaulting to %s filesystem", err, ext4)
+	return ext4
+}
+
+// mkfsOptions tries to figure out whether some additional mkfs options are required
+func mkfsOptions(fs string) []string {
+	if fs == xfs && !kernel.CheckKernelVersion(3, 16, 0) {
+		// For kernels earlier than 3.16 (and newer xfsutils),
+		// some xfs features need to be explicitly disabled.
+		return []string{"-m", "crc=0,finobt=0"}
+	}
+
+	return []string{}
+}
+
+func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) {
+	devname := info.DevName()
+
+	if devices.filesystem == "" {
+		devices.filesystem = determineDefaultFS()
+	}
+	if err := devices.saveBaseDeviceFilesystem(devices.filesystem); err != nil {
+		return err
+	}
+
+	args := mkfsOptions(devices.filesystem)
+	args = append(args, devices.mkfsArgs...)
+ args = append(args, devname) + + logrus.Infof("devmapper: Creating filesystem %s on device %s, mkfs args: %v", devices.filesystem, info.Name(), args) + defer func() { + if err != nil { + logrus.Infof("devmapper: Error while creating filesystem %s on device %s: %v", devices.filesystem, info.Name(), err) + } else { + logrus.Infof("devmapper: Successfully created filesystem %s on device %s", devices.filesystem, info.Name()) + } + }() + + switch devices.filesystem { + case xfs: + err = exec.Command("mkfs.xfs", args...).Run() + case ext4: + err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0"}, args...)...).Run() + if err != nil { + err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0"}, args...)...).Run() + } + if err != nil { + return err + } + err = exec.Command("tune2fs", append([]string{"-c", "-1", "-i", "0"}, devname)...).Run() + default: + err = fmt.Errorf("devmapper: Unsupported filesystem type %s", devices.filesystem) + } + return +} + +func (devices *DeviceSet) migrateOldMetaData() error { + // Migrate old metadata file + jsonData, err := os.ReadFile(devices.oldMetadataFile()) + if err != nil && !os.IsNotExist(err) { + return err + } + + if jsonData != nil { + m := metaData{Devices: make(map[string]*devInfo)} + + if err := json.Unmarshal(jsonData, &m); err != nil { + return err + } + + for hash, info := range m.Devices { + info.Hash = hash + devices.saveMetadata(info) + } + if err := os.Rename(devices.oldMetadataFile(), devices.oldMetadataFile()+".migrated"); err != nil { + return err + } + + } + + return nil +} + +// Cleanup deleted devices. It assumes that all the devices have been +// loaded in the hash table. +func (devices *DeviceSet) cleanupDeletedDevices() error { + devices.Lock() + + // If there are no deleted devices, there is nothing to do. + if devices.nrDeletedDevices == 0 { + devices.Unlock() + return nil + } + + var deletedDevices []*devInfo + + for _, info := range devices.Devices { + if !info.Deleted { + continue + } + logrus.Debugf("devmapper: Found deleted device %s.", info.Hash) + deletedDevices = append(deletedDevices, info) + } + + // Delete the deleted devices. DeleteDevice() first takes the info lock + // and then devices.Lock(). So drop it to avoid deadlock. + devices.Unlock() + + for _, info := range deletedDevices { + // This will again try deferred deletion. + if err := devices.DeleteDevice(info.Hash, false); err != nil { + logrus.Warnf("devmapper: Deletion of device %s, device_id=%v failed:%v", info.Hash, info.DeviceID, err) + } + } + + return nil +} + +func (devices *DeviceSet) countDeletedDevices() { + for _, info := range devices.Devices { + if !info.Deleted { + continue + } + devices.nrDeletedDevices++ + } +} + +func (devices *DeviceSet) startDeviceDeletionWorker() { + // Deferred deletion is not enabled. Don't do anything. + if !devices.deferredDelete { + return + } + + // Cleanup right away if there are any leaked devices. 
Note this + // could cause some slowdown for process startup, if there were + // Leaked devices + devices.cleanupDeletedDevices() + logrus.Debug("devmapper: Worker to cleanup deleted devices started") + for range devices.deletionWorkerTicker.C { + devices.cleanupDeletedDevices() + } +} + +func (devices *DeviceSet) initMetaData() error { + devices.Lock() + defer devices.Unlock() + + if err := devices.migrateOldMetaData(); err != nil { + return err + } + + _, transactionID, _, _, _, _, err := devices.poolStatus() + if err != nil { + return err + } + + devices.TransactionID = transactionID + + if err := devices.loadDeviceFilesOnStart(); err != nil { + return fmt.Errorf("devmapper: Failed to load device files:%w", err) + } + + devices.constructDeviceIDMap() + devices.countDeletedDevices() + + if err := devices.processPendingTransaction(); err != nil { + return err + } + + // Start a goroutine to cleanup Deleted Devices + go devices.startDeviceDeletionWorker() + return nil +} + +func (devices *DeviceSet) incNextDeviceID() { + // IDs are 24bit, so wrap around + devices.NextDeviceID = (devices.NextDeviceID + 1) & maxDeviceID +} + +func (devices *DeviceSet) getNextFreeDeviceID() (int, error) { + devices.incNextDeviceID() + for i := 0; i <= maxDeviceID; i++ { + if devices.isDeviceIDFree(devices.NextDeviceID) { + devices.markDeviceIDUsed(devices.NextDeviceID) + return devices.NextDeviceID, nil + } + devices.incNextDeviceID() + } + + return 0, fmt.Errorf("devmapper: Unable to find a free device ID") +} + +func (devices *DeviceSet) poolHasFreeSpace() error { + if devices.minFreeSpacePercent == 0 { + return nil + } + + _, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() + if err != nil { + return err + } + + minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100 + if minFreeData < 1 { + minFreeData = 1 + } + dataFree := dataTotal - dataUsed + if dataFree < minFreeData { + return fmt.Errorf("devmapper: Thin Pool has %v free data blocks which is less than minimum required %v free data blocks. Create more free space in thin pool or use dm.min_free_space option to change behavior", (dataTotal - dataUsed), minFreeData) + } + + minFreeMetadata := (metadataTotal * uint64(devices.minFreeSpacePercent)) / 100 + if minFreeMetadata < 1 { + minFreeMetadata = 1 + } + + metadataFree := metadataTotal - metadataUsed + if metadataFree < minFreeMetadata { + return fmt.Errorf("devmapper: Thin Pool has %v free metadata blocks which is less than minimum required %v free metadata blocks. Create more free metadata space in thin pool or use dm.min_free_space option to change behavior", (metadataTotal - metadataUsed), minFreeMetadata) + } + + return nil +} + +func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) { + devices.Lock() + defer devices.Unlock() + + deviceID, err := devices.getNextFreeDeviceID() + if err != nil { + return nil, err + } + + if err := devices.openTransaction(hash, deviceID); err != nil { + logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + + for { + if err := devicemapper.CreateDevice(devices.getPoolDevName(), deviceID); err != nil { + if devicemapper.DeviceIDExists(err) { + // Device ID already exists. This should not + // happen. Now we have a mechanism to find + // a free device ID. So something is not right. + // Give a warning and continue. 
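Editor's note: a worked example of the threshold arithmetic in poolHasFreeSpace above. minFreeBlocks mirrors the integer math, including the floor of one block that guards tiny pools where the percentage rounds down to zero (the function name is illustrative):

package main

import "fmt"

// minFreeBlocks computes the minimum number of blocks that must stay free,
// as poolHasFreeSpace does for both data and metadata blocks.
func minFreeBlocks(total uint64, pct uint32) uint64 {
	minFree := (total * uint64(pct)) / 100
	if minFree < 1 {
		minFree = 1
	}
	return minFree
}

func main() {
	fmt.Println(minFreeBlocks(16384, 10)) // 1638: 10% of 16384 blocks
	fmt.Println(minFreeBlocks(5, 10))     // 1: floor kicks in for tiny pools
}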
+ logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) + deviceID, err = devices.getNextFreeDeviceID() + if err != nil { + return nil, err + } + // Save new device id into transaction + devices.refreshTransaction(deviceID) + continue + } + logrus.Debugf("devmapper: Error creating device: %s", err) + devices.markDeviceIDFree(deviceID) + return nil, err + } + break + } + + logrus.Debugf("devmapper: Registering device (id %v) with FS size %v", deviceID, devices.baseFsSize) + info, err := devices.registerDevice(deviceID, hash, devices.baseFsSize, devices.OpenTransactionID) + if err != nil { + _ = devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + + if err := devices.closeTransaction(); err != nil { + devices.unregisterDevice(hash) + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + return info, nil +} + +func (devices *DeviceSet) takeSnapshot(hash string, baseInfo *devInfo, size uint64) error { + var ( + devinfo *devicemapper.Info + err error + ) + + if err = devices.poolHasFreeSpace(); err != nil { + return err + } + + if devices.deferredRemove { + devinfo, err = devicemapper.GetInfoWithDeferred(baseInfo.Name()) + if err != nil { + return err + } + if devinfo != nil && devinfo.DeferredRemove != 0 { + err = devices.cancelDeferredRemoval(baseInfo) + if err != nil { + // If Error is ErrEnxio. Device is probably already gone. Continue. + if !errors.Is(err, devicemapper.ErrEnxio) { + return err + } + devinfo = nil + } else { + defer devices.deactivateDevice(baseInfo) + } + } + } else { + devinfo, err = devicemapper.GetInfo(baseInfo.Name()) + if err != nil { + return err + } + } + + doSuspend := devinfo != nil && devinfo.Exists != 0 + + if doSuspend { + if err = devicemapper.SuspendDevice(baseInfo.Name()); err != nil { + return err + } + defer devicemapper.ResumeDevice(baseInfo.Name()) + } + + if err = devices.createRegisterSnapDevice(hash, baseInfo, size); err != nil { + return err + } + + return nil +} + +func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInfo, size uint64) error { + deviceID, err := devices.getNextFreeDeviceID() + if err != nil { + return err + } + + if err := devices.openTransaction(hash, deviceID); err != nil { + logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) + devices.markDeviceIDFree(deviceID) + return err + } + + for { + if err := devicemapper.CreateSnapDeviceRaw(devices.getPoolDevName(), deviceID, baseInfo.DeviceID); err != nil { + if devicemapper.DeviceIDExists(err) { + // Device ID already exists. This should not + // happen. Now we have a mechanism to find + // a free device ID. So something is not right. + // Give a warning and continue. 
+ logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) + deviceID, err = devices.getNextFreeDeviceID() + if err != nil { + return err + } + // Save new device id into transaction + devices.refreshTransaction(deviceID) + continue + } + logrus.Debugf("devmapper: Error creating snap device: %s", err) + devices.markDeviceIDFree(deviceID) + return err + } + break + } + + if _, err := devices.registerDevice(deviceID, hash, size, devices.OpenTransactionID); err != nil { + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + logrus.Debugf("devmapper: Error registering device: %s", err) + return err + } + + if err := devices.closeTransaction(); err != nil { + devices.unregisterDevice(hash) + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return err + } + return nil +} + +func (devices *DeviceSet) loadMetadata(hash string) *devInfo { + info := &devInfo{Hash: hash, devices: devices} + + jsonData, err := os.ReadFile(devices.metadataFile(info)) + if err != nil { + logrus.Debugf("devmapper: Failed to read %s with err: %v", devices.metadataFile(info), err) + return nil + } + + if err := json.Unmarshal(jsonData, &info); err != nil { + logrus.Debugf("devmapper: Failed to unmarshal devInfo from %s with err: %v", devices.metadataFile(info), err) + return nil + } + + if info.DeviceID > maxDeviceID { + logrus.Errorf("devmapper: Ignoring Invalid DeviceId=%d", info.DeviceID) + return nil + } + + return info +} + +func getDeviceUUID(device string) (string, error) { + out, err := exec.Command("blkid", "-s", "UUID", "-o", "value", device).Output() + if err != nil { + return "", fmt.Errorf("devmapper: Failed to find uuid for device %s:%w", device, err) + } + + uuid := strings.TrimSuffix(string(out), "\n") + uuid = strings.TrimSpace(uuid) + logrus.Debugf("devmapper: UUID for device: %s is:%s", device, uuid) + return uuid, nil +} + +func (devices *DeviceSet) getBaseDeviceSize() uint64 { + info, _ := devices.lookupDevice("") + if info == nil { + return 0 + } + return info.Size +} + +func (devices *DeviceSet) getBaseDeviceFS() string { + return devices.BaseDeviceFilesystem +} + +func (devices *DeviceSet) verifyBaseDeviceUUIDFS(baseInfo *devInfo) error { + devices.Lock() + defer devices.Unlock() + + if devices.filesystem == "" { + devices.filesystem = determineDefaultFS() + } + + if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil { + return err + } + defer devices.deactivateDevice(baseInfo) + + uuid, err := getDeviceUUID(baseInfo.DevName()) + if err != nil { + return err + } + + if devices.BaseDeviceUUID != uuid { + return fmt.Errorf("devmapper: Current Base Device UUID:%s does not match with stored UUID:%s. Possibly using a different thin pool than last invocation", uuid, devices.BaseDeviceUUID) + } + + if devices.BaseDeviceFilesystem == "" { + fsType, err := ProbeFsType(baseInfo.DevName()) + if err != nil { + return err + } + if err := devices.saveBaseDeviceFilesystem(fsType); err != nil { + return err + } + } + + // If user specified a filesystem using dm.fs option and current + // file system of base image is not same, warn user that dm.fs + // will be ignored. + if devices.BaseDeviceFilesystem != devices.filesystem { + logrus.Warnf("devmapper: Base device already exists and has filesystem %s on it. 
User specified filesystem %s will be ignored.", devices.BaseDeviceFilesystem, devices.filesystem) + devices.filesystem = devices.BaseDeviceFilesystem + } + return nil +} + +func (devices *DeviceSet) saveBaseDeviceFilesystem(fs string) error { + devices.BaseDeviceFilesystem = fs + return devices.saveDeviceSetMetaData() +} + +func (devices *DeviceSet) saveBaseDeviceUUID(baseInfo *devInfo) error { + devices.Lock() + defer devices.Unlock() + + if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil { + return err + } + defer devices.deactivateDevice(baseInfo) + + uuid, err := getDeviceUUID(baseInfo.DevName()) + if err != nil { + return err + } + + devices.BaseDeviceUUID = uuid + return devices.saveDeviceSetMetaData() +} + +func (devices *DeviceSet) createBaseImage() error { + logrus.Debug("devmapper: Initializing base device-mapper thin volume") + + // Create initial device + info, err := devices.createRegisterDevice("") + if err != nil { + return err + } + + logrus.Debug("devmapper: Creating filesystem on base device-mapper thin volume") + + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return err + } + + if err := devices.createFilesystem(info); err != nil { + return err + } + + info.Initialized = true + if err := devices.saveMetadata(info); err != nil { + info.Initialized = false + return err + } + + if err := devices.saveBaseDeviceUUID(info); err != nil { + return fmt.Errorf("devmapper: Could not query and save base device UUID:%w", err) + } + + return nil +} + +// Returns if thin pool device exists or not. If device exists, also makes +// sure it is a thin pool device and not some other type of device. +func (devices *DeviceSet) thinPoolExists(thinPoolDevice string) (bool, error) { + logrus.Debugf("devmapper: Checking for existence of the pool %s", thinPoolDevice) + + info, err := devicemapper.GetInfo(thinPoolDevice) + if err != nil { + return false, fmt.Errorf("devmapper: GetInfo() on device %s failed: %w", thinPoolDevice, err) + } + + // Device does not exist. + if info.Exists == 0 { + return false, nil + } + + _, _, deviceType, _, err := devicemapper.GetStatus(thinPoolDevice) + if err != nil { + return false, fmt.Errorf("devmapper: GetStatus() on device %s failed: %w", thinPoolDevice, err) + } + + if deviceType != "thin-pool" { + return false, fmt.Errorf("devmapper: Device %s is not a thin pool", thinPoolDevice) + } + + return true, nil +} + +func (devices *DeviceSet) checkThinPool() error { + _, transactionID, dataUsed, _, _, _, err := devices.poolStatus() + if err != nil { + return err + } + if dataUsed != 0 { + return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) that already has used data blocks", + devices.thinPoolDevice) + } + if transactionID != 0 { + return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) with non-zero transaction ID", + devices.thinPoolDevice) + } + return nil +} + +// Base image is initialized properly. Either save UUID for first time (for +// upgrade case or verify UUID. +func (devices *DeviceSet) setupVerifyBaseImageUUIDFS(baseInfo *devInfo) error { + // If BaseDeviceUUID is nil (upgrade case), save it and return success. 
+ if devices.BaseDeviceUUID == "" { + if err := devices.saveBaseDeviceUUID(baseInfo); err != nil { + return fmt.Errorf("devmapper: Could not query and save base device UUID:%w", err) + } + return nil + } + + if err := devices.verifyBaseDeviceUUIDFS(baseInfo); err != nil { + return fmt.Errorf("devmapper: Base Device UUID and Filesystem verification failed: %w", err) + } + + return nil +} + +func (devices *DeviceSet) checkGrowBaseDeviceFS(info *devInfo) error { + if !userBaseSize { + return nil + } + + if devices.baseFsSize < devices.getBaseDeviceSize() { + return fmt.Errorf("devmapper: Base device size cannot be smaller than %s", units.HumanSize(float64(devices.getBaseDeviceSize()))) + } + + if devices.baseFsSize == devices.getBaseDeviceSize() { + return nil + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + info.Size = devices.baseFsSize + + if err := devices.saveMetadata(info); err != nil { + // Try to remove unused device + delete(devices.Devices, info.Hash) + return err + } + + return devices.growFS(info) +} + +func (devices *DeviceSet) growFS(info *devInfo) error { + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return fmt.Errorf("activating devmapper device: %s", err) + } + + defer devices.deactivateDevice(info) + + fsMountPoint := "/run/containers/storage/mnt" + if _, err := os.Stat(fsMountPoint); os.IsNotExist(err) { + if err := os.MkdirAll(fsMountPoint, 0o700); err != nil { + return err + } + defer os.RemoveAll(fsMountPoint) + } + + options := "" + if devices.BaseDeviceFilesystem == xfs { + // XFS needs nouuid or it can't mount filesystems with the same fs + options = joinMountOptions(options, "nouuid") + } + options = joinMountOptions(options, devices.mountOptions) + + if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil { + return fmt.Errorf("failed to mount; dmesg: %s: %w", string(dmesg.Dmesg(256)), err) + } + + defer func() { + if err := mount.Unmount(fsMountPoint); err != nil { + logrus.Warnf("devmapper.growFS cleanup error: %v", err) + } + }() + + switch devices.BaseDeviceFilesystem { + case ext4: + if out, err := exec.Command("resize2fs", info.DevName()).CombinedOutput(); err != nil { + return fmt.Errorf("failed to grow rootfs:%s:%w", string(out), err) + } + case xfs: + if out, err := exec.Command("xfs_growfs", info.DevName()).CombinedOutput(); err != nil { + return fmt.Errorf("failed to grow rootfs:%s:%w", string(out), err) + } + default: + return fmt.Errorf("unsupported filesystem type %s", devices.BaseDeviceFilesystem) + } + return nil +} + +func (devices *DeviceSet) setupBaseImage() error { + oldInfo, _ := devices.lookupDeviceWithLock("") + + // base image already exists. If it is initialized properly, do UUID + // verification and return. Otherwise remove image and set it up + // fresh. + + if oldInfo != nil { + if oldInfo.Initialized && !oldInfo.Deleted { + if err := devices.setupVerifyBaseImageUUIDFS(oldInfo); err != nil { + return err + } + + if err := devices.checkGrowBaseDeviceFS(oldInfo); err != nil { + return err + } + + return nil + } + + logrus.Debug("devmapper: Removing uninitialized base image") + // If previous base device is in deferred delete state, + // that needs to be cleaned up first. So don't try + // deferred deletion. + if err := devices.DeleteDevice("", true); err != nil { + return err + } + } + + // If we are setting up base image for the first time, make sure + // thin pool is empty. 
+ if devices.thinPoolDevice != "" && oldInfo == nil { + if err := devices.checkThinPool(); err != nil { + return err + } + } + + // Create new base image device + if err := devices.createBaseImage(); err != nil { + return err + } + + return nil +} + +func setCloseOnExec(name string) { + fileEntries, _ := os.ReadDir("/proc/self/fd") + for _, e := range fileEntries { + link, _ := os.Readlink(filepath.Join("/proc/self/fd", e.Name())) + if link == name { + fd, err := strconv.Atoi(e.Name()) + if err == nil { + unix.CloseOnExec(fd) + } + } + } +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +// ResizePool increases the size of the pool. +func (devices *DeviceSet) ResizePool(size int64) error { + dirname := devices.loopbackDir() + datafilename := path.Join(dirname, "data") + if len(devices.dataDevice) > 0 { + datafilename = devices.dataDevice + } + metadatafilename := path.Join(dirname, "metadata") + if len(devices.metadataDevice) > 0 { + metadatafilename = devices.metadataDevice + } + + datafile, err := os.OpenFile(datafilename, os.O_RDWR, 0) + if datafile == nil { + return err + } + defer datafile.Close() + + fi, err := datafile.Stat() + if fi == nil { + return err + } + + if fi.Size() > size { + return fmt.Errorf("devmapper: Can't shrink file") + } + + dataloopback := loopback.FindLoopDeviceFor(datafile) + if dataloopback == nil { + return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", datafilename) + } + defer dataloopback.Close() + + metadatafile, err := os.OpenFile(metadatafilename, os.O_RDWR, 0) + if metadatafile == nil { + return err + } + defer metadatafile.Close() + + metadataloopback := loopback.FindLoopDeviceFor(metadatafile) + if metadataloopback == nil { + return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", metadatafilename) + } + defer metadataloopback.Close() + + // Grow loopback file + if err := datafile.Truncate(size); err != nil { + return fmt.Errorf("devmapper: Unable to grow loopback file: %s", err) + } + + // Reload size for loopback device + if err := loopback.SetCapacity(dataloopback); err != nil { + return fmt.Errorf("unable to update loopback capacity: %s", err) + } + + // Suspend the pool + if err := devicemapper.SuspendDevice(devices.getPoolName()); err != nil { + return fmt.Errorf("devmapper: Unable to suspend pool: %s", err) + } + + // Reload with the new block sizes + if err := devicemapper.ReloadPool(devices.getPoolName(), dataloopback, metadataloopback, devices.thinpBlockSize); err != nil { + return fmt.Errorf("devmapper: Unable to reload pool: %s", err) + } + + // Resume the pool + if err := devicemapper.ResumeDevice(devices.getPoolName()); err != nil { + return fmt.Errorf("devmapper: Unable to resume pool: %s", err) + } + + return nil +} + +func (devices *DeviceSet) loadTransactionMetaData() error { + jsonData, err := os.ReadFile(devices.transactionMetaFile()) + if err != nil { + // There is no active transaction. This will be the case + // during upgrade. 
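Editor's note: the transaction file read here is produced by saveTransactionMetaData via writeMetaFile earlier in this file, whose write-temp-then-rename sequence is what makes the metadata crash-safe. A compact standalone variant of that pattern (this sketch additionally removes the temp file on failure; names are illustrative):

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
)

// atomicWriteJSON marshals v and atomically replaces dir/name with it:
// write to a temp file, fsync, close, then rename over the target.
func atomicWriteJSON(dir, name string, v interface{}) error {
	tmp, err := os.CreateTemp(dir, ".tmp")
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name()) // harmless no-op after a successful rename
	b, err := json.Marshal(v)
	if err != nil {
		return err
	}
	if _, err := tmp.Write(b); err != nil {
		return err
	}
	if err := tmp.Sync(); err != nil {
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	return os.Rename(tmp.Name(), filepath.Join(dir, name))
}

func main() {
	fmt.Println(atomicWriteJSON("/tmp", "transaction-metadata", map[string]uint64{"open_transaction_id": 7}))
}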
+ if os.IsNotExist(err) { + devices.OpenTransactionID = devices.TransactionID + return nil + } + return err + } + + json.Unmarshal(jsonData, &devices.transaction) + return nil +} + +func (devices *DeviceSet) saveTransactionMetaData() error { + jsonData, err := json.Marshal(&devices.transaction) + if err != nil { + return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) + } + + return devices.writeMetaFile(jsonData, devices.transactionMetaFile()) +} + +func (devices *DeviceSet) removeTransactionMetaData() error { + return os.RemoveAll(devices.transactionMetaFile()) +} + +func (devices *DeviceSet) rollbackTransaction() error { + logrus.Debugf("devmapper: Rolling back open transaction: TransactionID=%d hash=%s device_id=%d", devices.OpenTransactionID, devices.DeviceIDHash, devices.DeviceID) + + // A device id might have already been deleted before transaction + // closed. In that case this call will fail. Just leave a message + // in case of failure. + if err := devicemapper.DeleteDevice(devices.getPoolDevName(), devices.DeviceID); err != nil { + logrus.Errorf("devmapper: Unable to delete device: %s", err) + } + + dinfo := &devInfo{Hash: devices.DeviceIDHash} + if err := devices.removeMetadata(dinfo); err != nil { + logrus.Errorf("devmapper: Unable to remove metadata: %s", err) + } else { + devices.markDeviceIDFree(devices.DeviceID) + } + + if err := devices.removeTransactionMetaData(); err != nil { + logrus.Errorf("devmapper: Unable to remove transaction meta file %s: %s", devices.transactionMetaFile(), err) + } + + return nil +} + +func (devices *DeviceSet) processPendingTransaction() error { + if err := devices.loadTransactionMetaData(); err != nil { + return err + } + + // If there was open transaction but pool transaction ID is same + // as open transaction ID, nothing to roll back. + if devices.TransactionID == devices.OpenTransactionID { + return nil + } + + // If open transaction ID is less than pool transaction ID, something + // is wrong. Bail out. + if devices.OpenTransactionID < devices.TransactionID { + logrus.Errorf("devmapper: Open Transaction id %d is less than pool transaction id %d", devices.OpenTransactionID, devices.TransactionID) + return nil + } + + // Pool transaction ID is not same as open transaction. There is + // a transaction which was not completed. + if err := devices.rollbackTransaction(); err != nil { + return fmt.Errorf("devmapper: Rolling back open transaction failed: %s", err) + } + + devices.OpenTransactionID = devices.TransactionID + return nil +} + +func (devices *DeviceSet) loadDeviceSetMetaData() error { + jsonData, err := os.ReadFile(devices.deviceSetMetaFile()) + if err != nil { + // For backward compatibility return success if file does + // not exist. 
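Editor's note: the rollback decision in processPendingTransaction above reduces to a three-way comparison of the pool's transaction ID against the saved open transaction ID. Restated as a pure function for clarity (illustrative only; the real method logs and mutates state rather than returning flags):

package main

import "fmt"

func needsRollback(poolTxID, openTxID uint64) (rollback, inconsistent bool) {
	switch {
	case openTxID == poolTxID:
		return false, false // previous transaction committed cleanly
	case openTxID < poolTxID:
		return false, true // should never happen; the code above only logs it
	default:
		return true, false // commit never reached the pool: roll back
	}
}

func main() {
	fmt.Println(needsRollback(7, 7)) // false false
	fmt.Println(needsRollback(7, 8)) // true false
	fmt.Println(needsRollback(8, 7)) // false true
}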
+ if os.IsNotExist(err) { + return nil + } + return err + } + + return json.Unmarshal(jsonData, devices) +} + +func (devices *DeviceSet) saveDeviceSetMetaData() error { + jsonData, err := json.Marshal(devices) + if err != nil { + return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) + } + + return devices.writeMetaFile(jsonData, devices.deviceSetMetaFile()) +} + +func (devices *DeviceSet) openTransaction(hash string, DeviceID int) error { + devices.allocateTransactionID() + devices.DeviceIDHash = hash + devices.DeviceID = DeviceID + if err := devices.saveTransactionMetaData(); err != nil { + return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err) + } + return nil +} + +func (devices *DeviceSet) refreshTransaction(DeviceID int) error { + devices.DeviceID = DeviceID + if err := devices.saveTransactionMetaData(); err != nil { + return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err) + } + return nil +} + +func (devices *DeviceSet) closeTransaction() error { + if err := devices.updatePoolTransactionID(); err != nil { + logrus.Debug("devmapper: Failed to close Transaction") + return err + } + return nil +} + +func determineDriverCapabilities(version string) error { + // Kernel driver version >= 4.27.0 support deferred removal + + logrus.Debugf("devicemapper: kernel dm driver version is %s", version) + + versionSplit := strings.Split(version, ".") + major, err := strconv.Atoi(versionSplit[0]) + if err != nil { + return fmt.Errorf("unable to parse driver major version %q as a number: %w", versionSplit[0], graphdriver.ErrNotSupported) + } + + if major > 4 { + driverDeferredRemovalSupport = true + return nil + } + + if major < 4 { + return nil + } + + minor, err := strconv.Atoi(versionSplit[1]) + if err != nil { + return fmt.Errorf("unable to parse driver minor version %q as a number: %w", versionSplit[1], graphdriver.ErrNotSupported) + } + + /* + * If major is 4 and minor is 27, then there is no need to + * check for patch level as it can not be less than 0. + */ + if minor >= 27 { + driverDeferredRemovalSupport = true + return nil + } + + return nil +} + +// Determine the major and minor number of loopback device +func getDeviceMajorMinor(file *os.File) (uint64, uint64, error) { + var stat unix.Stat_t + err := unix.Stat(file.Name(), &stat) + if err != nil { + return 0, 0, err + } + + dev := stat.Rdev + majorNum := major(uint64(dev)) + minorNum := minor(uint64(dev)) + + logrus.Debugf("devmapper: Major:Minor for device: %s is:%v:%v", file.Name(), majorNum, minorNum) + return majorNum, minorNum, nil +} + +// Given a file which is backing file of a loop back device, find the +// loopback device name and its major/minor number. 
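Editor's note: the hand-rolled major()/minor() helpers above implement the standard Linux dev_t encoding; golang.org/x/sys/unix ships the same decoding as unix.Major and unix.Minor. A quick standalone cross-check:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var st unix.Stat_t
	if err := unix.Stat("/dev/null", &st); err != nil {
		fmt.Println(err)
		return
	}
	dev := uint64(st.Rdev)
	// Same decoding as the major()/minor() helpers in the patch above.
	fmt.Printf("major=%d minor=%d\n", unix.Major(dev), unix.Minor(dev)) // 1 3 on most systems
}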
+func getLoopFileDeviceMajMin(filename string) (string, uint64, uint64, error) { + file, err := os.Open(filename) + if err != nil { + logrus.Debugf("devmapper: Failed to open file %s", filename) + return "", 0, 0, err + } + + defer file.Close() + loopbackDevice := loopback.FindLoopDeviceFor(file) + if loopbackDevice == nil { + return "", 0, 0, fmt.Errorf("devmapper: Unable to find loopback mount for: %s", filename) + } + defer loopbackDevice.Close() + + Major, Minor, err := getDeviceMajorMinor(loopbackDevice) + if err != nil { + return "", 0, 0, err + } + return loopbackDevice.Name(), Major, Minor, nil +} + +// Get the major/minor numbers of thin pool data and metadata devices +func (devices *DeviceSet) getThinPoolDataMetaMajMin() (uint64, uint64, uint64, uint64, error) { + var params, poolDataMajMin, poolMetadataMajMin string + + _, _, _, params, err := devicemapper.GetTable(devices.getPoolName()) + if err != nil { + return 0, 0, 0, 0, err + } + + if _, err = fmt.Sscanf(params, "%s %s", &poolMetadataMajMin, &poolDataMajMin); err != nil { + return 0, 0, 0, 0, err + } + + logrus.Debugf("devmapper: poolDataMajMin=%s poolMetaMajMin=%s\n", poolDataMajMin, poolMetadataMajMin) + + poolDataMajMinorSplit := strings.Split(poolDataMajMin, ":") + poolDataMajor, err := strconv.ParseUint(poolDataMajMinorSplit[0], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolDataMinor, err := strconv.ParseUint(poolDataMajMinorSplit[1], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolMetadataMajMinorSplit := strings.Split(poolMetadataMajMin, ":") + poolMetadataMajor, err := strconv.ParseUint(poolMetadataMajMinorSplit[0], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolMetadataMinor, err := strconv.ParseUint(poolMetadataMajMinorSplit[1], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + return poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, nil +} + +func (devices *DeviceSet) loadThinPoolLoopBackInfo() error { + poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, err := devices.getThinPoolDataMetaMajMin() + if err != nil { + return err + } + + dirname := devices.loopbackDir() + + // data device has not been passed in. So there should be a data file + // which is being mounted as loop device. + if devices.dataDevice == "" { + datafilename := path.Join(dirname, "data") + dataLoopDevice, dataMajor, dataMinor, err := getLoopFileDeviceMajMin(datafilename) + if err != nil { + return err + } + + // Compare the two + if poolDataMajor == dataMajor && poolDataMinor == dataMinor { + devices.dataDevice = dataLoopDevice + devices.dataLoopFile = datafilename + } + + } + + // metadata device has not been passed in. So there should be a + // metadata file which is being mounted as loop device. + if devices.metadataDevice == "" { + metadatafilename := path.Join(dirname, "metadata") + metadataLoopDevice, metadataMajor, metadataMinor, err := getLoopFileDeviceMajMin(metadatafilename) + if err != nil { + return err + } + if poolMetadataMajor == metadataMajor && poolMetadataMinor == metadataMinor { + devices.metadataDevice = metadataLoopDevice + devices.metadataLoopFile = metadatafilename + } + } + + return nil +} + +func (devices *DeviceSet) enableDeferredRemovalDeletion() error { + // If user asked for deferred removal then check both libdm library + // and kernel driver support deferred removal otherwise error out. 
+ if enableDeferredRemoval { + if !driverDeferredRemovalSupport { + return fmt.Errorf("devmapper: Deferred removal can not be enabled as kernel does not support it") + } + if !devicemapper.LibraryDeferredRemovalSupport { + return fmt.Errorf("devmapper: Deferred removal can not be enabled as libdm does not support it") + } + logrus.Debug("devmapper: Deferred removal support enabled.") + devices.deferredRemove = true + } + + if enableDeferredDeletion { + if !devices.deferredRemove { + return fmt.Errorf("devmapper: Deferred deletion can not be enabled as deferred removal is not enabled. Enable deferred removal using --storage-opt dm.use_deferred_removal=true parameter") + } + logrus.Debug("devmapper: Deferred deletion support enabled.") + devices.deferredDelete = true + } + return nil +} + +func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) { + if err := devices.enableDeferredRemovalDeletion(); err != nil { + return err + } + + // https://github.com/docker/docker/issues/4036 + if supported := devicemapper.UdevSetSyncSupport(true); !supported { + logrus.Error("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a more recent version of libdevmapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/dockerd/#storage-driver-options") + + if !devices.overrideUdevSyncCheck { + return graphdriver.ErrNotSupported + } + } + + // create the root dir of the devmapper driver ownership to match this + // daemon's remapped root uid/gid so containers can start properly + uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAs(devices.root, 0o700, uid, gid); err != nil { + return err + } + if err := os.MkdirAll(devices.metadataDir(), 0o700); err != nil { + return err + } + if err := idtools.MkdirAs(filepath.Join(devices.root, "mnt"), 0o700, uid, gid); err != nil && !errors.Is(err, os.ErrExist) { + return err + } + + prevSetupConfig, err := readLVMConfig(devices.root) + if err != nil { + return err + } + + if !reflect.DeepEqual(devices.lvmSetupConfig, directLVMConfig{}) { + if devices.thinPoolDevice != "" { + return errors.New("cannot setup direct-lvm when `dm.thinpooldev` is also specified") + } + + if !reflect.DeepEqual(prevSetupConfig, devices.lvmSetupConfig) { + if !reflect.DeepEqual(prevSetupConfig, directLVMConfig{}) { + return errors.New("changing direct-lvm config is not supported") + } + logrus.WithField("storage-driver", "devicemapper").WithField("direct-lvm-config", devices.lvmSetupConfig).Debugf("Setting up direct lvm mode") + if err := verifyBlockDevice(devices.lvmSetupConfig.Device, lvmSetupConfigForce); err != nil { + return err + } + if err := setupDirectLVM(devices.lvmSetupConfig); err != nil { + return err + } + if err := writeLVMConfig(devices.root, devices.lvmSetupConfig); err != nil { + return err + } + } + devices.thinPoolDevice = "storage-thinpool" + logrus.WithField("storage-driver", "devicemapper").Debugf("Setting dm.thinpooldev to %q", devices.thinPoolDevice) + } + + // Set the device prefix from the device id and inode of the storage root dir + var st unix.Stat_t + if err := unix.Stat(devices.root, &st); err != nil { + return fmt.Errorf("devmapper: Error looking up dir %s: %s", devices.root, err) + } + // "reg-" stands for "regular file". + // In the future we might use "dev-" for "device file", etc. 
+	// container-maj,min[-inode] stands for:
+	//	- Managed by container storage
+	//	- The target of this device is at major <maj> and minor <min>
+	//	- If <inode> is defined, use that file inside the device as a loopback image. Otherwise use the device itself.
+	devices.devicePrefix = fmt.Sprintf("container-%d:%d-%d", major(uint64(st.Dev)), minor(uint64(st.Dev)), st.Ino)
+	logrus.Debugf("devmapper: Generated prefix: %s", devices.devicePrefix)
+
+	// Check for the existence of the thin-pool device
+	poolExists, err := devices.thinPoolExists(devices.getPoolName())
+	if err != nil {
+		return err
+	}
+
+	// It seems libdevmapper opens this without O_CLOEXEC, and go exec will not close files
+	// that are not Close-on-exec,
+	// so we add this bad hack to make sure it closes itself
+	setCloseOnExec("/dev/mapper/control")
+
+	// Make sure the sparse images exist in /devicemapper/data and
+	// /devicemapper/metadata
+
+	createdLoopback := false
+
+	// If the pool doesn't exist, create it
+	if !poolExists && devices.thinPoolDevice == "" {
+		logrus.Debug("devmapper: Pool doesn't exist. Creating it.")
+
+		var (
+			dataFile     *os.File
+			metadataFile *os.File
+		)
+
+		fsMagic, err := graphdriver.GetFSMagic(devices.loopbackDir())
+		if err != nil {
+			return err
+		}
+		switch fsMagic {
+		case graphdriver.FsMagicAufs:
+			return fmt.Errorf("devmapper: Loopback devices can not be created on AUFS filesystems")
+		}
+
+		if devices.dataDevice == "" {
+			// Make sure the sparse images exist in /devicemapper/data
+
+			hasData := devices.hasImage("data")
+
+			if !doInit && !hasData {
+				return errors.New("loopback data file not found")
+			}
+
+			if !hasData {
+				createdLoopback = true
+			}
+
+			data, err := devices.ensureImage("data", devices.dataLoopbackSize)
+			if err != nil {
+				logrus.Debugf("devmapper: Error device ensureImage (data): %s", err)
+				return err
+			}
+
+			dataFile, err = loopback.AttachLoopDevice(data)
+			if err != nil {
+				return err
+			}
+			devices.dataLoopFile = data
+			devices.dataDevice = dataFile.Name()
+		} else {
+			dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0o600)
+			if err != nil {
+				return err
+			}
+		}
+		defer dataFile.Close()
+
+		if devices.metadataDevice == "" {
+			// Make sure the sparse images exist in /devicemapper/metadata
+
+			hasMetadata := devices.hasImage("metadata")
+
+			if !doInit && !hasMetadata {
+				return errors.New("loopback metadata file not found")
+			}
+
+			if !hasMetadata {
+				createdLoopback = true
+			}
+
+			metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize)
+			if err != nil {
+				logrus.Debugf("devmapper: Error device ensureImage (metadata): %s", err)
+				return err
+			}
+
+			metadataFile, err = loopback.AttachLoopDevice(metadata)
+			if err != nil {
+				return err
+			}
+			devices.metadataLoopFile = metadata
+			devices.metadataDevice = metadataFile.Name()
+		} else {
+			metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0o600)
+			if err != nil {
+				return err
+			}
+		}
+		defer metadataFile.Close()
+
+		if err := devicemapper.CreatePool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil {
+			return err
+		}
+		defer func() {
+			if retErr != nil {
+				err = devices.deactivatePool()
+				if err != nil {
+					logrus.Warnf("devmapper: Failed to deactivatePool: %v", err)
+				}
+			}
+		}()
+	}
+
+	// Pool already exists and caller did not pass us a pool. That means
+	// we probably created the pool earlier and could not remove it as some
+	// containers were still using it. Detect some of the properties of the
+	// pool, such as whether it is using loop devices.
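+	// For reference, the same detection can be done by hand: a thin pool's
+	// table lists the metadata and data devices first, in major:minor form
+	// (output shape per the dm-thin docs; the names and numbers here are
+	// illustrative):
+	//
+	//	# dmsetup table container-253:0-1234-pool
+	//	0 209715200 thin-pool 7:1 7:0 128 32768 1 skip_block_zeroing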
+ if poolExists && devices.thinPoolDevice == "" { + if err := devices.loadThinPoolLoopBackInfo(); err != nil { + logrus.Debugf("devmapper: Failed to load thin pool loopback device information:%v", err) + return err + } + } + + // If we didn't just create the data or metadata image, we need to + // load the transaction id and migrate old metadata + if !createdLoopback { + if err := devices.initMetaData(); err != nil { + return err + } + } + + if devices.thinPoolDevice == "" { + if devices.metadataLoopFile != "" || devices.dataLoopFile != "" { + logrus.Warn("devmapper: Usage of loopback devices is strongly discouraged for production use. Please use `--storage-opt dm.thinpooldev`.") + } + } + + // Right now this loads only NextDeviceID. If there is more metadata + // down the line, we might have to move it earlier. + if err := devices.loadDeviceSetMetaData(); err != nil { + return err + } + + // Setup the base image + if doInit { + if err := devices.setupBaseImage(); err != nil { + logrus.Debugf("devmapper: Error device setupBaseImage: %s", err) + return err + } + } + + return nil +} + +// AddDevice adds a device and registers in the hash. +func (devices *DeviceSet) AddDevice(hash, baseHash string, storageOpt map[string]string) error { + logrus.Debugf("devmapper: AddDevice START(hash=%s basehash=%s)", hash, baseHash) + defer logrus.Debugf("devmapper: AddDevice END(hash=%s basehash=%s)", hash, baseHash) + + // If a deleted device exists, return error. + baseInfo, err := devices.lookupDeviceWithLock(baseHash) + if err != nil { + return err + } + + if baseInfo.Deleted { + return fmt.Errorf("devmapper: Base device %v has been marked for deferred deletion", baseInfo.Hash) + } + + baseInfo.lock.Lock() + defer baseInfo.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + // Also include deleted devices in case hash of new device is + // same as one of the deleted devices. + if info, _ := devices.lookupDevice(hash); info != nil { + return fmt.Errorf("devmapper: device %s already exists. Deleted=%v", hash, info.Deleted) + } + + size, err := devices.parseStorageOpt(storageOpt) + if err != nil { + return err + } + + if size == 0 { + size = baseInfo.Size + } + + if size < baseInfo.Size { + return fmt.Errorf("devmapper: Container size cannot be smaller than %s", units.HumanSize(float64(baseInfo.Size))) + } + + if err := devices.takeSnapshot(hash, baseInfo, size); err != nil { + return err + } + + // Grow the container rootfs. + if size > baseInfo.Size { + info, err := devices.lookupDevice(hash) + if err != nil { + return err + } + + if err := devices.growFS(info); err != nil { + return err + } + } + + return nil +} + +func (devices *DeviceSet) parseStorageOpt(storageOpt map[string]string) (uint64, error) { + // Read size to change the block device size per container. + for key, val := range storageOpt { + key := strings.ToLower(key) + switch key { + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return 0, err + } + return uint64(size), nil + default: + return 0, fmt.Errorf("unknown option %s", key) + } + } + + return 0, nil +} + +func (devices *DeviceSet) markForDeferredDeletion(info *devInfo) error { + // If device is already in deleted state, there is nothing to be done. + if info.Deleted { + return nil + } + + logrus.Debugf("devmapper: Marking device %s for deferred deletion.", info.Hash) + + info.Deleted = true + + // save device metadata to reflect deleted state. 
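+	// If the save fails, Deleted is rolled back below so the in-memory
+	// state stays consistent with what was persisted on disk.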
+	if err := devices.saveMetadata(info); err != nil {
+		info.Deleted = false
+		return err
+	}
+
+	devices.nrDeletedDevices++
+	return nil
+}
+
+// Should be called with devices.Lock() held.
+func (devices *DeviceSet) deleteDeviceNoLock(info *devInfo, syncDelete bool) error {
+	err := devicemapper.DeleteDevice(devices.getPoolDevName(), info.DeviceID)
+	if err != nil {
+		// Return the error if syncDelete is true, if deferred deletion
+		// is not enabled, or if the error is something other than EBUSY.
+		if syncDelete || !devices.deferredDelete || !errors.Is(err, devicemapper.ErrBusy) {
+			logrus.Debugf("devmapper: Error deleting device: %s", err)
+			return err
+		}
+	}
+
+	if err == nil {
+		if err := devices.unregisterDevice(info.Hash); err != nil {
+			return err
+		}
+		// If device was already in deferred delete state that means
+		// deletion was being tried again later. Reduce the deleted
+		// device count.
+		if info.Deleted {
+			devices.nrDeletedDevices--
+		}
+		devices.markDeviceIDFree(info.DeviceID)
+	} else {
+		if err := devices.markForDeferredDeletion(info); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// Issue discard only if device open count is zero.
+func (devices *DeviceSet) issueDiscard(info *devInfo) error {
+	logrus.Debugf("devmapper: issueDiscard START(device: %s).", info.Hash)
+	defer logrus.Debugf("devmapper: issueDiscard END(device: %s).", info.Hash)
+	// This is a workaround for the kernel not discarding blocks on
+	// the thin pool when we remove a thinp device, so we do it
+	// manually.
+	// Even if device is deferred deleted, activate it and issue
+	// discards.
+	if err := devices.activateDeviceIfNeeded(info, true); err != nil {
+		return err
+	}
+
+	devinfo, err := devicemapper.GetInfo(info.Name())
+	if err != nil {
+		return err
+	}
+
+	if devinfo.OpenCount != 0 {
+		logrus.Debugf("devmapper: Device: %s is in use. OpenCount=%d. Not issuing discards.", info.Hash, devinfo.OpenCount)
+		return nil
+	}
+
+	if err := devicemapper.BlockDeviceDiscard(info.DevName()); err != nil {
+		logrus.Debugf("devmapper: Error discarding block on device: %s (ignoring)", err)
+	}
+	return nil
+}
+
+// Should be called with devices.Lock() held.
+func (devices *DeviceSet) deleteDevice(info *devInfo, syncDelete bool) error {
+	if err := devices.openTransaction(info.Hash, info.DeviceID); err != nil {
+		logrus.WithField("storage-driver", "devicemapper").Debugf("Error opening transaction hash = %s deviceId = %d", info.Hash, info.DeviceID)
+		return err
+	}
+
+	defer devices.closeTransaction()
+
+	if devices.doBlkDiscard {
+		devices.issueDiscard(info)
+	}
+
+	// Try to deactivate device in case it is active.
+	// If deferred removal is enabled and deferred deletion is disabled
+	// then make sure device is removed synchronously. There have been
+	// some cases of device being busy for short duration and we would
+	// rather busy wait for device removal to take care of these cases.
+	deferredRemove := devices.deferredRemove
+	if !devices.deferredDelete {
+		deferredRemove = false
+	}
+
+	if err := devices.deactivateDeviceMode(info, deferredRemove); err != nil {
+		logrus.Debugf("devmapper: Error deactivating device: %s", err)
+		return err
+	}
+
+	if err := devices.deleteDeviceNoLock(info, syncDelete); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// DeleteDevice will return success if the device has been marked for deferred
+// removal. If one wants to override that and wants DeleteDevice() to fail if
+// the device was busy and could not be deleted, set syncDelete=true.
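+// A hedged example of the difference: a caller that must know the device is
+// really gone (rather than merely scheduled for deletion) would use
+// syncDelete=true and treat a busy device as a hard failure:
+//
+//	if err := devices.DeleteDevice(hash, true); err != nil {
+//		return fmt.Errorf("device %s could not be deleted: %w", hash, err)
+//	}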
+func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error {
+	logrus.Debugf("devmapper: DeleteDevice START(hash=%v syncDelete=%v)", hash, syncDelete)
+	defer logrus.Debugf("devmapper: DeleteDevice END(hash=%v syncDelete=%v)", hash, syncDelete)
+	info, err := devices.lookupDeviceWithLock(hash)
+	if err != nil {
+		return err
+	}
+
+	info.lock.Lock()
+	defer info.lock.Unlock()
+
+	devices.Lock()
+	defer devices.Unlock()
+
+	return devices.deleteDevice(info, syncDelete)
+}
+
+func (devices *DeviceSet) deactivatePool() error {
+	logrus.Debug("devmapper: deactivatePool() START")
+	defer logrus.Debug("devmapper: deactivatePool() END")
+	devname := devices.getPoolDevName()
+
+	devinfo, err := devicemapper.GetInfo(devname)
+	if err != nil {
+		return err
+	}
+
+	if devinfo.Exists == 0 {
+		return nil
+	}
+	if err := devicemapper.RemoveDevice(devname); err != nil {
+		return err
+	}
+
+	if d, err := devicemapper.GetDeps(devname); err == nil {
+		logrus.Warnf("devmapper: device %s still has %d active dependents", devname, d.Count)
+	}
+
+	return nil
+}
+
+func (devices *DeviceSet) deactivateDevice(info *devInfo) error {
+	return devices.deactivateDeviceMode(info, devices.deferredRemove)
+}
+
+func (devices *DeviceSet) deactivateDeviceMode(info *devInfo, deferredRemove bool) error {
+	var err error
+	logrus.Debugf("devmapper: deactivateDevice START(%s)", info.Hash)
+	defer logrus.Debugf("devmapper: deactivateDevice END(%s)", info.Hash)
+
+	devinfo, err := devicemapper.GetInfo(info.Name())
+	if err != nil {
+		return err
+	}
+
+	if devinfo.Exists == 0 {
+		return nil
+	}
+
+	if deferredRemove {
+		err = devicemapper.RemoveDeviceDeferred(info.Name())
+	} else {
+		err = devices.removeDevice(info.Name())
+	}
+
+	// The semantics of this function are such that it does not return
+	// an error if the device does not exist. So if the device went away
+	// by the time we actually tried to remove it, do not return an error.
+	if !errors.Is(err, devicemapper.ErrEnxio) {
+		return err
+	}
+	return nil
+}
+
+// Issues the underlying dm remove operation.
+func (devices *DeviceSet) removeDevice(devname string) error {
+	var err error
+
+	logrus.Debugf("devmapper: removeDevice START(%s)", devname)
+	defer logrus.Debugf("devmapper: removeDevice END(%s)", devname)
+
+	for i := 0; i < 200; i++ {
+		err = devicemapper.RemoveDevice(devname)
+		if err == nil {
+			break
+		}
+		if !errors.Is(err, devicemapper.ErrBusy) {
+			return err
+		}
+
+		// If we see EBUSY it may be a transient error,
+		// sleep a bit and retry a few times.
+		devices.Unlock()
+		time.Sleep(100 * time.Millisecond)
+		devices.Lock()
+	}
+
+	return err
+}
+
+func (devices *DeviceSet) cancelDeferredRemovalIfNeeded(info *devInfo) error {
+	if !devices.deferredRemove {
+		return nil
+	}
+
+	logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded START(%s)", info.Name())
+	defer logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded END(%s)", info.Name())
+
+	devinfo, err := devicemapper.GetInfoWithDeferred(info.Name())
+	if err != nil {
+		return err
+	}
+
+	if devinfo != nil && devinfo.DeferredRemove == 0 {
+		return nil
+	}
+
+	// Cancel deferred remove
+	if err := devices.cancelDeferredRemoval(info); err != nil {
+		// If the error is ErrEnxio, the device is probably already
+		// gone. Continue.
+		if !errors.Is(err, devicemapper.ErrEnxio) {
+			return err
+		}
+	}
+	return nil
+}
+func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error {
+	logrus.Debugf("devmapper: cancelDeferredRemoval START(%s)", info.Name())
+	defer logrus.Debugf("devmapper: cancelDeferredRemoval END(%s)", info.Name())
+
+	var err error
+
+	// Cancel deferred remove
+	for i := 0; i < 100; i++ {
+		err = devicemapper.CancelDeferredRemove(info.Name())
+		if err != nil {
+			if errors.Is(err, devicemapper.ErrBusy) {
+				// If we see EBUSY it may be a transient error,
+				// sleep a bit and retry a few times.
+				devices.Unlock()
+				time.Sleep(100 * time.Millisecond)
+				devices.Lock()
+				continue
+			}
+		}
+		break
+	}
+	return err
+}
+
+func (devices *DeviceSet) unmountAndDeactivateAll(dir string) {
+	files, err := os.ReadDir(dir)
+	if err != nil {
+		logrus.Warnf("devmapper: unmountAndDeactivate: %s", err)
+		return
+	}
+
+	for _, d := range files {
+		if !d.IsDir() {
+			continue
+		}
+
+		name := d.Name()
+		fullname := path.Join(dir, name)
+
+		// We use MNT_DETACH here in case it is still busy in some running
+		// container. This means it'll go away from the global scope directly,
+		// and the device will be released when that container dies.
+		if err := mount.Unmount(fullname); err != nil {
+			logrus.Warnf("devmapper.Shutdown error: %s", err)
+		}
+
+		if devInfo, err := devices.lookupDevice(name); err != nil {
+			logrus.Debugf("devmapper: Shutdown lookup device %s, error: %s", name, err)
+		} else {
+			if err := devices.deactivateDevice(devInfo); err != nil {
+				logrus.Debugf("devmapper: Shutdown deactivate %s, error: %s", devInfo.Hash, err)
+			}
+		}
+	}
+}
+
+// Shutdown shuts down the device by unmounting the root.
+func (devices *DeviceSet) Shutdown(home string) error {
+	logrus.Debugf("devmapper: [deviceset %s] Shutdown()", devices.devicePrefix)
+	logrus.Debugf("devmapper: Shutting down DeviceSet: %s", devices.root)
+	defer logrus.Debugf("devmapper: [deviceset %s] Shutdown() END", devices.devicePrefix)
+
+	// Stop the deletion worker. This stops delivering new events to the
+	// ticker channel, which means no new instance of cleanupDeletedDevice()
+	// will run after this call. If one instance is already running at
+	// the time of the call, it must be holding devices.Lock() and
+	// we will block on this lock till the cleanup function exits.
+	devices.deletionWorkerTicker.Stop()
+
+	devices.Lock()
+	// Save DeviceSet Metadata first. Docker kills all threads if they
+	// don't finish in a certain time. It is possible that the Shutdown()
+	// routine does not finish in time as we loop trying to deactivate
+	// some devices while these are busy. In that case the shutdown() routine
+	// will be killed and we will not get a chance to save deviceset
+	// metadata. Hence save this early before trying to deactivate devices.
+	devices.saveDeviceSetMetaData()
+	devices.unmountAndDeactivateAll(path.Join(home, "mnt"))
+	devices.Unlock()
+
+	info, _ := devices.lookupDeviceWithLock("")
+	if info != nil {
+		info.lock.Lock()
+		devices.Lock()
+		if err := devices.deactivateDevice(info); err != nil {
+			logrus.Debugf("devmapper: Shutdown deactivate base, error: %s", err)
+		}
+		devices.Unlock()
+		info.lock.Unlock()
+	}
+
+	devices.Lock()
+	if devices.thinPoolDevice == "" {
+		if err := devices.deactivatePool(); err != nil {
+			logrus.Debugf("devmapper: Shutdown deactivate pool, error: %s", err)
+		}
+	}
+	devices.Unlock()
+
+	return nil
+}
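+// The helper below tunes XFS's ENOSPC retry behavior through sysfs. The
+// manual equivalent, for an illustrative dm device name, would be:
+//
+//	echo 0 > /sys/fs/xfs/dm-3/error/metadata/ENOSPC/max_retries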
+// Recent XFS changes allow changing behavior of filesystem in case of errors.
+// When the thin pool gets full and XFS gets an ENOSPC error, it currently
+// retries the IO infinitely and can block the container process so that the
+// process can't be killed. The dm.xfs_nospace_max_retries option controls how
+// many times XFS retries; with a value of 0, XFS will not retry upon error
+// and will instead shut down the filesystem.
+
+func (devices *DeviceSet) xfsSetNospaceRetries(info *devInfo) error {
+	dmDevicePath, err := os.Readlink(info.DevName())
+	if err != nil {
+		return fmt.Errorf("devmapper: readlink failed for device %v: %w", info.DevName(), err)
+	}
+
+	dmDeviceName := path.Base(dmDevicePath)
+	filePath := "/sys/fs/xfs/" + dmDeviceName + "/error/metadata/ENOSPC/max_retries"
+	maxRetriesFile, err := os.OpenFile(filePath, os.O_WRONLY, 0)
+	if err != nil {
+		return fmt.Errorf("devmapper: user specified daemon option dm.xfs_nospace_max_retries but it does not seem to be supported on this system: %w", err)
+	}
+	defer maxRetriesFile.Close()
+
+	// Write the user-specified retry count (a value of 0 disables retries).
+	_, err = maxRetriesFile.WriteString(devices.xfsNospaceRetries)
+	if err != nil {
+		return fmt.Errorf("devmapper: Failed to write string %v to file %v: %w", devices.xfsNospaceRetries, filePath, err)
+	}
+	return nil
+}
+
+// MountDevice mounts the device if not already mounted.
+func (devices *DeviceSet) MountDevice(hash, path string, moptions graphdriver.MountOpts) error {
+	info, err := devices.lookupDeviceWithLock(hash)
+	if err != nil {
+		return err
+	}
+
+	if info.Deleted {
+		return fmt.Errorf("devmapper: Can't mount device %v as it has been marked for deferred deletion", info.Hash)
+	}
+
+	info.lock.Lock()
+	defer info.lock.Unlock()
+
+	devices.Lock()
+	defer devices.Unlock()
+
+	if err := devices.activateDeviceIfNeeded(info, false); err != nil {
+		return fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err)
+	}
+
+	fstype, err := ProbeFsType(info.DevName())
+	if err != nil {
+		return err
+	}
+
+	options := ""
+
+	if fstype == xfs {
+		// XFS needs nouuid or it can't mount filesystems with the same fs UUID
+		options = joinMountOptions(options, "nouuid")
+	}
+
+	mountOptions := devices.mountOptions
+	if len(moptions.Options) > 0 {
+		// Preserve a configured nouuid option when the caller's options
+		// override the configured mount options.
+		addNouuid := strings.Contains(mountOptions, "nouuid")
+		mountOptions = strings.Join(moptions.Options, ",")
+		if addNouuid {
+			mountOptions = fmt.Sprintf("nouuid,%s", mountOptions)
+		}
+	}
+
+	options = joinMountOptions(options, mountOptions)
+	options = joinMountOptions(options, label.FormatMountLabel("", moptions.MountLabel))
+
+	if err := mount.Mount(info.DevName(), path, fstype, options); err != nil {
+		return fmt.Errorf("failed to mount; dmesg: %s: %w", string(dmesg.Dmesg(256)), err)
+	}
+
+	if fstype == xfs && devices.xfsNospaceRetries != "" {
+		if err := devices.xfsSetNospaceRetries(info); err != nil {
+			if err := mount.Unmount(path); err != nil {
+				logrus.Warnf("devmapper.MountDevice cleanup error: %v", err)
+			}
+			devices.deactivateDevice(info)
+			return err
+		}
+	}
+
+	return nil
+}
+
+// UnmountDevice unmounts the device and removes it from hash.
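+// A hedged lifecycle sketch pairing it with MountDevice above (mnt and opts
+// are illustrative):
+//
+//	if err := devices.MountDevice(hash, mnt, opts); err != nil {
+//		return err
+//	}
+//	defer devices.UnmountDevice(hash, mnt)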
+func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error { + logrus.Debugf("devmapper: UnmountDevice START(hash=%s)", hash) + defer logrus.Debugf("devmapper: UnmountDevice END(hash=%s)", hash) + + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + logrus.Debugf("devmapper: Unmount(%s)", mountPath) + if err := mount.Unmount(mountPath); err != nil { + if ok, _ := Mounted(mountPath); ok { + return err + } + } + logrus.Debug("devmapper: Unmount done") + + // Remove the mountpoint here. Removing the mountpoint (in newer kernels) + // will cause all other instances of this mount in other mount namespaces + // to be killed (this is an anti-DoS measure that is necessary for things + // like devicemapper). This is necessary to avoid cases where a libdm mount + // that is present in another namespace will cause subsequent RemoveDevice + // operations to fail. We ignore any errors here because this may fail on + // older kernels which don't have + // torvalds/linux@8ed936b5671bfb33d89bc60bdcc7cf0470ba52fe applied. + if err := os.Remove(mountPath); err != nil { + logrus.Debugf("devmapper: error doing a remove on unmounted device %s: %v", mountPath, err) + } + + return devices.deactivateDevice(info) +} + +// HasDevice returns true if the device metadata exists. +func (devices *DeviceSet) HasDevice(hash string) bool { + info, _ := devices.lookupDeviceWithLock(hash) + return info != nil +} + +// List returns a list of device ids. +func (devices *DeviceSet) List() []string { + devices.Lock() + defer devices.Unlock() + + ids := make([]string, len(devices.Devices)) + i := 0 + for k := range devices.Devices { + ids[i] = k + i++ + } + return ids +} + +// ListLayers returns a list of device IDs, omitting the ""/"base" device and +// any which have been marked as deleted. 
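+// Unlike List above, which returns every known id (including "" for the base
+// device and ids queued for deferred deletion), ListLayers first runs the
+// deferred-deletion cleanup and then filters both out.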
+func (devices *DeviceSet) ListLayers() ([]string, error) { + if err := devices.cleanupDeletedDevices(); err != nil { + return nil, err + } + + devices.Lock() + defer devices.Unlock() + + ids := make([]string, 0, len(devices.Devices)) + for k, d := range devices.Devices { + if k == "" || d.Deleted { + continue + } + ids = append(ids, k) + } + return ids, nil +} + +func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSectors, highestMappedSector uint64, err error) { + var params string + _, sizeInSectors, _, params, err = devicemapper.GetStatus(devName) + if err != nil { + logrus.Debugf("could not find devicemapper status: %v", err) + return + } + if _, err = fmt.Sscanf(params, "%d %d", &mappedSectors, &highestMappedSector); err != nil { + logrus.Debugf("could not find scanf devicemapper status: %v", err) + } + return +} + +// GetDeviceStatus provides size, mapped sectors +func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) { + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return nil, err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + status := &DevStatus{ + DeviceID: info.DeviceID, + Size: info.Size, + TransactionID: info.TransactionID, + } + + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return nil, fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err) + } + + sizeInSectors, mappedSectors, highestMappedSector, err := devices.deviceStatus(info.DevName()) + if err != nil { + return nil, err + } + + status.SizeInSectors = sizeInSectors + status.MappedSectors = mappedSectors + status.HighestMappedSector = highestMappedSector + + return status, nil +} + +func (devices *DeviceSet) poolStatus() (totalSizeInSectors, transactionID, dataUsed, dataTotal, metadataUsed, metadataTotal uint64, err error) { + var params string + if _, totalSizeInSectors, _, params, err = devicemapper.GetStatus(devices.getPoolName()); err == nil { + _, err = fmt.Sscanf(params, "%d %d/%d %d/%d", &transactionID, &metadataUsed, &metadataTotal, &dataUsed, &dataTotal) + } + return +} + +// DataDevicePath returns the path to the data storage for this deviceset, +// regardless of loopback or block device +func (devices *DeviceSet) DataDevicePath() string { + return devices.dataDevice +} + +// MetadataDevicePath returns the path to the metadata storage for this deviceset, +// regardless of loopback or block device +func (devices *DeviceSet) MetadataDevicePath() string { + return devices.metadataDevice +} + +func (devices *DeviceSet) getUnderlyingAvailableSpace(loopFile string) (uint64, error) { + buf := new(unix.Statfs_t) + if err := unix.Statfs(loopFile, buf); err != nil { + logrus.Warnf("devmapper: Couldn't stat loopfile filesystem %v: %v", loopFile, err) + return 0, err + } + return buf.Bfree * uint64(buf.Bsize), nil +} + +func (devices *DeviceSet) isRealFile(loopFile string) (bool, error) { + if loopFile != "" { + fi, err := os.Stat(loopFile) + if err != nil { + logrus.Warnf("devmapper: Couldn't stat loopfile %v: %v", loopFile, err) + return false, err + } + return fi.Mode().IsRegular(), nil + } + return false, nil +} + +// Status returns the current status of this deviceset +func (devices *DeviceSet) Status() *Status { + devices.Lock() + defer devices.Unlock() + + status := &Status{} + + status.PoolName = devices.getPoolName() + status.DataFile = devices.DataDevicePath() + status.DataLoopback = devices.dataLoopFile + status.MetadataFile = 
devices.MetadataDevicePath()
+	status.MetadataLoopback = devices.metadataLoopFile
+	status.UdevSyncSupported = devicemapper.UdevSyncSupported()
+	status.DeferredRemoveEnabled = devices.deferredRemove
+	status.DeferredDeleteEnabled = devices.deferredDelete
+	status.DeferredDeletedDeviceCount = devices.nrDeletedDevices
+	status.BaseDeviceSize = devices.getBaseDeviceSize()
+	status.BaseDeviceFS = devices.getBaseDeviceFS()
+
+	totalSizeInSectors, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus()
+	if err == nil {
+		// Convert from blocks to bytes
+		blockSizeInSectors := totalSizeInSectors / dataTotal
+
+		status.Data.Used = dataUsed * blockSizeInSectors * 512
+		status.Data.Total = dataTotal * blockSizeInSectors * 512
+		status.Data.Available = status.Data.Total - status.Data.Used
+
+		// metadata blocks are always 4k
+		status.Metadata.Used = metadataUsed * 4096
+		status.Metadata.Total = metadataTotal * 4096
+		status.Metadata.Available = status.Metadata.Total - status.Metadata.Used
+
+		status.SectorSize = blockSizeInSectors * 512
+
+		if check, _ := devices.isRealFile(devices.dataLoopFile); check {
+			actualSpace, err := devices.getUnderlyingAvailableSpace(devices.dataLoopFile)
+			if err == nil && actualSpace < status.Data.Available {
+				status.Data.Available = actualSpace
+			}
+		}
+
+		if check, _ := devices.isRealFile(devices.metadataLoopFile); check {
+			actualSpace, err := devices.getUnderlyingAvailableSpace(devices.metadataLoopFile)
+			if err == nil && actualSpace < status.Metadata.Available {
+				status.Metadata.Available = actualSpace
+			}
+		}
+
+		minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100
+		status.MinFreeSpace = minFreeData * blockSizeInSectors * 512
+	}
+
+	return status
+}
+
+// exportDeviceMetadata exports the device ID, size, and name of the device
+// with the given hash.
+func (devices *DeviceSet) exportDeviceMetadata(hash string) (*deviceMetadata, error) {
+	info, err := devices.lookupDeviceWithLock(hash)
+	if err != nil {
+		return nil, err
+	}
+
+	info.lock.Lock()
+	defer info.lock.Unlock()
+
+	metadata := &deviceMetadata{info.DeviceID, info.Size, info.Name()}
+	return metadata, nil
+}
+
+// NewDeviceSet creates the device set based on the options provided.
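+// A hedged construction sketch (the root path and option values are
+// illustrative; see the option switch below for the full list):
+//
+//	opts := []string{"dm.thinpooldev=/dev/mapper/storage-thinpool", "dm.fs=xfs"}
+//	devices, err := NewDeviceSet("/var/lib/containers/storage/devicemapper", true, opts, nil, nil)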
+func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps []idtools.IDMap) (*DeviceSet, error) { + devicemapper.SetDevDir("/dev") + + devices := &DeviceSet{ + root: root, + metaData: metaData{Devices: make(map[string]*devInfo)}, + dataLoopbackSize: defaultDataLoopbackSize, + metaDataLoopbackSize: defaultMetaDataLoopbackSize, + baseFsSize: defaultBaseFsSize, + overrideUdevSyncCheck: defaultUdevSyncOverride, + doBlkDiscard: true, + thinpBlockSize: defaultThinpBlockSize, + deviceIDMap: make([]byte, deviceIDMapSz), + deletionWorkerTicker: time.NewTicker(time.Second * 30), + uidMaps: uidMaps, + gidMaps: gidMaps, + minFreeSpacePercent: defaultMinFreeSpacePercent, + } + + version, err := devicemapper.GetDriverVersion() + if err != nil { + // Can't even get driver version, assume not supported + return nil, graphdriver.ErrNotSupported + } + + if err := determineDriverCapabilities(version); err != nil { + return nil, graphdriver.ErrNotSupported + } + + if driverDeferredRemovalSupport && devicemapper.LibraryDeferredRemovalSupport { + // enable deferred stuff by default + enableDeferredDeletion = true + enableDeferredRemoval = true + } + + foundBlkDiscard := false + var lvmSetupConfig directLVMConfig + testMode := false + for _, option := range options { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return nil, err + } + key = strings.ToLower(key) + switch key { + case "dm.basesize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + userBaseSize = true + devices.baseFsSize = uint64(size) + case "dm.loopdatasize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + devices.dataLoopbackSize = size + case "dm.loopmetadatasize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + devices.metaDataLoopbackSize = size + case "dm.fs": + if val != ext4 && val != xfs { + return nil, fmt.Errorf("devmapper: Unsupported filesystem %s", val) + } + devices.filesystem = val + case "dm.mkfsarg": + devices.mkfsArgs = append(devices.mkfsArgs, val) + case "dm.mountopt", "devicemapper.mountopt": + devices.mountOptions = joinMountOptions(devices.mountOptions, val) + case "dm.metadatadev": + devices.metadataDevice = val + case "dm.metadata_size": + devices.metaDataSize = val + case "dm.datadev": + devices.dataDevice = val + case "dm.thinpooldev": + devices.thinPoolDevice = strings.TrimPrefix(val, "/dev/mapper/") + case "dm.blkdiscard": + foundBlkDiscard = true + devices.doBlkDiscard, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + case "dm.blocksize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + // convert to 512b sectors + devices.thinpBlockSize = uint32(size) >> 9 + case "dm.override_udev_sync_check": + devices.overrideUdevSyncCheck, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + case "dm.use_deferred_removal": + enableDeferredRemoval, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + case "dm.use_deferred_deletion": + enableDeferredDeletion, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + case "dm.metaDataSize": + lvmSetupConfig.MetaDataSize = val + case "dm.min_free_space": + if !strings.HasSuffix(val, "%") { + return nil, fmt.Errorf("devmapper: Option dm.min_free_space requires %% suffix") + } + + valstring := strings.TrimSuffix(val, "%") + minFreeSpacePercent, err := strconv.ParseUint(valstring, 10, 32) + if err != nil { + return nil, err + } + + if 
minFreeSpacePercent >= 100 {
+			return nil, fmt.Errorf("devmapper: Invalid value %v for option dm.min_free_space", val)
+		}
+
+		devices.minFreeSpacePercent = uint32(minFreeSpacePercent)
+	case "dm.xfs_nospace_max_retries":
+		_, err := strconv.ParseUint(val, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+		devices.xfsNospaceRetries = val
+	case "dm.directlvm_device":
+		lvmSetupConfig.Device = val
+	case "dm.directlvm_device_force":
+		lvmSetupConfigForce, err = strconv.ParseBool(val)
+		if err != nil {
+			return nil, err
+		}
+	case "dm.thinp_percent":
+		per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32)
+		if err != nil {
+			return nil, fmt.Errorf("could not parse `dm.thinp_percent=%s`: %w", val, err)
+		}
+		if per >= 100 {
+			return nil, errors.New("dm.thinp_percent must be less than 100")
+		}
+		lvmSetupConfig.ThinpPercent = per
+	case "dm.thinp_metapercent":
+		per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32)
+		if err != nil {
+			return nil, fmt.Errorf("could not parse `dm.thinp_metapercent=%s`: %w", val, err)
+		}
+		if per >= 100 {
+			return nil, errors.New("dm.thinp_metapercent must be less than 100")
+		}
+		lvmSetupConfig.ThinpMetaPercent = per
+	case "dm.thinp_autoextend_percent":
+		per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32)
+		if err != nil {
+			return nil, fmt.Errorf("could not parse `dm.thinp_autoextend_percent=%s`: %w", val, err)
+		}
+		if per > 100 {
+			return nil, errors.New("dm.thinp_autoextend_percent cannot exceed 100")
+		}
+		lvmSetupConfig.AutoExtendPercent = per
+	case "dm.thinp_autoextend_threshold":
+		per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32)
+		if err != nil {
+			return nil, fmt.Errorf("could not parse `dm.thinp_autoextend_threshold=%s`: %w", val, err)
+		}
+		if per > 100 {
+			return nil, errors.New("dm.thinp_autoextend_threshold cannot exceed 100")
+		}
+		lvmSetupConfig.AutoExtendThreshold = per
+	case "dm.libdm_log_level":
+		level, err := strconv.ParseInt(val, 10, 32)
+		if err != nil {
+			return nil, fmt.Errorf("could not parse `dm.libdm_log_level=%s`: %w", val, err)
+		}
+		if level < devicemapper.LogLevelFatal || level > devicemapper.LogLevelDebug {
+			return nil, fmt.Errorf("dm.libdm_log_level must be in range [%d,%d]", devicemapper.LogLevelFatal, devicemapper.LogLevelDebug)
+		}
+		// Register a new logging callback with the specified level.
+		devicemapper.LogInit(devicemapper.DefaultLogger{
+			Level: int(level),
+		})
+	case "test":
+		testMode, err = strconv.ParseBool(val)
+		if err != nil {
+			return nil, err
+		}
+	default:
+		return nil, fmt.Errorf("devmapper: Unknown option %s", key)
+	}
+	}
+
+	if !testMode {
+		if err := validateLVMConfig(lvmSetupConfig); err != nil {
+			return nil, err
+		}
+	}
+
+	devices.lvmSetupConfig = lvmSetupConfig
+
+	// By default, don't do the blk discard hack on raw devices; it's rarely useful and is expensive
+	if !foundBlkDiscard && (devices.dataDevice != "" || devices.thinPoolDevice != "") {
+		devices.doBlkDiscard = false
+	}
+
+	if err := devices.initDevmapper(doInit); err != nil {
+		return nil, err
+	}
+
+	return devices, nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go b/vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go
new file mode 100644
index 0000000000..f85fb94790
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go
@@ -0,0 +1,109 @@
+//go:build linux && cgo
+// +build linux,cgo
+
+package devmapper
+
+// Definition of struct dm_task and sub structures (from lvm2)
+//
+// struct dm_ioctl {
+//	/*
+//	 * The version number is made up of three parts:
+//	 * major - no backward or forward compatibility,
+//	 * minor - only backwards compatible,
+//	 * patch - both backwards and forwards compatible.
+//	 *
+//	 * All clients of the ioctl interface should fill in the
+//	 * version number of the interface that they were
+//	 * compiled with.
+//	 *
+//	 * All recognized ioctl commands (ie. those that don't
+//	 * return -ENOTTY) fill out this field, even if the
+//	 * command failed.
+//	 */
+//	uint32_t version[3];	/* in/out */
+//	uint32_t data_size;	/* total size of data passed in
+//				 * including this struct */
+
+//	uint32_t data_start;	/* offset to start of data
+//				 * relative to start of this struct */
+
+//	uint32_t target_count;	/* in/out */
+//	int32_t open_count;	/* out */
+//	uint32_t flags;		/* in/out */
+
+//	/*
+//	 * event_nr holds either the event number (input and output) or the
+//	 * udev cookie value (input only).
+//	 * The DM_DEV_WAIT ioctl takes an event number as input.
+//	 * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls
+//	 * use the field as a cookie to return in the DM_COOKIE
+//	 * variable with the uevents they issue.
+//	 * For output, the ioctls return the event number, not the cookie.
+// */ +// uint32_t event_nr; /* in/out */ +// uint32_t padding; + +// uint64_t dev; /* in/out */ + +// char name[DM_NAME_LEN]; /* device name */ +// char uuid[DM_UUID_LEN]; /* unique identifier for +// * the block device */ +// char data[7]; /* padding or data */ +// }; + +// struct target { +// uint64_t start; +// uint64_t length; +// char *type; +// char *params; + +// struct target *next; +// }; + +// typedef enum { +// DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */ +// DM_ADD_NODE_ON_CREATE /* add /dev/mapper node with dmsetup create */ +// } dm_add_node_t; + +// struct dm_task { +// int type; +// char *dev_name; +// char *mangled_dev_name; + +// struct target *head, *tail; + +// int read_only; +// uint32_t event_nr; +// int major; +// int minor; +// int allow_default_major_fallback; +// uid_t uid; +// gid_t gid; +// mode_t mode; +// uint32_t read_ahead; +// uint32_t read_ahead_flags; +// union { +// struct dm_ioctl *v4; +// } dmi; +// char *newname; +// char *message; +// char *geometry; +// uint64_t sector; +// int no_flush; +// int no_open_count; +// int skip_lockfs; +// int query_inactive_table; +// int suppress_identical_reload; +// dm_add_node_t add_node; +// uint64_t existing_table_size; +// int cookie_set; +// int new_uuid; +// int secure_data; +// int retry_remove; +// int enable_checks; +// int expected_errno; + +// char *uuid; +// char *mangled_uuid; +// }; +// diff --git a/vendor/github.com/containers/storage/drivers/devmapper/driver.go b/vendor/github.com/containers/storage/drivers/devmapper/driver.go new file mode 100644 index 0000000000..8b8a1d1778 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/devmapper/driver.go @@ -0,0 +1,271 @@ +//go:build linux && cgo +// +build linux,cgo + +package devmapper + +import ( + "fmt" + "os" + "path" + "strconv" + + graphdriver "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/devicemapper" + "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/locker" + "github.com/containers/storage/pkg/mount" + units "github.com/docker/go-units" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +const defaultPerms = os.FileMode(0o555) + +func init() { + graphdriver.MustRegister("devicemapper", Init) +} + +// Driver contains the device set mounted and the home directory +type Driver struct { + *DeviceSet + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter + locker *locker.Locker +} + +// Init creates a driver with the given home and the set of options. +func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) { + deviceSet, err := NewDeviceSet(home, true, options.DriverOptions, options.UIDMaps, options.GIDMaps) + if err != nil { + return nil, err + } + + if err := mount.MakePrivate(home); err != nil { + return nil, err + } + + d := &Driver{ + DeviceSet: deviceSet, + home: home, + uidMaps: options.UIDMaps, + gidMaps: options.GIDMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()), + locker: locker.New(), + } + return graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d)), nil +} + +func (d *Driver) String() string { + return "devicemapper" +} + +// Status returns the status about the driver in a printable format. 
+// Information returned contains Pool Name, Data File, Metadata file, disk usage by +// the data and metadata, etc. +func (d *Driver) Status() [][2]string { + s := d.DeviceSet.Status() + + status := [][2]string{ + {"Pool Name", s.PoolName}, + {"Pool Blocksize", units.HumanSize(float64(s.SectorSize))}, + {"Base Device Size", units.HumanSize(float64(s.BaseDeviceSize))}, + {"Backing Filesystem", s.BaseDeviceFS}, + {"Data file", s.DataFile}, + {"Metadata file", s.MetadataFile}, + {"Data Space Used", units.HumanSize(float64(s.Data.Used))}, + {"Data Space Total", units.HumanSize(float64(s.Data.Total))}, + {"Data Space Available", units.HumanSize(float64(s.Data.Available))}, + {"Metadata Space Used", units.HumanSize(float64(s.Metadata.Used))}, + {"Metadata Space Total", units.HumanSize(float64(s.Metadata.Total))}, + {"Metadata Space Available", units.HumanSize(float64(s.Metadata.Available))}, + {"Thin Pool Minimum Free Space", units.HumanSize(float64(s.MinFreeSpace))}, + {"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)}, + {"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)}, + {"Deferred Deletion Enabled", fmt.Sprintf("%v", s.DeferredDeleteEnabled)}, + {"Deferred Deleted Device Count", fmt.Sprintf("%v", s.DeferredDeletedDeviceCount)}, + } + if len(s.DataLoopback) > 0 { + status = append(status, [2]string{"Data loop file", s.DataLoopback}) + } + if len(s.MetadataLoopback) > 0 { + status = append(status, [2]string{"Metadata loop file", s.MetadataLoopback}) + } + if vStr, err := devicemapper.GetLibraryVersion(); err == nil { + status = append(status, [2]string{"Library Version", vStr}) + } + return status +} + +// Metadata returns a map of information about the device. +func (d *Driver) Metadata(id string) (map[string]string, error) { + m, err := d.DeviceSet.exportDeviceMetadata(id) + if err != nil { + return nil, err + } + + metadata := make(map[string]string) + metadata["DeviceId"] = strconv.Itoa(m.deviceID) + metadata["DeviceSize"] = strconv.FormatUint(m.deviceSize, 10) + metadata["DeviceName"] = m.deviceName + return metadata, nil +} + +// Cleanup unmounts a device. +func (d *Driver) Cleanup() error { + err := d.DeviceSet.Shutdown(d.home) + + umountErr := mount.Unmount(d.home) + // in case we have two errors, prefer the one from Shutdown() + if err != nil { + return err + } + + return umountErr +} + +// CreateFromTemplate creates a layer with the same contents and parent as another layer. +func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error { + return d.Create(id, template, opts) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create adds a device with a given id and the parent. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + var storageOpt map[string]string + if opts != nil { + storageOpt = opts.StorageOpt + } + + if err := d.DeviceSet.AddDevice(id, parent, storageOpt); err != nil { + return err + } + + return nil +} + +// Remove removes a device with a given id, unmounts the filesystem, and removes the mount point. 
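+// Removing an id whose device no longer exists is deliberately a no-op, so a
+// removal that failed earlier can be retried safely (illustrative):
+//
+//	_ = d.Remove("already-gone-id") // returns nil rather than an error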
+func (d *Driver) Remove(id string) error { + d.locker.Lock(id) + defer d.locker.Unlock(id) + if !d.DeviceSet.HasDevice(id) { + // Consider removing a non-existing device a no-op + // This is useful to be able to progress on container removal + // if the underlying device has gone away due to earlier errors + return nil + } + + // This assumes the device has been properly Get/Put:ed and thus is unmounted + if err := d.DeviceSet.DeleteDevice(id, false); err != nil { + return fmt.Errorf("failed to remove device %s: %v", id, err) + } + + // Most probably the mount point is already removed on Put() + // (see DeviceSet.UnmountDevice()), but just in case it was not + // let's try to remove it here as well, ignoring errors as + // an older kernel can return EBUSY if e.g. the mount was leaked + // to other mount namespaces. A failure to remove the container's + // mount point is not important and should not be treated + // as a failure to remove the container. + mp := path.Join(d.home, "mnt", id) + err := unix.Rmdir(mp) + if err != nil && !os.IsNotExist(err) { + logrus.WithField("storage-driver", "devicemapper").Warnf("unable to remove mount point %q: %s", mp, err) + } + + return nil +} + +// Get mounts a device with given id into the root filesystem +func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { + d.locker.Lock(id) + defer d.locker.Unlock(id) + mp := path.Join(d.home, "mnt", id) + rootFs := path.Join(mp, "rootfs") + if count := d.ctr.Increment(mp); count > 1 { + return rootFs, nil + } + + uid, gid, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + d.ctr.Decrement(mp) + return "", err + } + + // Create the target directories if they don't exist + if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0o755, uid, gid); err != nil { + d.ctr.Decrement(mp) + return "", err + } + if err := idtools.MkdirAs(mp, 0o755, uid, gid); err != nil && !os.IsExist(err) { + d.ctr.Decrement(mp) + return "", err + } + + // Mount the device + if err := d.DeviceSet.MountDevice(id, mp, options); err != nil { + d.ctr.Decrement(mp) + return "", err + } + + if err := idtools.MkdirAllAs(rootFs, defaultPerms, uid, gid); err != nil { + d.ctr.Decrement(mp) + d.DeviceSet.UnmountDevice(id, mp) + return "", err + } + + idFile := path.Join(mp, "id") + if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) { + // Create an "id" file with the container/image id in it to help reconstruct this in case + // of later problems + if err := os.WriteFile(idFile, []byte(id), 0o600); err != nil { + d.ctr.Decrement(mp) + d.DeviceSet.UnmountDevice(id, mp) + return "", err + } + } + + return rootFs, nil +} + +// Put unmounts a device and removes it. +func (d *Driver) Put(id string) error { + d.locker.Lock(id) + defer d.locker.Unlock(id) + mp := path.Join(d.home, "mnt", id) + if count := d.ctr.Decrement(mp); count > 0 { + return nil + } + + err := d.DeviceSet.UnmountDevice(id, mp) + if err != nil { + logrus.Errorf("devmapper: Error unmounting device %s: %v", id, err) + } + + return err +} + +// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID. +// For devmapper, it queries the mnt path for this ID. +func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) { + d.locker.Lock(id) + defer d.locker.Unlock(id) + return directory.Usage(path.Join(d.home, "mnt", id)) +} + +// Exists checks to see if the device exists. 
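+// Note that Get and Put above are reference counted per mount point: a second
+// Get on the same id reuses the existing mount, and only the matching final
+// Put actually unmounts (illustrative):
+//
+//	p, _ := d.Get(id, graphdriver.MountOpts{}) // mounts
+//	q, _ := d.Get(id, graphdriver.MountOpts{}) // q == p, count = 2
+//	d.Put(id)                                  // still mounted
+//	d.Put(id)                                  // unmounted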
+func (d *Driver) Exists(id string) bool { + return d.DeviceSet.HasDevice(id) +} + +// AdditionalImageStores returns additional image stores supported by the driver +func (d *Driver) AdditionalImageStores() []string { + return nil +} diff --git a/vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go b/vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go new file mode 100644 index 0000000000..52f0e863e5 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go @@ -0,0 +1,8 @@ +//go:build linux && cgo +// +build linux,cgo + +package devmapper + +import jsoniter "github.com/json-iterator/go" + +var json = jsoniter.ConfigCompatibleWithStandardLibrary diff --git a/vendor/github.com/containers/storage/drivers/devmapper/mount.go b/vendor/github.com/containers/storage/drivers/devmapper/mount.go new file mode 100644 index 0000000000..db903ca552 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/devmapper/mount.go @@ -0,0 +1,89 @@ +//go:build linux && cgo +// +build linux,cgo + +package devmapper + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + + "golang.org/x/sys/unix" +) + +// FIXME: this is copy-pasted from the aufs driver. +// It should be moved into the core. + +// Mounted returns true if a mount point exists. +func Mounted(mountpoint string) (bool, error) { + var mntpointSt unix.Stat_t + if err := unix.Stat(mountpoint, &mntpointSt); err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + var parentSt unix.Stat_t + if err := unix.Stat(filepath.Join(mountpoint, ".."), &parentSt); err != nil { + return false, err + } + return mntpointSt.Dev != parentSt.Dev, nil +} + +type probeData struct { + fsName string + magic string + offset uint64 +} + +// ProbeFsType returns the filesystem name for the given device id. 
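+// Detection reads just enough of the device to cover the largest magic offset
+// and compares raw signatures: "_BHRfS_M" at 0x10040 for btrfs, bytes
+// 0x53 0xEF at 0x438 for ext4, and "XFSB" at offset 0 for xfs. A hedged usage
+// sketch with an illustrative device path:
+//
+//	fstype, err := ProbeFsType("/dev/mapper/container-253:0-1234-base")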
+func ProbeFsType(device string) (string, error) { + probes := []probeData{ + {"btrfs", "_BHRfS_M", 0x10040}, + {"ext4", "\123\357", 0x438}, + {"xfs", "XFSB", 0}, + } + + maxLen := uint64(0) + for _, p := range probes { + l := p.offset + uint64(len(p.magic)) + if l > maxLen { + maxLen = l + } + } + + file, err := os.Open(device) + if err != nil { + return "", err + } + defer file.Close() + + buffer := make([]byte, maxLen) + l, err := file.Read(buffer) + if err != nil { + return "", err + } + + if uint64(l) != maxLen { + return "", fmt.Errorf("devmapper: unable to detect filesystem type of %s, short read", device) + } + + for _, p := range probes { + if bytes.Equal([]byte(p.magic), buffer[p.offset:p.offset+uint64(len(p.magic))]) { + return p.fsName, nil + } + } + + return "", fmt.Errorf("devmapper: Unknown filesystem type on %s", device) +} + +func joinMountOptions(a, b string) string { + if a == "" { + return b + } + if b == "" { + return a + } + return a + "," + b +} diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go new file mode 100644 index 0000000000..ab32d652e7 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/driver.go @@ -0,0 +1,472 @@ +package graphdriver + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/idtools" + digest "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" + "github.com/vbatts/tar-split/tar/storage" +) + +// FsMagic unsigned id of the filesystem in use. +type FsMagic uint32 + +const ( + // FsMagicUnsupported is a predefined constant value other than a valid filesystem id. + FsMagicUnsupported = FsMagic(0x00000000) +) + +var ( + // All registered drivers + drivers map[string]InitFunc + + // ErrNotSupported returned when driver is not supported. + ErrNotSupported = errors.New("driver not supported") + // ErrPrerequisites returned when driver does not meet prerequisites. + ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)") + // ErrIncompatibleFS returned when file system is not supported. + ErrIncompatibleFS = errors.New("backing file system is unsupported for this graph driver") + // ErrLayerUnknown returned when the specified layer is unknown by the driver. + ErrLayerUnknown = errors.New("unknown layer") +) + +// CreateOpts contains optional arguments for Create() and CreateReadWrite() +// methods. +type CreateOpts struct { + MountLabel string + StorageOpt map[string]string + *idtools.IDMappings + ignoreChownErrors bool +} + +// MountOpts contains optional arguments for Driver.Get() methods. +type MountOpts struct { + // Mount label is the MAC Labels to assign to mount point (SELINUX) + MountLabel string + // UidMaps & GidMaps are the User Namespace mappings to be assigned to content in the mount point + UidMaps []idtools.IDMap //nolint: revive,golint + GidMaps []idtools.IDMap //nolint: revive,golint + Options []string + + // Volatile specifies whether the container storage can be optimized + // at the cost of not syncing all the dirty files in memory. + Volatile bool + + // DisableShifting forces the driver to not do any ID shifting at runtime. + DisableShifting bool +} + +// ApplyDiffOpts contains optional arguments for ApplyDiff methods. 
+type ApplyDiffOpts struct { + Diff io.Reader + Mappings *idtools.IDMappings + MountLabel string + IgnoreChownErrors bool + ForceMask *os.FileMode +} + +// InitFunc initializes the storage driver. +type InitFunc func(homedir string, options Options) (Driver, error) + +// ProtoDriver defines the basic capabilities of a driver. +// This interface exists solely to be a minimum set of methods +// for client code which choose not to implement the entire Driver +// interface and use the NaiveDiffDriver wrapper constructor. +// +// Use of ProtoDriver directly by client code is not recommended. +type ProtoDriver interface { + // String returns a string representation of this driver. + String() string + // CreateReadWrite creates a new, empty filesystem layer that is ready + // to be used as the storage for a container. Additional options can + // be passed in opts. parent may be "" and opts may be nil. + CreateReadWrite(id, parent string, opts *CreateOpts) error + // Create creates a new, empty, filesystem layer with the + // specified id and parent and options passed in opts. Parent + // may be "" and opts may be nil. + Create(id, parent string, opts *CreateOpts) error + // CreateFromTemplate creates a new filesystem layer with the specified id + // and parent, with contents identical to the specified template layer. + CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *CreateOpts, readWrite bool) error + // Remove attempts to remove the filesystem layer with this id. + Remove(id string) error + // Get returns the mountpoint for the layered filesystem referred + // to by this id. You can optionally specify a mountLabel or "". + // Optionally it gets the mappings used to create the layer. + // Returns the absolute path to the mounted layered filesystem. + Get(id string, options MountOpts) (dir string, err error) + // Put releases the system resources for the specified id, + // e.g, unmounting layered filesystem. + Put(id string) error + // Exists returns whether a filesystem layer with the specified + // ID exists on this driver. + Exists(id string) bool + // Returns a list of layer ids that exist on this driver (does not include + // additional storage layers). Not supported by all backends. + // If the driver requires that layers be removed in a particular order, + // usually due to parent-child relationships that it cares about, The + // list should be sorted well enough so that if all layers need to be + // removed, they can be removed in the order in which they're returned. + ListLayers() ([]string, error) + // Status returns a set of key-value pairs which give low + // level diagnostic status about this driver. + Status() [][2]string + // Returns a set of key-value pairs which give low level information + // about the image/container driver is managing. + Metadata(id string) (map[string]string, error) + // ReadWriteDiskUsage returns the disk usage of the writable directory for the specified ID. + ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) + // Cleanup performs necessary tasks to release resources + // held by the driver, e.g., unmounting all layered filesystems + // known to this driver. + Cleanup() error + // AdditionalImageStores returns additional image stores supported by the driver + // This API is experimental and can be changed without bumping the major version number. 
+	AdditionalImageStores() []string
+}
+
+// DiffDriver is the interface to implement for graph diffs.
+type DiffDriver interface {
+	// Diff produces an archive of the changes between the specified
+	// layer and its parent layer which may be "".
+	Diff(id string, idMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error)
+	// Changes produces a list of changes between the specified layer
+	// and its parent layer. If parent is "", then all changes will be ADD changes.
+	Changes(id string, idMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error)
+	// ApplyDiff extracts the changeset from the given diff into the
+	// layer with the specified id and parent, returning the size of the
+	// new layer in bytes.
+	// The io.Reader must be an uncompressed stream.
+	ApplyDiff(id string, parent string, options ApplyDiffOpts) (size int64, err error)
+	// DiffSize calculates the changes between the specified id
+	// and its parent and returns the size in bytes of the changes
+	// relative to its base filesystem directory.
+	DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, mountLabel string) (size int64, err error)
+}
+
+// LayerIDMapUpdater is the interface that implements ID map changes for layers.
+type LayerIDMapUpdater interface {
+	// UpdateLayerIDMap walks the layer's filesystem tree, changing the ownership
+	// information using the toContainer and toHost mappings, using them to replace
+	// on-disk owner UIDs and GIDs which are "host" values in the first map with
+	// UIDs and GIDs for "host" values from the second map which correspond to the
+	// same "container" IDs. This method should only be called after a layer is
+	// first created and populated, and before it is mounted, as other changes made
+	// relative to a parent layer, but before this method is called, may be discarded
+	// by Diff().
+	UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error
+
+	// SupportsShifting tells whether the driver supports shifting of the UIDs/GIDs
+	// in an image so that chowning the files is not required when running in a
+	// user namespace.
+	SupportsShifting() bool
+}
+
+// Driver is the interface for layered/snapshot file system drivers.
+type Driver interface {
+	ProtoDriver
+	DiffDriver
+	LayerIDMapUpdater
+}
+
+// DriverWithDifferOutput is the result of ApplyDiffWithDiffer.
+// This API is experimental and can be changed without bumping the major version number.
+type DriverWithDifferOutput struct {
+	Differ             Differ
+	Target             string
+	Size               int64
+	UIDs               []uint32
+	GIDs               []uint32
+	UncompressedDigest digest.Digest
+	Metadata           string
+	BigData            map[string][]byte
+	TarSplit           []byte
+	TOCDigest          digest.Digest
+	// Artifacts is a collection of additional artifacts
+	// generated by the differ that the storage driver can use.
+	Artifacts map[string]interface{}
+}
+
+type DifferOutputFormat int
+
+const (
+	// DifferOutputFormatDir means the output is a directory and it will
+	// keep the original layout.
+	DifferOutputFormatDir = iota
+	// DifferOutputFormatFlat will store the files by their checksum, in the form
+	// checksum[0:2]/checksum[2:]
+	DifferOutputFormatFlat
+)
+
+// DifferOptions overrides how the differ works.
+type DifferOptions struct {
+	// Format defines the destination directory layout format
+	Format DifferOutputFormat
+}
+
+// Differ defines the interface for using a custom differ.
+// This API is experimental and can be changed without bumping the major version number.
+type Differ interface {
+	ApplyDiff(dest string, options *archive.TarOptions, differOpts *DifferOptions) (DriverWithDifferOutput, error)
+}
+
+// DriverWithDiffer is the interface for direct diff access.
+// This API is experimental and can be changed without bumping the major version number.
+type DriverWithDiffer interface {
+	Driver
+	// ApplyDiffWithDiffer applies the changes using the callback function.
+	// If id is empty, then a staging directory is created. The staging directory is guaranteed to be usable with ApplyDiffFromStagingDirectory.
+	ApplyDiffWithDiffer(id, parent string, options *ApplyDiffOpts, differ Differ) (output DriverWithDifferOutput, err error)
+	// ApplyDiffFromStagingDirectory applies the changes using the specified staging directory.
+	ApplyDiffFromStagingDirectory(id, parent, stagingDirectory string, diffOutput *DriverWithDifferOutput, options *ApplyDiffOpts) error
+	// CleanupStagingDirectory cleans up the staging directory. It can be used to clean up the staging directory on errors.
+	CleanupStagingDirectory(stagingDirectory string) error
+	// DifferTarget gets the location where files are stored for the layer.
+	DifferTarget(id string) (string, error)
+}
+
+// Capabilities defines a list of capabilities a driver may implement.
+// These capabilities are not required; however, they do determine how a
+// graphdriver can be used.
+type Capabilities struct {
+	// Flags that this driver is capable of reproducing exactly equivalent
+	// diffs for read-only layers. If set, clients can rely on the driver
+	// for consistent tar streams, and avoid extra processing to account
+	// for potential differences (eg: the layer store's use of tar-split).
+	ReproducesExactDiffs bool
+}
+
+// CapabilityDriver is the interface for layered file system drivers that
+// can report on their Capabilities.
+type CapabilityDriver interface {
+	Capabilities() Capabilities
+}
+
+// AdditionalLayer represents a layer that is stored in the additional layer store.
+// This API is experimental and can be changed without bumping the major version number.
+type AdditionalLayer interface {
+	// CreateAs creates a new layer from this additional layer
+	CreateAs(id, parent string) error
+
+	// Info returns arbitrary information stored along with this layer (i.e. `info` file)
+	Info() (io.ReadCloser, error)
+
+	// Blob returns a reader of the raw contents of this layer.
+	Blob() (io.ReadCloser, error)
+
+	// Release tells the additional layer store that this handle is no longer used.
+	Release()
+}
+
+// AdditionalLayerStoreDriver is the interface for a driver that supports the
+// additional layer store functionality.
+// This API is experimental and can be changed without bumping the major version number.
+type AdditionalLayerStoreDriver interface {
+	Driver
+
+	// LookupAdditionalLayer looks up the additional layer store by the specified
+	// digest and ref and returns an object representing that layer.
+	LookupAdditionalLayer(d digest.Digest, ref string) (AdditionalLayer, error)
+
+	// LookupAdditionalLayerByID looks up the additional layer store by the specified
+	// ID and returns an object representing that layer.
+	LookupAdditionalLayerByID(id string) (AdditionalLayer, error)
+}
+
+// DiffGetterDriver is the interface for layered file system drivers that
+// provide a specialized function for getting file contents for tar-split.
+type DiffGetterDriver interface {
+	Driver
+	// DiffGetter returns an interface to efficiently retrieve the contents
+	// of files in a layer.
+	DiffGetter(id string) (FileGetCloser, error)
+}
+
+// FileGetCloser extends the storage.FileGetter interface with a Close method
+// for cleaning up.
+type FileGetCloser interface {
+	storage.FileGetter
+	// Close cleans up any resources associated with the FileGetCloser.
+	Close() error
+}
+
+// Checker makes checks on specified filesystems.
+type Checker interface {
+	// IsMounted returns true if the provided path is mounted for the specific checker
+	IsMounted(path string) bool
+}
+
+func init() {
+	drivers = make(map[string]InitFunc)
+}
+
+// MustRegister registers an InitFunc for the driver, or panics.
+// It is suitable for a package's init() function.
+func MustRegister(name string, initFunc InitFunc) {
+	if err := Register(name, initFunc); err != nil {
+		panic(fmt.Sprintf("failed to register containers/storage graph driver %q: %v", name, err))
+	}
+}
+
+// Register registers an InitFunc for the driver.
+func Register(name string, initFunc InitFunc) error {
+	if _, exists := drivers[name]; exists {
+		return fmt.Errorf("name already registered %s", name)
+	}
+	drivers[name] = initFunc
+
+	return nil
+}
+
+// GetDriver initializes and returns the registered driver.
+func GetDriver(name string, config Options) (Driver, error) {
+	if initFunc, exists := drivers[name]; exists {
+		return initFunc(filepath.Join(config.Root, name), config)
+	}
+
+	logrus.Errorf("Failed to GetDriver graph %s %s", name, config.Root)
+	return nil, fmt.Errorf("failed to GetDriver graph %s %s: %w", name, config.Root, ErrNotSupported)
+}
+
+// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins.
+func getBuiltinDriver(name, home string, options Options) (Driver, error) {
+	if initFunc, exists := drivers[name]; exists {
+		return initFunc(filepath.Join(home, name), options)
+	}
+	logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home)
+	return nil, fmt.Errorf("failed to built-in GetDriver graph %s %s: %w", name, home, ErrNotSupported)
+}
+
+// Options is used to initialize a graphdriver.
+type Options struct {
+	Root                string
+	RunRoot             string
+	ImageStore          string
+	DriverPriority      []string
+	DriverOptions       []string
+	UIDMaps             []idtools.IDMap
+	GIDMaps             []idtools.IDMap
+	ExperimentalEnabled bool
+}
+
+// New creates the driver and initializes it at the specified root.
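+//
+// A hedged usage sketch (the paths and driver name are illustrative only):
+//
+//	drv, err := graphdriver.New("overlay", graphdriver.Options{
+//		Root:    "/var/lib/containers/storage",
+//		RunRoot: "/run/containers/storage",
+//	})
+//	if err != nil {
+//		// no usable driver found
+//	}
+//	defer drv.Cleanup()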
+func New(name string, config Options) (Driver, error) {
+	if name != "" {
+		logrus.Debugf("[graphdriver] trying provided driver %q", name) // so the logs show specified driver
+		return GetDriver(name, config)
+	}
+
+	// Guess for prior driver
+	driversMap := ScanPriorDrivers(config.Root)
+
+	// use the supplied priority list unless it is empty
+	prioList := config.DriverPriority
+	if len(prioList) == 0 {
+		prioList = Priority
+	}
+
+	for _, name := range prioList {
+		if name == "vfs" && len(config.DriverPriority) == 0 {
+			// don't use vfs even if there is state present and vfs
+			// has not been explicitly added to the override driver
+			// priority list
+			continue
+		}
+		if _, prior := driversMap[name]; prior {
+			// of the state found from prior drivers, check in order of our priority
+			// which we would prefer
+			driver, err := getBuiltinDriver(name, config.Root, config)
+			if err != nil {
+				// unlike below, we will return error here, because there is prior
+				// state, and now it is no longer supported/prereq/compatible, so
+				// something changed and needs attention. Otherwise the daemon's
+				// images would just "disappear".
+				logrus.Errorf("[graphdriver] prior storage driver %s failed: %s", name, err)
+				return nil, err
+			}
+
+			// abort starting when there are other prior configured drivers
+			// to ensure the user explicitly selects the driver to load
+			if len(driversMap)-1 > 0 {
+				var driversSlice []string
+				for name := range driversMap {
+					driversSlice = append(driversSlice, name)
+				}
+
+				return nil, fmt.Errorf("%s contains several valid graphdrivers: %s; please clean up or explicitly choose a storage driver (-s <DRIVER>)", config.Root, strings.Join(driversSlice, ", "))
+			}
+
+			logrus.Infof("[graphdriver] using prior storage driver: %s", name)
+			return driver, nil
+		}
+	}
+
+	// Check for priority drivers first
+	for _, name := range prioList {
+		driver, err := getBuiltinDriver(name, config.Root, config)
+		if err != nil {
+			if isDriverNotSupported(err) {
+				continue
+			}
+			return nil, err
+		}
+		return driver, nil
+	}
+
+	// Check all registered drivers if no priority driver is found
+	for name, initFunc := range drivers {
+		driver, err := initFunc(filepath.Join(config.Root, name), config)
+		if err != nil {
+			if isDriverNotSupported(err) {
+				continue
+			}
+			return nil, err
+		}
+		return driver, nil
+	}
+	return nil, fmt.Errorf("no supported storage backend found")
+}
+
+// isDriverNotSupported returns true if the error initializing
+// the graph driver is a non-supported error.
+func isDriverNotSupported(err error) bool {
+	return errors.Is(err, ErrNotSupported) || errors.Is(err, ErrPrerequisites) || errors.Is(err, ErrIncompatibleFS)
+}
+
+// ScanPriorDrivers returns an unordered scan of the directories of prior storage drivers.
+func ScanPriorDrivers(root string) map[string]bool {
+	driversMap := make(map[string]bool)
+
+	for driver := range drivers {
+		p := filepath.Join(root, driver)
+		if _, err := os.Stat(p); err == nil {
+			driversMap[driver] = true
+		}
+	}
+	return driversMap
+}
+
+// driverPut is driver.Put, but errors are handled either by updating mainErr or just logging.
+// Typical usage:
+//
+//	func …(…) (err error) {
+//		…
+//		defer driverPut(driver, id, &err)
+//	}
+func driverPut(driver ProtoDriver, id string, mainErr *error) {
+	if err := driver.Put(id); err != nil {
+		err = fmt.Errorf("unmounting layer %s: %w", id, err)
+		if *mainErr == nil {
+			*mainErr = err
+		} else {
+			logrus.Errorf(err.Error())
+		}
+	}
+}
diff --git a/vendor/github.com/containers/storage/drivers/driver_darwin.go b/vendor/github.com/containers/storage/drivers/driver_darwin.go
new file mode 100644
index 0000000000..b60883a9ea
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/driver_darwin.go
@@ -0,0 +1,12 @@
+package graphdriver
+
+// Slice of drivers that should be used in order
+var Priority = []string{
+	"vfs",
+}
+
+// GetFSMagic returns the filesystem id given the path.
+func GetFSMagic(rootpath string) (FsMagic, error) {
+	// Note it is OK to return FsMagicUnsupported on Darwin.
+	return FsMagicUnsupported, nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/driver_freebsd.go b/vendor/github.com/containers/storage/drivers/driver_freebsd.go
new file mode 100644
index 0000000000..a6072ab56a
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/driver_freebsd.go
@@ -0,0 +1,48 @@
+package graphdriver
+
+import (
+	"golang.org/x/sys/unix"
+
+	"github.com/containers/storage/pkg/mount"
+)
+
+const (
+	// FsMagicZfs filesystem id for Zfs
+	FsMagicZfs = FsMagic(0x2fc12fc1)
+)
+
+var (
+	// Slice of drivers that should be used in order
+	Priority = []string{
+		"zfs",
+		"vfs",
+	}
+
+	// FsNames maps filesystem id to name of the filesystem.
+	FsNames = map[FsMagic]string{
+		FsMagicZfs: "zfs",
+	}
+)
+
+// NewDefaultChecker returns a checker that uses the mount package to
+// determine whether the specified path is mounted.
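+//
+// Illustrative use (the path is hypothetical):
+//
+//	if NewDefaultChecker().IsMounted("/var/db/containers/storage/zfs") {
+//		// already mounted; skip mounting again
+//	}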
+func NewDefaultChecker() Checker {
+	return &defaultChecker{}
+}
+
+type defaultChecker struct{}
+
+func (c *defaultChecker) IsMounted(path string) bool {
+	m, _ := mount.Mounted(path)
+	return m
+}
+
+// Mounted checks if the given path is mounted as the fs type
+func Mounted(fsType FsMagic, mountPath string) (bool, error) {
+	var buf unix.Statfs_t
+	if err := unix.Statfs(mountPath, &buf); err != nil {
+		return false, err
+	}
+	return FsMagic(buf.Type) == fsType, nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/driver_linux.go b/vendor/github.com/containers/storage/drivers/driver_linux.go
new file mode 100644
index 0000000000..cd806b8ff6
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/driver_linux.go
@@ -0,0 +1,202 @@
+//go:build linux
+// +build linux
+
+package graphdriver
+
+import (
+	"path/filepath"
+
+	"github.com/containers/storage/pkg/mount"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
+)
+
+const (
+	// FsMagicAufs filesystem id for Aufs
+	FsMagicAufs = FsMagic(0x61756673)
+	// FsMagicBtrfs filesystem id for Btrfs
+	FsMagicBtrfs = FsMagic(0x9123683E)
+	// FsMagicCramfs filesystem id for Cramfs
+	FsMagicCramfs = FsMagic(0x28cd3d45)
+	// FsMagicEcryptfs filesystem id for eCryptfs
+	FsMagicEcryptfs = FsMagic(0xf15f)
+	// FsMagicExtfs filesystem id for Extfs
+	FsMagicExtfs = FsMagic(0x0000EF53)
+	// FsMagicF2fs filesystem id for F2fs
+	FsMagicF2fs = FsMagic(0xF2F52010)
+	// FsMagicGPFS filesystem id for GPFS
+	FsMagicGPFS = FsMagic(0x47504653)
+	// FsMagicJffs2Fs filesystem id for Jffs2Fs
+	FsMagicJffs2Fs = FsMagic(0x000072b6)
+	// FsMagicJfs filesystem id for Jfs
+	FsMagicJfs = FsMagic(0x3153464a)
+	// FsMagicNfsFs filesystem id for NfsFs
+	FsMagicNfsFs = FsMagic(0x00006969)
+	// FsMagicRAMFs filesystem id for RamFs
+	FsMagicRAMFs = FsMagic(0x858458f6)
+	// FsMagicReiserFs filesystem id for ReiserFs
+	FsMagicReiserFs = FsMagic(0x52654973)
+	// FsMagicSmbFs filesystem id for SmbFs
+	FsMagicSmbFs = FsMagic(0x0000517B)
+	// FsMagicSquashFs filesystem id for SquashFs
+	FsMagicSquashFs = FsMagic(0x73717368)
+	// FsMagicTmpFs filesystem id for TmpFs
+	FsMagicTmpFs = FsMagic(0x01021994)
+	// FsMagicVxFS filesystem id for VxFs
+	FsMagicVxFS = FsMagic(0xa501fcf5)
+	// FsMagicXfs filesystem id for Xfs
+	FsMagicXfs = FsMagic(0x58465342)
+	// FsMagicZfs filesystem id for Zfs
+	FsMagicZfs = FsMagic(0x2fc12fc1)
+	// FsMagicOverlay filesystem id for overlay
+	FsMagicOverlay = FsMagic(0x794C7630)
+	// FsMagicFUSE filesystem id for FUSE
+	FsMagicFUSE = FsMagic(0x65735546)
+	// FsMagicAcfs filesystem id for Acfs
+	FsMagicAcfs = FsMagic(0x61636673)
+	// FsMagicAfs filesystem id for Afs
+	FsMagicAfs = FsMagic(0x5346414f)
+	// FsMagicCephFs filesystem id for Ceph
+	FsMagicCephFs = FsMagic(0x00C36400)
+	// FsMagicCIFS filesystem id for CIFS
+	FsMagicCIFS = FsMagic(0xFF534D42)
+	// FsMagicEROFS filesystem id for EROFS
+	FsMagicEROFS = FsMagic(0xE0F5E1E2)
+	// FsMagicFHGFSFs filesystem id for FHGFS
+	FsMagicFHGFSFs = FsMagic(0x19830326)
+	// FsMagicIBRIX filesystem id for IBRIX
+	FsMagicIBRIX = FsMagic(0x013111A8)
+	// FsMagicKAFS filesystem id for KAFS
+	FsMagicKAFS = FsMagic(0x6B414653)
+	// FsMagicLUSTRE filesystem id for LUSTRE
+	FsMagicLUSTRE = FsMagic(0x0BD00BD0)
+	// FsMagicNCP filesystem id for NCP
+	FsMagicNCP = FsMagic(0x564C)
+	// FsMagicNFSD filesystem id for NFSD
+	FsMagicNFSD = FsMagic(0x6E667364)
+	// FsMagicOCFS2 filesystem id for OCFS2
+	FsMagicOCFS2 = FsMagic(0x7461636F)
+	// FsMagicPANFS filesystem id for PANFS
+	FsMagicPANFS = FsMagic(0xAAD7AAEA)
+	// FsMagicPRLFS filesystem id for PRLFS
+	FsMagicPRLFS = FsMagic(0x7C7C6673)
+	// FsMagicSMB2 filesystem id for SMB2
+	FsMagicSMB2 = FsMagic(0xFE534D42)
+	// FsMagicSNFS filesystem id for SNFS
+	FsMagicSNFS = FsMagic(0xBEEFDEAD)
+	// FsMagicVBOXSF filesystem id for VBOXSF
+	FsMagicVBOXSF = FsMagic(0x786F4256)
+	// FsMagicVXFS filesystem id for VXFS
+	FsMagicVXFS = FsMagic(0xA501FCF5)
+)
+
+var (
+	// Slice of drivers that should be used in order
+	Priority = []string{
+		"overlay",
+		// We don't support devicemapper without configuration
+		// "devicemapper",
+		"aufs",
+		"btrfs",
+		"zfs",
+		"vfs",
+	}
+
+	// FsNames maps filesystem id to name of the filesystem.
+	FsNames = map[FsMagic]string{
+		FsMagicAufs:        "aufs",
+		FsMagicBtrfs:       "btrfs",
+		FsMagicCramfs:      "cramfs",
+		FsMagicEcryptfs:    "ecryptfs",
+		FsMagicEROFS:       "erofs",
+		FsMagicExtfs:       "extfs",
+		FsMagicF2fs:        "f2fs",
+		FsMagicGPFS:        "gpfs",
+		FsMagicJffs2Fs:     "jffs2",
+		FsMagicJfs:         "jfs",
+		FsMagicNfsFs:       "nfs",
+		FsMagicOverlay:     "overlayfs",
+		FsMagicRAMFs:       "ramfs",
+		FsMagicReiserFs:    "reiserfs",
+		FsMagicSmbFs:       "smb",
+		FsMagicSquashFs:    "squashfs",
+		FsMagicTmpFs:       "tmpfs",
+		FsMagicUnsupported: "unsupported",
+		FsMagicVxFS:        "vxfs",
+		FsMagicXfs:         "xfs",
+		FsMagicZfs:         "zfs",
+	}
+)
+
+// GetFSMagic returns the filesystem id given the path.
+func GetFSMagic(rootpath string) (FsMagic, error) {
+	var buf unix.Statfs_t
+	path := filepath.Dir(rootpath)
+	if err := unix.Statfs(path, &buf); err != nil {
+		return 0, err
+	}
+
+	if _, ok := FsNames[FsMagic(buf.Type)]; !ok {
+		logrus.Debugf("Unknown filesystem type %#x reported for %s", buf.Type, path)
+	}
+	return FsMagic(buf.Type), nil
+}
+
+// NewFsChecker returns a checker configured for the provided FsMagic
+func NewFsChecker(t FsMagic) Checker {
+	return &fsChecker{
+		t: t,
+	}
+}
+
+type fsChecker struct {
+	t FsMagic
+}
+
+func (c *fsChecker) IsMounted(path string) bool {
+	m, _ := Mounted(c.t, path)
+	return m
+}
+
+// NewDefaultChecker returns a checker that parses /proc/mountinfo to
+// determine whether the specified path is mounted.
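+//
+// For a filesystem-specific check, NewFsChecker can be used instead; e.g.
+// (illustrative):
+//
+//	mounted := NewFsChecker(FsMagicZfs).IsMounted("/var/lib/containers/storage/zfs")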
+func NewDefaultChecker() Checker {
+	return &defaultChecker{}
+}
+
+type defaultChecker struct{}
+
+func (c *defaultChecker) IsMounted(path string) bool {
+	m, _ := mount.Mounted(path)
+	return m
+}
+
+// isMountPoint checks that the given path is a mount point
+func isMountPoint(mountPath string) (bool, error) {
+	// it is already the root
+	if mountPath == "/" {
+		return true, nil
+	}
+
+	var s1, s2 unix.Stat_t
+	if err := unix.Stat(mountPath, &s1); err != nil {
+		return true, err
+	}
+	if err := unix.Stat(filepath.Dir(mountPath), &s2); err != nil {
+		return true, err
+	}
+	return s1.Dev != s2.Dev, nil
+}
+
+// Mounted checks if the given path is mounted as the fs type
+func Mounted(fsType FsMagic, mountPath string) (bool, error) {
+	var buf unix.Statfs_t
+
+	if err := unix.Statfs(mountPath, &buf); err != nil {
+		return false, err
+	}
+	if FsMagic(buf.Type) != fsType {
+		return false, nil
+	}
+	return isMountPoint(mountPath)
+}
diff --git a/vendor/github.com/containers/storage/drivers/driver_solaris.go b/vendor/github.com/containers/storage/drivers/driver_solaris.go
new file mode 100644
index 0000000000..6b6373a377
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/driver_solaris.go
@@ -0,0 +1,96 @@
+//go:build solaris && cgo
+// +build solaris,cgo
+
+package graphdriver
+
+/*
+#include <sys/statvfs.h>
+#include <stdlib.h>
+
+static inline struct statvfs *getstatfs(char *s) {
+	struct statvfs *buf;
+	int err;
+	buf = (struct statvfs *)malloc(sizeof(struct statvfs));
+	err = statvfs(s, buf);
+	return buf;
+}
+*/
+import "C"
+
+import (
+	"path/filepath"
+	"unsafe"
+
+	"github.com/containers/storage/pkg/mount"
+	"github.com/sirupsen/logrus"
+)
+
+const (
+	// FsMagicZfs filesystem id for Zfs
+	FsMagicZfs = FsMagic(0x2fc12fc1)
+)
+
+var (
+	// Slice of drivers that should be used in order
+	Priority = []string{
+		"zfs",
+	}
+
+	// FsNames maps filesystem id to name of the filesystem.
+	FsNames = map[FsMagic]string{
+		FsMagicZfs: "zfs",
+	}
+)
+
+// GetFSMagic returns the filesystem id given the path.
+func GetFSMagic(rootpath string) (FsMagic, error) {
+	return 0, nil
+}
+
+type fsChecker struct {
+	t FsMagic
+}
+
+func (c *fsChecker) IsMounted(path string) bool {
+	m, _ := Mounted(c.t, path)
+	return m
+}
+
+// NewFsChecker returns a checker configured for the provided FsMagic
+func NewFsChecker(t FsMagic) Checker {
+	return &fsChecker{
+		t: t,
+	}
+}
+
+// NewDefaultChecker returns a checker that uses the mount package to
+// determine whether the specified path is mounted.
+func NewDefaultChecker() Checker {
+	return &defaultChecker{}
+}
+
+type defaultChecker struct{}
+
+func (c *defaultChecker) IsMounted(path string) bool {
+	m, _ := mount.Mounted(path)
+	return m
+}
+
+// Mounted checks if the given path is mounted as the fs type
+// (Solaris supports only ZFS for now).
+func Mounted(fsType FsMagic, mountPath string) (bool, error) {
+	cs := C.CString(filepath.Dir(mountPath))
+	defer C.free(unsafe.Pointer(cs))
+	buf := C.getstatfs(cs)
+	defer C.free(unsafe.Pointer(buf))
+
+	// on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ...
]
+	if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) ||
+		(buf.f_basetype[3] != 0) {
+		logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath)
+		return false, ErrPrerequisites
+	}
+
+	return true, nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/driver_unsupported.go b/vendor/github.com/containers/storage/drivers/driver_unsupported.go
new file mode 100644
index 0000000000..7dfbef0072
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/driver_unsupported.go
@@ -0,0 +1,14 @@
+//go:build !linux && !windows && !freebsd && !solaris && !darwin
+// +build !linux,!windows,!freebsd,!solaris,!darwin
+
+package graphdriver
+
+// Slice of drivers that should be used in order
+var Priority = []string{
+	"unsupported",
+}
+
+// GetFSMagic returns the filesystem id given the path.
+func GetFSMagic(rootpath string) (FsMagic, error) {
+	return FsMagicUnsupported, nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/driver_windows.go b/vendor/github.com/containers/storage/drivers/driver_windows.go
new file mode 100644
index 0000000000..54bd139a3c
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/driver_windows.go
@@ -0,0 +1,12 @@
+package graphdriver
+
+// Slice of drivers that should be used in order
+var Priority = []string{
+	"windowsfilter",
+}
+
+// GetFSMagic returns the filesystem id given the path.
+func GetFSMagic(rootpath string) (FsMagic, error) {
+	// Note it is OK to return FsMagicUnsupported on Windows.
+	return FsMagicUnsupported, nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/fsdiff.go b/vendor/github.com/containers/storage/drivers/fsdiff.go
new file mode 100644
index 0000000000..fba9ec4fc0
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/fsdiff.go
@@ -0,0 +1,229 @@
+package graphdriver
+
+import (
+	"io"
+	"os"
+	"runtime"
+	"time"
+
+	"github.com/containers/storage/pkg/archive"
+	"github.com/containers/storage/pkg/chrootarchive"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/ioutils"
+	"github.com/containers/storage/pkg/unshare"
+	"github.com/sirupsen/logrus"
+)
+
+// ApplyUncompressedLayer defines the unpack method used by the graph
+// driver.
+var ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer
+
+// NaiveDiffDriver takes a ProtoDriver and adds the
+// capability of the Diffing methods which it may or may not
+// support on its own. See the comment on the exported
+// NewNaiveDiffDriver function below.
+// Notably, the AUFS driver doesn't need to be wrapped like this.
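+//
+// A construction sketch in the spirit of the drivers in this package
+// (proto and updater are assumed to satisfy the two embedded interfaces):
+//
+//	drv := NewNaiveDiffDriver(proto, updater)
+//	rc, err := drv.Diff(layerID, nil, parentID, nil, "")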
+type NaiveDiffDriver struct { + ProtoDriver + LayerIDMapUpdater +} + +// NewNaiveDiffDriver returns a fully functional driver that wraps the +// given ProtoDriver and adds the capability of the following methods which +// it may or may not support on its own: +// +// Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error) +// Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) +// ApplyDiff(id, parent string, options ApplyDiffOpts) (size int64, err error) +// DiffSize(id string, idMappings *idtools.IDMappings, parent, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) +func NewNaiveDiffDriver(driver ProtoDriver, updater LayerIDMapUpdater) Driver { + return &NaiveDiffDriver{ProtoDriver: driver, LayerIDMapUpdater: updater} +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (gdw *NaiveDiffDriver) Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (arch io.ReadCloser, err error) { + startTime := time.Now() + driver := gdw.ProtoDriver + + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + if parentMappings == nil { + parentMappings = &idtools.IDMappings{} + } + + options := MountOpts{ + MountLabel: mountLabel, + Options: []string{"ro"}, + } + layerFs, err := driver.Get(id, options) + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + driverPut(driver, id, &err) + } + }() + + if parent == "" { + archive, err := archive.TarWithOptions(layerFs, &archive.TarOptions{ + Compression: archive.Uncompressed, + UIDMaps: idMappings.UIDs(), + GIDMaps: idMappings.GIDs(), + }) + if err != nil { + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driverPut(driver, id, &err) + return err + }), nil + } + + options.Options = append(options.Options, "ro") + parentFs, err := driver.Get(parent, options) + if err != nil { + return nil, err + } + defer driverPut(driver, parent, &err) + + changes, err := archive.ChangesDirs(layerFs, idMappings, parentFs, parentMappings) + if err != nil { + return nil, err + } + + archive, err := archive.ExportChanges(layerFs, changes, idMappings.UIDs(), idMappings.GIDs()) + if err != nil { + return nil, err + } + + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driverPut(driver, id, &err) + + // NaiveDiffDriver compares file metadata with parent layers. Parent layers + // are extracted from tar's with full second precision on modified time. + // We need this hack here to make sure calls within same second receive + // correct result. + time.Sleep(time.Until(startTime.Truncate(time.Second).Add(time.Second))) + return err + }), nil +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. 
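+//
+// Illustrative iteration over the result (the caller and IDs are hypothetical):
+//
+//	changes, err := drv.Changes(layerID, nil, parentID, nil, "")
+//	if err == nil {
+//		for _, c := range changes {
+//			fmt.Println(c.Kind, c.Path)
+//		}
+//	}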
+func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (_ []archive.Change, retErr error) { + driver := gdw.ProtoDriver + + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + if parentMappings == nil { + parentMappings = &idtools.IDMappings{} + } + + options := MountOpts{ + MountLabel: mountLabel, + } + layerFs, err := driver.Get(id, options) + if err != nil { + return nil, err + } + defer driverPut(driver, id, &retErr) + + parentFs := "" + + if parent != "" { + options := MountOpts{ + MountLabel: mountLabel, + Options: []string{"ro"}, + } + parentFs, err = driver.Get(parent, options) + if err != nil { + return nil, err + } + defer driverPut(driver, parent, &retErr) + } + + return archive.ChangesDirs(layerFs, idMappings, parentFs, parentMappings) +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, options ApplyDiffOpts) (size int64, err error) { + driver := gdw.ProtoDriver + + if options.Mappings == nil { + options.Mappings = &idtools.IDMappings{} + } + + // Mount the root filesystem so we can apply the diff/layer. + mountOpts := MountOpts{ + MountLabel: options.MountLabel, + } + layerFs, err := driver.Get(id, mountOpts) + if err != nil { + return + } + defer driverPut(driver, id, &err) + + defaultForceMask := os.FileMode(0o700) + var forceMask *os.FileMode // = nil + if runtime.GOOS == "darwin" { + forceMask = &defaultForceMask + } + + tarOptions := &archive.TarOptions{ + InUserNS: unshare.IsRootless(), + IgnoreChownErrors: options.IgnoreChownErrors, + ForceMask: forceMask, + } + if options.Mappings != nil { + tarOptions.UIDMaps = options.Mappings.UIDs() + tarOptions.GIDMaps = options.Mappings.GIDs() + } + start := time.Now().UTC() + logrus.Debug("Start untar layer") + if size, err = ApplyUncompressedLayer(layerFs, options.Diff, tarOptions); err != nil { + logrus.Errorf("While applying layer: %s", err) + return + } + logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) + + return +} + +// DiffSize calculates the changes between the specified layer +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. 
+func (gdw *NaiveDiffDriver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) {
+	driver := gdw.ProtoDriver
+
+	if idMappings == nil {
+		idMappings = &idtools.IDMappings{}
+	}
+	if parentMappings == nil {
+		parentMappings = &idtools.IDMappings{}
+	}
+
+	changes, err := gdw.Changes(id, idMappings, parent, parentMappings, mountLabel)
+	if err != nil {
+		return
+	}
+
+	options := MountOpts{
+		MountLabel: mountLabel,
+	}
+	layerFs, err := driver.Get(id, options)
+	if err != nil {
+		return
+	}
+	defer driverPut(driver, id, &err)
+
+	return archive.ChangesSize(layerFs, changes), nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/jsoniter.go b/vendor/github.com/containers/storage/drivers/jsoniter.go
new file mode 100644
index 0000000000..097f923ab5
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/jsoniter.go
@@ -0,0 +1,5 @@
+package graphdriver
+
+import jsoniter "github.com/json-iterator/go"
+
+var json = jsoniter.ConfigCompatibleWithStandardLibrary
diff --git a/vendor/github.com/containers/storage/drivers/overlay/check.go b/vendor/github.com/containers/storage/drivers/overlay/check.go
new file mode 100644
index 0000000000..d8139f6566
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/overlay/check.go
@@ -0,0 +1,310 @@
+//go:build linux
+// +build linux
+
+package overlay
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path"
+	"path/filepath"
+	"syscall"
+
+	"github.com/containers/storage/pkg/archive"
+	"github.com/containers/storage/pkg/idmap"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/ioutils"
+	"github.com/containers/storage/pkg/mount"
+	"github.com/containers/storage/pkg/system"
+	"github.com/containers/storage/pkg/unshare"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
+)
+
+// doesSupportNativeDiff checks whether the filesystem has a bug
+// which copies up the opaque flag when copying up an opaque
+// directory, or whether the kernel enables CONFIG_OVERLAY_FS_REDIRECT_DIR.
+// When either is present, the naive diff driver should be used.
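+//
+// Callers typically gate the naive-diff fallback on the result; a rough
+// sketch (the useNaiveDiff variable is illustrative):
+//
+//	if err := doesSupportNativeDiff(home, mountOpts); err != nil {
+//		logrus.Warnf("Not using native diff for overlay: %v", err)
+//		useNaiveDiff = true
+//	}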
+func doesSupportNativeDiff(d, mountOpts string) error { + td, err := os.MkdirTemp(d, "opaque-bug-check") + if err != nil { + return err + } + defer func() { + if err := os.RemoveAll(td); err != nil { + logrus.Warnf("Failed to remove check directory %v: %v", td, err) + } + }() + + // Make directories l1/d, l1/d1, l2/d, l3, work, merged + if err := os.MkdirAll(filepath.Join(td, "l1", "d"), 0o755); err != nil { + return err + } + if err := os.MkdirAll(filepath.Join(td, "l1", "d1"), 0o755); err != nil { + return err + } + if err := os.MkdirAll(filepath.Join(td, "l2", "d"), 0o755); err != nil { + return err + } + if err := os.Mkdir(filepath.Join(td, "l3"), 0o755); err != nil { + return err + } + if err := os.Mkdir(filepath.Join(td, "work"), 0o755); err != nil { + return err + } + if err := os.Mkdir(filepath.Join(td, "merged"), 0o755); err != nil { + return err + } + + // Mark l2/d as opaque + if err := system.Lsetxattr(filepath.Join(td, "l2", "d"), archive.GetOverlayXattrName("opaque"), []byte("y"), 0); err != nil { + return fmt.Errorf("failed to set opaque flag on middle layer: %w", err) + } + + mountFlags := "lowerdir=%s:%s,upperdir=%s,workdir=%s" + if unshare.IsRootless() { + mountFlags = mountFlags + ",userxattr" + } + + opts := fmt.Sprintf(mountFlags, path.Join(td, "l2"), path.Join(td, "l1"), path.Join(td, "l3"), path.Join(td, "work")) + flags, data := mount.ParseOptions(mountOpts) + if data != "" { + opts = fmt.Sprintf("%s,%s", opts, data) + } + if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", uintptr(flags), opts); err != nil { + return fmt.Errorf("failed to mount overlay: %w", err) + } + defer func() { + if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil { + logrus.Warnf("Failed to unmount check directory %v: %v", filepath.Join(td, "merged"), err) + } + }() + + // Touch file in d to force copy up of opaque directory "d" from "l2" to "l3" + if err := os.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0o644); err != nil { + return fmt.Errorf("failed to write to merged directory: %w", err) + } + + // Check l3/d does not have opaque flag + xattrOpaque, err := system.Lgetxattr(filepath.Join(td, "l3", "d"), archive.GetOverlayXattrName("opaque")) + if err != nil { + return fmt.Errorf("failed to read opaque flag on upper layer: %w", err) + } + if string(xattrOpaque) == "y" { + return errors.New("opaque flag erroneously copied up, consider update to kernel 4.8 or later to fix") + } + + // rename "d1" to "d2" + if err := os.Rename(filepath.Join(td, "merged", "d1"), filepath.Join(td, "merged", "d2")); err != nil { + // if rename failed with syscall.EXDEV, the kernel doesn't have CONFIG_OVERLAY_FS_REDIRECT_DIR enabled + if err.(*os.LinkError).Err == syscall.EXDEV { + return nil + } + return fmt.Errorf("failed to rename dir in merged directory: %w", err) + } + // get the xattr of "d2" + xattrRedirect, err := system.Lgetxattr(filepath.Join(td, "l3", "d2"), archive.GetOverlayXattrName("redirect")) + if err != nil { + return fmt.Errorf("failed to read redirect flag on upper layer: %w", err) + } + + if string(xattrRedirect) == "d1" { + return errors.New("kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled") + } + + return nil +} + +// doesMetacopy checks if the filesystem is going to optimize changes to +// metadata by using nodes marked with an "overlay.metacopy" attribute to avoid +// copying up a file from a lower layer unless/until its contents are being +// modified +func doesMetacopy(d, mountOpts string) (bool, error) { + td, err := os.MkdirTemp(d, 
"metacopy-check") + if err != nil { + return false, err + } + defer func() { + if err := os.RemoveAll(td); err != nil { + logrus.Warnf("Failed to remove check directory %v: %v", td, err) + } + }() + + // Make directories l1, l2, work, merged + if err := os.MkdirAll(filepath.Join(td, "l1"), 0o755); err != nil { + return false, err + } + if err := ioutils.AtomicWriteFile(filepath.Join(td, "l1", "f"), []byte{0xff}, 0o700); err != nil { + return false, err + } + if err := os.MkdirAll(filepath.Join(td, "l2"), 0o755); err != nil { + return false, err + } + if err := os.Mkdir(filepath.Join(td, "work"), 0o755); err != nil { + return false, err + } + if err := os.Mkdir(filepath.Join(td, "merged"), 0o755); err != nil { + return false, err + } + // Mount using the mandatory options and configured options + opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", path.Join(td, "l1"), path.Join(td, "l2"), path.Join(td, "work")) + if unshare.IsRootless() { + opts = fmt.Sprintf("%s,userxattr", opts) + } + flags, data := mount.ParseOptions(mountOpts) + if data != "" { + opts = fmt.Sprintf("%s,%s", opts, data) + } + if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", uintptr(flags), opts); err != nil { + if errors.Is(err, unix.EINVAL) { + logrus.Infof("overlay: metacopy option not supported on this kernel, checked using options %q", mountOpts) + return false, nil + } + return false, fmt.Errorf("failed to mount overlay for metacopy check with %q options: %w", mountOpts, err) + } + defer func() { + if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil { + logrus.Warnf("Failed to unmount check directory %v: %v", filepath.Join(td, "merged"), err) + } + }() + // Make a change that only impacts the inode, and check if the pulled-up copy is marked + // as a metadata-only copy + if err := os.Chmod(filepath.Join(td, "merged", "f"), 0o600); err != nil { + return false, fmt.Errorf("changing permissions on file for metacopy check: %w", err) + } + metacopy, err := system.Lgetxattr(filepath.Join(td, "l2", "f"), archive.GetOverlayXattrName("metacopy")) + if err != nil { + if errors.Is(err, unix.ENOTSUP) { + logrus.Info("metacopy option not supported") + return false, nil + } + return false, fmt.Errorf("metacopy flag was not set on file in upper layer: %w", err) + } + return metacopy != nil, nil +} + +// doesVolatile checks if the filesystem supports the "volatile" mount option +func doesVolatile(d string) (bool, error) { + td, err := os.MkdirTemp(d, "volatile-check") + if err != nil { + return false, err + } + defer func() { + if err := os.RemoveAll(td); err != nil { + logrus.Warnf("Failed to remove check directory %v: %v", td, err) + } + }() + + if err := os.MkdirAll(filepath.Join(td, "lower"), 0o755); err != nil { + return false, err + } + if err := os.MkdirAll(filepath.Join(td, "upper"), 0o755); err != nil { + return false, err + } + if err := os.Mkdir(filepath.Join(td, "work"), 0o755); err != nil { + return false, err + } + if err := os.Mkdir(filepath.Join(td, "merged"), 0o755); err != nil { + return false, err + } + // Mount using the mandatory options and configured options + opts := fmt.Sprintf("volatile,lowerdir=%s,upperdir=%s,workdir=%s", path.Join(td, "lower"), path.Join(td, "upper"), path.Join(td, "work")) + if unshare.IsRootless() { + opts = fmt.Sprintf("%s,userxattr", opts) + } + if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", 0, opts); err != nil { + return false, fmt.Errorf("failed to mount overlay for volatile check: %w", err) + } + defer func() { 
+		if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil {
+			logrus.Warnf("Failed to unmount check directory %v: %v", filepath.Join(td, "merged"), err)
+		}
+	}()
+	return true, nil
+}
+
+// supportsIdmappedLowerLayers checks if the kernel supports mounting overlay on top of
+// an idmapped lower layer.
+func supportsIdmappedLowerLayers(home string) (bool, error) {
+	layerDir, err := os.MkdirTemp(home, "compat")
+	if err != nil {
+		return false, err
+	}
+	defer func() {
+		_ = os.RemoveAll(layerDir)
+	}()
+
+	mergedDir := filepath.Join(layerDir, "merged")
+	lowerDir := filepath.Join(layerDir, "lower")
+	lowerMappedDir := filepath.Join(layerDir, "lower-mapped")
+	upperDir := filepath.Join(layerDir, "upper")
+	workDir := filepath.Join(layerDir, "work")
+
+	_ = idtools.MkdirAs(mergedDir, 0o700, 0, 0)
+	_ = idtools.MkdirAs(lowerDir, 0o700, 0, 0)
+	_ = idtools.MkdirAs(lowerMappedDir, 0o700, 0, 0)
+	_ = idtools.MkdirAs(upperDir, 0o700, 0, 0)
+	_ = idtools.MkdirAs(workDir, 0o700, 0, 0)
+
+	mapping := []idtools.IDMap{
+		{
+			ContainerID: 0,
+			HostID:      0,
+			Size:        1,
+		},
+	}
+	pid, cleanupFunc, err := idmap.CreateUsernsProcess(mapping, mapping)
+	if err != nil {
+		return false, err
+	}
+	defer cleanupFunc()
+
+	if err := idmap.CreateIDMappedMount(lowerDir, lowerMappedDir, int(pid)); err != nil {
+		return false, fmt.Errorf("create mapped mount: %w", err)
+	}
+	defer unix.Unmount(lowerMappedDir, unix.MNT_DETACH)
+
+	opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerMappedDir, upperDir, workDir)
+	flags := uintptr(0)
+	if err := unix.Mount("overlay", mergedDir, "overlay", flags, opts); err != nil {
+		return false, err
+	}
+	defer func() {
+		_ = unix.Unmount(mergedDir, unix.MNT_DETACH)
+	}()
+	return true, nil
+}
+
+// supportsDataOnlyLayers checks if the kernel supports mounting an overlay file system
+// that uses data-only layers.
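+//
+// Data-only lowers are expressed with an empty lowerdir entry, i.e. a "::"
+// separator; the probe below builds a mount string of the form (illustrative):
+//
+//	lowerdir=/l/meta::/l/data,upperdir=/l/upper,workdir=/l/work,metacopy=on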
+func supportsDataOnlyLayers(home string) (bool, error) { + layerDir, err := os.MkdirTemp(home, "compat") + if err != nil { + return false, err + } + defer func() { + _ = os.RemoveAll(layerDir) + }() + + mergedDir := filepath.Join(layerDir, "merged") + lowerDir := filepath.Join(layerDir, "lower") + lowerDirDataOnly := filepath.Join(layerDir, "lower-data") + upperDir := filepath.Join(layerDir, "upper") + workDir := filepath.Join(layerDir, "work") + + _ = idtools.MkdirAs(mergedDir, 0o700, 0, 0) + _ = idtools.MkdirAs(lowerDir, 0o700, 0, 0) + _ = idtools.MkdirAs(lowerDirDataOnly, 0o700, 0, 0) + _ = idtools.MkdirAs(upperDir, 0o700, 0, 0) + _ = idtools.MkdirAs(workDir, 0o700, 0, 0) + + opts := fmt.Sprintf("lowerdir=%s::%s,upperdir=%s,workdir=%s,metacopy=on", lowerDir, lowerDirDataOnly, upperDir, workDir) + flags := uintptr(0) + if err := unix.Mount("overlay", mergedDir, "overlay", flags, opts); err != nil { + return false, err + } + _ = unix.Unmount(mergedDir, unix.MNT_DETACH) + + return true, nil +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/check_116.go b/vendor/github.com/containers/storage/drivers/overlay/check_116.go new file mode 100644 index 0000000000..bec455dd4f --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/check_116.go @@ -0,0 +1,45 @@ +//go:build linux +// +build linux + +package overlay + +import ( + "errors" + "io/fs" + "path/filepath" + "strings" + + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/system" + "golang.org/x/sys/unix" +) + +func scanForMountProgramIndicators(home string) (detected bool, err error) { + err = filepath.WalkDir(home, func(path string, d fs.DirEntry, err error) error { + if detected { + return fs.SkipDir + } + if err != nil { + return err + } + basename := filepath.Base(path) + if strings.HasPrefix(basename, archive.WhiteoutPrefix) { + detected = true + return fs.SkipDir + } + if d.IsDir() { + xattrs, err := system.Llistxattr(path) + if err != nil && !errors.Is(err, unix.EOPNOTSUPP) { + return err + } + for _, xattr := range xattrs { + if strings.HasPrefix(xattr, "user.fuseoverlayfs.") || strings.HasPrefix(xattr, "user.containers.") { + detected = true + return fs.SkipDir + } + } + } + return nil + }) + return detected, err +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/composefs_notsupported.go b/vendor/github.com/containers/storage/drivers/overlay/composefs_notsupported.go new file mode 100644 index 0000000000..347e4d35c4 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/composefs_notsupported.go @@ -0,0 +1,24 @@ +//go:build !linux || !composefs || !cgo +// +build !linux !composefs !cgo + +package overlay + +import ( + "fmt" +) + +func composeFsSupported() bool { + return false +} + +func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, composefsDir string) error { + return fmt.Errorf("composefs is not supported") +} + +func mountComposefsBlob(dataDir, mountPoint string) error { + return fmt.Errorf("composefs is not supported") +} + +func enableVerityRecursive(path string) (map[string]string, error) { + return nil, fmt.Errorf("composefs is not supported") +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/composefs_supported.go b/vendor/github.com/containers/storage/drivers/overlay/composefs_supported.go new file mode 100644 index 0000000000..26dd36866c --- /dev/null +++ 
b/vendor/github.com/containers/storage/drivers/overlay/composefs_supported.go @@ -0,0 +1,219 @@ +//go:build linux && composefs && cgo +// +build linux,composefs,cgo + +package overlay + +import ( + "encoding/binary" + "errors" + "fmt" + "io/fs" + "os" + "os/exec" + "path/filepath" + "sync" + "syscall" + "unsafe" + + "github.com/containers/storage/pkg/chunked/dump" + "github.com/containers/storage/pkg/loopback" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +var ( + composeFsHelperOnce sync.Once + composeFsHelperPath string + composeFsHelperErr error +) + +func getComposeFsHelper() (string, error) { + composeFsHelperOnce.Do(func() { + composeFsHelperPath, composeFsHelperErr = exec.LookPath("mkcomposefs") + }) + return composeFsHelperPath, composeFsHelperErr +} + +func composeFsSupported() bool { + _, err := getComposeFsHelper() + return err == nil +} + +func enableVerity(description string, fd int) error { + enableArg := unix.FsverityEnableArg{ + Version: 1, + Hash_algorithm: unix.FS_VERITY_HASH_ALG_SHA256, + Block_size: 4096, + } + + _, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_ENABLE_VERITY), uintptr(unsafe.Pointer(&enableArg))) + if e1 != 0 && !errors.Is(e1, unix.EEXIST) { + return fmt.Errorf("failed to enable verity for %q: %w", description, e1) + } + return nil +} + +type verityDigest struct { + Fsv unix.FsverityDigest + Buf [64]byte +} + +func measureVerity(description string, fd int) (string, error) { + var digest verityDigest + digest.Fsv.Size = 64 + _, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_MEASURE_VERITY), uintptr(unsafe.Pointer(&digest))) + if e1 != 0 { + return "", fmt.Errorf("failed to measure verity for %q: %w", description, e1) + } + return fmt.Sprintf("%x", digest.Buf[:digest.Fsv.Size]), nil +} + +func enableVerityRecursive(root string) (map[string]string, error) { + digests := make(map[string]string) + walkFn := func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if !d.Type().IsRegular() { + return nil + } + + f, err := os.Open(path) + if err != nil { + return err + } + defer f.Close() + + if err := enableVerity(path, int(f.Fd())); err != nil { + return err + } + + verity, err := measureVerity(path, int(f.Fd())) + if err != nil { + return err + } + + relPath, err := filepath.Rel(root, path) + if err != nil { + return err + } + + digests[relPath] = verity + return nil + } + err := filepath.WalkDir(root, walkFn) + return digests, err +} + +func getComposefsBlob(dataDir string) string { + return filepath.Join(dataDir, "composefs.blob") +} + +func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, composefsDir string) error { + if err := os.MkdirAll(composefsDir, 0o700); err != nil { + return err + } + + dumpReader, err := dump.GenerateDump(toc, verityDigests) + if err != nil { + return err + } + + destFile := getComposefsBlob(composefsDir) + writerJson, err := getComposeFsHelper() + if err != nil { + return fmt.Errorf("failed to find mkcomposefs: %w", err) + } + + fd, err := unix.Openat(unix.AT_FDCWD, destFile, unix.O_WRONLY|unix.O_CREAT|unix.O_TRUNC|unix.O_EXCL|unix.O_CLOEXEC, 0o644) + if err != nil { + return fmt.Errorf("failed to open output file %q: %w", destFile, err) + } + outFd := os.NewFile(uintptr(fd), "outFd") + + fd, err = unix.Open(fmt.Sprintf("/proc/self/fd/%d", outFd.Fd()), unix.O_RDONLY|unix.O_CLOEXEC, 0) + if err != nil { + outFd.Close() + return fmt.Errorf("failed to dup output file: %w", 
err) + } + newFd := os.NewFile(uintptr(fd), "newFd") + defer newFd.Close() + + err = func() error { + // a scope to close outFd before setting fsverity on the read-only fd. + defer outFd.Close() + + cmd := exec.Command(writerJson, "--from-file", "-", "/proc/self/fd/3") + cmd.ExtraFiles = []*os.File{outFd} + cmd.Stderr = os.Stderr + cmd.Stdin = dumpReader + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to convert json to erofs: %w", err) + } + return nil + }() + if err != nil { + return err + } + + if err := enableVerity("manifest file", int(newFd.Fd())); err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) { + logrus.Warningf("%s", err) + } + + return nil +} + +/* +typedef enum { + LCFS_EROFS_FLAGS_HAS_ACL = (1 << 0), +} lcfs_erofs_flag_t; + +struct lcfs_erofs_header_s { + uint32_t magic; + uint32_t version; + uint32_t flags; + uint32_t unused[5]; +} __attribute__((__packed__)); +*/ + +// hasACL returns true if the erofs blob has ACLs enabled +func hasACL(path string) (bool, error) { + const LCFS_EROFS_FLAGS_HAS_ACL = (1 << 0) + + fd, err := unix.Openat(unix.AT_FDCWD, path, unix.O_RDONLY|unix.O_CLOEXEC, 0) + if err != nil { + return false, err + } + defer unix.Close(fd) + // do not worry about checking the magic number, if the file is invalid + // we will fail to mount it anyway + flags := make([]byte, 4) + nread, err := unix.Pread(fd, flags, 8) + if err != nil { + return false, err + } + if nread != 4 { + return false, fmt.Errorf("failed to read flags from %q", path) + } + return binary.LittleEndian.Uint32(flags)&LCFS_EROFS_FLAGS_HAS_ACL != 0, nil +} + +func mountComposefsBlob(dataDir, mountPoint string) error { + blobFile := getComposefsBlob(dataDir) + loop, err := loopback.AttachLoopDeviceRO(blobFile) + if err != nil { + return err + } + defer loop.Close() + + hasACL, err := hasACL(blobFile) + if err != nil { + return err + } + mountOpts := "ro" + if !hasACL { + mountOpts += ",noacl" + } + + return unix.Mount(loop.Name(), mountPoint, "erofs", unix.MS_RDONLY, mountOpts) +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/jsoniter.go b/vendor/github.com/containers/storage/drivers/overlay/jsoniter.go new file mode 100644 index 0000000000..bedda35078 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/jsoniter.go @@ -0,0 +1,8 @@ +//go:build linux +// +build linux + +package overlay + +import jsoniter "github.com/json-iterator/go" + +var json = jsoniter.ConfigCompatibleWithStandardLibrary diff --git a/vendor/github.com/containers/storage/drivers/overlay/mount.go b/vendor/github.com/containers/storage/drivers/overlay/mount.go new file mode 100644 index 0000000000..8829e55e98 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/mount.go @@ -0,0 +1,189 @@ +//go:build linux +// +build linux + +package overlay + +import ( + "bytes" + "flag" + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/containers/storage/pkg/reexec" + "golang.org/x/sys/unix" +) + +func init() { + reexec.Register("storage-mountfrom", mountOverlayFromMain) +} + +func fatal(err error) { + fmt.Fprint(os.Stderr, err) + os.Exit(1) +} + +type mountOptions struct { + Device string + Target string + Type string + Label string + Flag uint32 +} + +func mountOverlayFrom(dir, device, target, mType string, flags uintptr, label string) error { + options := &mountOptions{ + Device: device, + Target: target, + Type: mType, + Flag: uint32(flags), + Label: label, 
+	}
+
+	cmd := reexec.Command("storage-mountfrom", dir)
+	w, err := cmd.StdinPipe()
+	if err != nil {
+		return fmt.Errorf("mountfrom error on pipe creation: %w", err)
+	}
+
+	output := bytes.NewBuffer(nil)
+	cmd.Stdout = output
+	cmd.Stderr = output
+	if err := cmd.Start(); err != nil {
+		w.Close()
+		return fmt.Errorf("mountfrom error on re-exec cmd: %w", err)
+	}
+	// write the options to the pipe for the re-exec'd mount process to read
+	if err := json.NewEncoder(w).Encode(options); err != nil {
+		w.Close()
+		return fmt.Errorf("mountfrom json encode to pipe failed: %w", err)
+	}
+	w.Close()
+
+	if err := cmd.Wait(); err != nil {
+		return fmt.Errorf("mountfrom re-exec output: %s: error: %w", output, err)
+	}
+	return nil
+}
+
+// mountOverlayFromMain is the entry-point for storage-mountfrom on re-exec.
+func mountOverlayFromMain() {
+	runtime.LockOSThread()
+	flag.Parse()
+
+	var options *mountOptions
+
+	if err := json.NewDecoder(os.Stdin).Decode(&options); err != nil {
+		fatal(err)
+	}
+
+	// Mount the arguments passed from the specified directory. Some of the
+	// paths mentioned in the values we pass to the kernel are relative to
+	// the specified directory.
+	homedir := flag.Arg(0)
+	if err := os.Chdir(homedir); err != nil {
+		fatal(err)
+	}
+
+	pageSize := unix.Getpagesize()
+	if len(options.Label) < pageSize {
+		if err := unix.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil {
+			fatal(err)
+		}
+		os.Exit(0)
+	}
+
+	// Those arguments still took up too much space. Open the diff
+	// directories and use their descriptor numbers as lowers, using
+	// /proc/self/fd as the current directory.
+
+	// Split out the various options, since we need to manipulate the
+	// paths, but we don't want to mess with other options.
+	var upperk, upperv, workk, workv, lowerk, lowerv, labelk, labelv, others string
+	for _, arg := range strings.Split(options.Label, ",") {
+		kv := strings.SplitN(arg, "=", 2)
+		switch kv[0] {
+		case "upperdir":
+			upperk = "upperdir="
+			upperv = kv[1]
+		case "workdir":
+			workk = "workdir="
+			workv = kv[1]
+		case "lowerdir":
+			lowerk = "lowerdir="
+			lowerv = kv[1]
+		case "label":
+			labelk = "label="
+			labelv = kv[1]
+		default:
+			if others == "" {
+				others = arg
+			} else {
+				others = others + "," + arg
+			}
+		}
+	}
+
+	// Make sure upperdir, workdir, and the target are absolute paths.
+	if upperv != "" && !filepath.IsAbs(upperv) {
+		upperv = filepath.Join(homedir, upperv)
+	}
+	if workv != "" && !filepath.IsAbs(workv) {
+		workv = filepath.Join(homedir, workv)
+	}
+	if !filepath.IsAbs(options.Target) {
+		options.Target = filepath.Join(homedir, options.Target)
+	}
+
+	// Get a descriptor for each lower, and use that descriptor's name as
+	// the new value for the list of lowers, because it's shorter.
+	if lowerv != "" {
+		lowers := strings.Split(lowerv, ":")
+		var newLowers []string
+		dataOnly := false
+		for _, lowerPath := range lowers {
+			if lowerPath == "" {
+				dataOnly = true
+				continue
+			}
+			lowerFd, err := unix.Open(lowerPath, unix.O_RDONLY, 0)
+			if err != nil {
+				fatal(err)
+			}
+			var lower string
+			if dataOnly {
+				lower = fmt.Sprintf(":%d", lowerFd)
+				dataOnly = false
+			} else {
+				lower = fmt.Sprintf("%d", lowerFd)
+			}
+			newLowers = append(newLowers, lower)
+		}
+		lowerv = strings.Join(newLowers, ":")
+	}
+
+	// Reconstruct the Label field.
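+	// For example (illustrative values), after the lowers have been replaced
+	// by short /proc/self/fd numbers the reassembled string looks like
+	// "upperdir=u,workdir=w,lowerdir=10:11,label=x,extra"; the ReplaceAll
+	// below collapses the ",," left behind by any empty groups.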
+	options.Label = upperk + upperv + "," + workk + workv + "," + lowerk + lowerv + "," + labelk + labelv + "," + others
+	options.Label = strings.ReplaceAll(options.Label, ",,", ",")
+
+	// Okay, try this, if we managed to make the arguments fit.
+	var err error
+	if len(options.Label) < pageSize {
+		if err := os.Chdir("/proc/self/fd"); err != nil {
+			fatal(err)
+		}
+		err = unix.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label)
+	} else {
+		err = fmt.Errorf("cannot mount layer, mount data %q too large %d >= page size %d", options.Label, len(options.Label), pageSize)
+	}
+
+	// Clean up.
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "creating overlay mount to %s, mount_data=%q\n", options.Target, options.Label)
+		fatal(err)
+	}
+
+	os.Exit(0)
+}
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
new file mode 100644
index 0000000000..04ecf871fd
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -0,0 +1,2556 @@
+//go:build linux
+// +build linux
+
+package overlay
+
+import (
+	"bytes"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"os/exec"
+	"path"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"syscall"
+
+	graphdriver "github.com/containers/storage/drivers"
+	"github.com/containers/storage/drivers/overlayutils"
+	"github.com/containers/storage/drivers/quota"
+	"github.com/containers/storage/pkg/archive"
+	"github.com/containers/storage/pkg/chrootarchive"
+	"github.com/containers/storage/pkg/directory"
+	"github.com/containers/storage/pkg/fsutils"
+	"github.com/containers/storage/pkg/idmap"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/mount"
+	"github.com/containers/storage/pkg/parsers"
+	"github.com/containers/storage/pkg/system"
+	"github.com/containers/storage/pkg/unshare"
+	units "github.com/docker/go-units"
+	"github.com/hashicorp/go-multierror"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/opencontainers/selinux/go-selinux"
+	"github.com/opencontainers/selinux/go-selinux/label"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
+)
+
+// untar defines the untar method
+var untar = chrootarchive.UntarUncompressed
+
+const (
+	defaultPerms         = os.FileMode(0o555)
+	selinuxLabelTest     = "system_u:object_r:container_file_t:s0"
+	mountProgramFlagFile = ".has-mount-program"
+)
+
+// This backend uses the overlay union filesystem for containers
+// with diff directories for each layer.
+
+// This version of the overlay driver requires at least kernel
+// 4.0.0 in order to support mounting multiple diff directories.
+
+// Each container/image has at least a "diff" directory and "link" file.
+// There is also a "lower" file when there are diff layers below, as well
+// as "merged" and "work" directories. The "diff" directory
+// has the upper layer of the overlay and is used to capture any
+// changes to the layer. The "lower" file contains all the lower layer
+// mounts separated by ":" and ordered from uppermost to lowermost
+// layers. The overlay itself is mounted in the "merged" directory,
+// and the "work" dir is needed for overlay to work.
+
+// The "link" file for each layer contains a unique string for the layer.
+// Under the "l" directory at the root there will be a symbolic link +// with that unique string pointing the "diff" directory for the layer. +// The symbolic links are used to reference lower layers in the "lower" +// file and on mount. The links are used to shorten the total length +// of a layer reference without requiring changes to the layer identifier +// or root directory. Mounts are always done relative to root and +// referencing the symbolic links in order to ensure the number of +// lower directories can fit in a single page for making the mount +// syscall. A hard upper limit of 500 lower layers is enforced to ensure +// that mounts do not fail due to length. + +const ( + linkDir = "l" + stagingDir = "staging" + lowerFile = "lower" + maxDepth = 500 + + tocArtifact = "toc" + + // idLength represents the number of random characters + // which can be used to create the unique link identifier + // for every layer. If this value is too long then the + // page size limit for the mount command may be exceeded. + // The idLength should be selected such that following equation + // is true (512 is a buffer for label metadata, 128 is the + // number of lowers we want to be able to use without having + // to use bind mounts to get all the way to the kernel limit). + // ((idLength + len(linkDir) + 1) * 128) <= (pageSize - 512) + idLength = 26 +) + +type overlayOptions struct { + imageStores []string + layerStores []additionalLayerStore + quota quota.Quota + mountProgram string + skipMountHome bool + mountOptions string + ignoreChownErrors bool + forceMask *os.FileMode +} + +// Driver contains information about the home directory and the list of active mounts that are created using this driver. +type Driver struct { + name string + home string + runhome string + imageStore string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter + quotaCtl *quota.Control + options overlayOptions + naiveDiff graphdriver.DiffDriver + supportsDType bool + supportsVolatile *bool + usingMetacopy bool + + supportsIDMappedMounts *bool +} + +type additionalLayerStore struct { + // path is the directory where this store is available on the host. 
+ path string
+
+ // withReference is true when the store contains image reference information (base64-encoded)
+ // in its layer search path so the path to the diff will be
+ // /base64(reference)/<layerdigest>/
+ withReference bool
+}
+
+var (
+ backingFs = ""
+ projectQuotaSupported = false
+
+ useNaiveDiffLock sync.Once
+ useNaiveDiffOnly bool
+)
+
+func init() {
+ graphdriver.MustRegister("overlay", Init)
+ graphdriver.MustRegister("overlay2", Init)
+}
+
+func hasMetacopyOption(opts []string) bool {
+ for _, s := range opts {
+ if s == "metacopy=on" {
+ return true
+ }
+ }
+ return false
+}
+
+func stripOption(opts []string, option string) []string {
+ for i, s := range opts {
+ if s == option {
+ return stripOption(append(opts[:i], opts[i+1:]...), option)
+ }
+ }
+ return opts
+}
+
+func hasVolatileOption(opts []string) bool {
+ for _, s := range opts {
+ if s == "volatile" {
+ return true
+ }
+ }
+ return false
+}
+
+func getMountProgramFlagFile(path string) string {
+ return filepath.Join(path, mountProgramFlagFile)
+}
+
+func checkSupportVolatile(home, runhome string) (bool, error) {
+ const feature = "volatile"
+ volatileCacheResult, _, err := cachedFeatureCheck(runhome, feature)
+ var usingVolatile bool
+ if err == nil {
+ if volatileCacheResult {
+ logrus.Debugf("Cached value indicated that volatile is being used")
+ } else {
+ logrus.Debugf("Cached value indicated that volatile is not being used")
+ }
+ usingVolatile = volatileCacheResult
+ } else {
+ usingVolatile, err = doesVolatile(home)
+ if err == nil {
+ if usingVolatile {
+ logrus.Debugf("overlay: test mount indicated that volatile is being used")
+ } else {
+ logrus.Debugf("overlay: test mount indicated that volatile is not being used")
+ }
+ if err = cachedFeatureRecord(runhome, feature, usingVolatile, ""); err != nil {
+ return false, fmt.Errorf("recording volatile-being-used status: %w", err)
+ }
+ } else {
+ usingVolatile = false
+ }
+ }
+ return usingVolatile, nil
+}
+
+// checkAndRecordIDMappedSupport checks and stores if the kernel supports mounting overlay on top of an
+// idmapped lower layer. 
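+// The probe result is cached under runhome as a flag file named, following
+// the cachedFeatureSet scheme defined below, either
+// "idmapped-lower-dir-true" or "idmapped-lower-dir-false", so the test
+// mount only has to run once per run directory.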
+func checkAndRecordIDMappedSupport(home, runhome string) (bool, error) { + if os.Geteuid() != 0 { + return false, nil + } + + feature := "idmapped-lower-dir" + overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(runhome, feature) + if err == nil { + if overlayCacheResult { + logrus.Debugf("Cached value indicated that idmapped mounts for overlay are supported") + return true, nil + } + logrus.Debugf("Cached value indicated that idmapped mounts for overlay are not supported") + return false, errors.New(overlayCacheText) + } + supportsIDMappedMounts, err := supportsIdmappedLowerLayers(home) + if err2 := cachedFeatureRecord(runhome, feature, supportsIDMappedMounts, ""); err2 != nil { + return false, fmt.Errorf("recording overlay idmapped mounts support status: %w", err2) + } + return supportsIDMappedMounts, err +} + +func checkAndRecordOverlaySupport(fsMagic graphdriver.FsMagic, home, runhome string) (bool, error) { + var supportsDType bool + + if os.Geteuid() != 0 { + return false, nil + } + + feature := "overlay" + overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(runhome, feature) + if err == nil { + if overlayCacheResult { + logrus.Debugf("Cached value indicated that overlay is supported") + } else { + logrus.Debugf("Cached value indicated that overlay is not supported") + } + supportsDType = overlayCacheResult + if !supportsDType { + return false, errors.New(overlayCacheText) + } + } else { + supportsDType, err = supportsOverlay(home, fsMagic, 0, 0) + if err != nil { + os.Remove(filepath.Join(home, linkDir)) + os.Remove(home) + patherr, ok := err.(*os.PathError) + if ok && patherr.Err == syscall.ENOSPC { + return false, err + } + err = fmt.Errorf("kernel does not support overlay fs: %w", err) + if err2 := cachedFeatureRecord(runhome, feature, false, err.Error()); err2 != nil { + return false, fmt.Errorf("recording overlay not being supported (%v): %w", err, err2) + } + return false, err + } + if err = cachedFeatureRecord(runhome, feature, supportsDType, ""); err != nil { + return false, fmt.Errorf("recording overlay support status: %w", err) + } + } + return supportsDType, nil +} + +func (d *Driver) getSupportsVolatile() (bool, error) { + if d.supportsVolatile != nil { + return *d.supportsVolatile, nil + } + supportsVolatile, err := checkSupportVolatile(d.home, d.runhome) + if err != nil { + return false, err + } + d.supportsVolatile = &supportsVolatile + return supportsVolatile, nil +} + +// isNetworkFileSystem checks if the specified file system is supported by native overlay +// as backing store when running in a user namespace. +func isNetworkFileSystem(fsMagic graphdriver.FsMagic) bool { + switch fsMagic { + // a bunch of network file systems... + case graphdriver.FsMagicNfsFs, graphdriver.FsMagicSmbFs, graphdriver.FsMagicAcfs, + graphdriver.FsMagicAfs, graphdriver.FsMagicCephFs, graphdriver.FsMagicCIFS, + graphdriver.FsMagicFHGFSFs, graphdriver.FsMagicGPFS, graphdriver.FsMagicIBRIX, + graphdriver.FsMagicKAFS, graphdriver.FsMagicLUSTRE, graphdriver.FsMagicNCP, + graphdriver.FsMagicNFSD, graphdriver.FsMagicOCFS2, graphdriver.FsMagicPANFS, + graphdriver.FsMagicPRLFS, graphdriver.FsMagicSMB2, graphdriver.FsMagicSNFS, + graphdriver.FsMagicVBOXSF, graphdriver.FsMagicVXFS: + return true + } + return false +} + +// Init returns the a native diff driver for overlay filesystem. +// If overlay filesystem is not supported on the host, a wrapped graphdriver.ErrNotSupported is returned as error. 
+// If an overlay filesystem is not supported over an existing filesystem then a wrapped graphdriver.ErrIncompatibleFS is returned. +func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) { + // If custom --imagestore is selected never + // ditch the original graphRoot, instead add it as + // additionalImageStore so its images can still be + // read and used. + if options.ImageStore != "" { + graphRootAsAdditionalStore := fmt.Sprintf("AdditionalImageStore=%s", options.ImageStore) + options.DriverOptions = append(options.DriverOptions, graphRootAsAdditionalStore) + // complete base name with driver name included + options.ImageStore = filepath.Join(options.ImageStore, "overlay") + } + opts, err := parseOptions(options.DriverOptions) + if err != nil { + return nil, err + } + + fsMagic, err := graphdriver.GetFSMagic(home) + if err != nil { + return nil, err + } + fsName, ok := graphdriver.FsNames[fsMagic] + if !ok { + fsName = "" + } + backingFs = fsName + + runhome := filepath.Join(options.RunRoot, filepath.Base(home)) + rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) + if err != nil { + return nil, err + } + + // Create the driver home dir + if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0o755, 0, 0); err != nil { + return nil, err + } + + if options.ImageStore != "" { + if err := idtools.MkdirAllAs(path.Join(options.ImageStore, linkDir), 0o755, 0, 0); err != nil { + return nil, err + } + } + + if err := idtools.MkdirAllAs(runhome, 0o700, rootUID, rootGID); err != nil { + return nil, err + } + + if opts.mountProgram == "" { + if supported, err := SupportsNativeOverlay(home, runhome); err != nil { + return nil, err + } else if !supported { + if path, err := exec.LookPath("fuse-overlayfs"); err == nil { + opts.mountProgram = path + } + } + } + + if opts.mountProgram != "" { + if unshare.IsRootless() && isNetworkFileSystem(fsMagic) && opts.forceMask == nil { + m := os.FileMode(0o700) + opts.forceMask = &m + logrus.Warnf("Network file system detected as backing store. Enforcing overlay option `force_mask=\"%o\"`. Add it to storage.conf to silence this warning", m) + } + + if err := os.WriteFile(getMountProgramFlagFile(home), []byte("true"), 0o600); err != nil { + return nil, err + } + } else { + if opts.forceMask != nil { + return nil, errors.New("'force_mask' is supported only with 'mount_program'") + } + // check if they are running over btrfs, aufs, overlay, or ecryptfs + switch fsMagic { + case graphdriver.FsMagicAufs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs: + return nil, fmt.Errorf("'overlay' is not supported over %s, a mount_program is required: %w", backingFs, graphdriver.ErrIncompatibleFS) + } + if unshare.IsRootless() && isNetworkFileSystem(fsMagic) { + return nil, fmt.Errorf("a network file system with user namespaces is not supported. 
Please use a mount_program: %w", graphdriver.ErrIncompatibleFS) + } + } + + var usingMetacopy bool + var supportsDType bool + var supportsVolatile *bool + if opts.mountProgram != "" { + supportsDType = true + t := true + supportsVolatile = &t + } else { + supportsDType, err = checkAndRecordOverlaySupport(fsMagic, home, runhome) + if err != nil { + return nil, err + } + feature := fmt.Sprintf("metacopy(%s)", opts.mountOptions) + metacopyCacheResult, _, err := cachedFeatureCheck(runhome, feature) + if err == nil { + if metacopyCacheResult { + logrus.Debugf("Cached value indicated that metacopy is being used") + } else { + logrus.Debugf("Cached value indicated that metacopy is not being used") + } + usingMetacopy = metacopyCacheResult + } else { + usingMetacopy, err = doesMetacopy(home, opts.mountOptions) + if err == nil { + if usingMetacopy { + logrus.Debugf("overlay: test mount indicated that metacopy is being used") + } else { + logrus.Debugf("overlay: test mount indicated that metacopy is not being used") + } + if err = cachedFeatureRecord(runhome, feature, usingMetacopy, ""); err != nil { + return nil, fmt.Errorf("recording metacopy-being-used status: %w", err) + } + } else { + logrus.Infof("overlay: test mount did not indicate whether or not metacopy is being used: %v", err) + return nil, err + } + } + } + + if !opts.skipMountHome { + if err := mount.MakePrivate(home); err != nil { + return nil, err + } + } + + fileSystemType := graphdriver.FsMagicOverlay + if opts.mountProgram != "" { + fileSystemType = graphdriver.FsMagicFUSE + } + + d := &Driver{ + name: "overlay", + home: home, + imageStore: options.ImageStore, + runhome: runhome, + uidMaps: options.UIDMaps, + gidMaps: options.GIDMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(fileSystemType)), + supportsDType: supportsDType, + usingMetacopy: usingMetacopy, + supportsVolatile: supportsVolatile, + options: *opts, + } + + d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d)) + if backingFs == "xfs" { + // Try to enable project quota support over xfs. + if d.quotaCtl, err = quota.NewControl(home); err == nil { + projectQuotaSupported = true + } else if opts.quota.Size > 0 || opts.quota.Inodes > 0 { + return nil, fmt.Errorf("storage options overlay.size and overlay.inodes not supported. Filesystem does not support Project Quota: %w", err) + } + } else if opts.quota.Size > 0 || opts.quota.Inodes > 0 { + // if xfs is not the backing fs then error out if the storage-opt overlay.size is used. + return nil, fmt.Errorf("storage option overlay.size and overlay.inodes only supported for backingFS XFS. 
Found %v", backingFs) + } + + logrus.Debugf("backingFs=%s, projectQuotaSupported=%v, useNativeDiff=%v, usingMetacopy=%v", backingFs, projectQuotaSupported, !d.useNaiveDiff(), d.usingMetacopy) + + return d, nil +} + +func parseOptions(options []string) (*overlayOptions, error) { + o := &overlayOptions{} + for _, option := range options { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return nil, err + } + trimkey := strings.ToLower(key) + trimkey = strings.TrimPrefix(trimkey, "overlay.") + trimkey = strings.TrimPrefix(trimkey, "overlay2.") + trimkey = strings.TrimPrefix(trimkey, ".") + switch trimkey { + case "override_kernel_check": + logrus.Debugf("overlay: override_kernel_check option was specified, but is no longer necessary") + case "mountopt": + o.mountOptions = val + case "size": + logrus.Debugf("overlay: size=%s", val) + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + o.quota.Size = uint64(size) + case "inodes": + logrus.Debugf("overlay: inodes=%s", val) + inodes, err := strconv.ParseUint(val, 10, 64) + if err != nil { + return nil, err + } + o.quota.Inodes = uint64(inodes) + case "imagestore", "additionalimagestore": + logrus.Debugf("overlay: imagestore=%s", val) + // Additional read only image stores to use for lower paths + if val == "" { + continue + } + for _, store := range strings.Split(val, ",") { + store = filepath.Clean(store) + if !filepath.IsAbs(store) { + return nil, fmt.Errorf("overlay: image path %q is not absolute. Can not be relative", store) + } + st, err := os.Stat(store) + if err != nil { + return nil, fmt.Errorf("overlay: can't stat imageStore dir %s: %w", store, err) + } + if !st.IsDir() { + return nil, fmt.Errorf("overlay: image path %q must be a directory", store) + } + o.imageStores = append(o.imageStores, store) + } + case "additionallayerstore": + logrus.Debugf("overlay: additionallayerstore=%s", val) + // Additional read only layer stores to use for lower paths + if val == "" { + continue + } + for _, lstore := range strings.Split(val, ",") { + elems := strings.Split(lstore, ":") + lstore = filepath.Clean(elems[0]) + if !filepath.IsAbs(lstore) { + return nil, fmt.Errorf("overlay: additionallayerstore path %q is not absolute. 
Can not be relative", lstore) + } + st, err := os.Stat(lstore) + if err != nil { + return nil, fmt.Errorf("overlay: can't stat additionallayerstore dir: %w", err) + } + if !st.IsDir() { + return nil, fmt.Errorf("overlay: additionallayerstore path %q must be a directory", lstore) + } + var withReference bool + for _, e := range elems[1:] { + switch e { + case "ref": + if withReference { + return nil, fmt.Errorf("overlay: additionallayerstore config of %q contains %q option twice", lstore, e) + } + withReference = true + default: + return nil, fmt.Errorf("overlay: additionallayerstore config %q contains unknown option %q", lstore, e) + } + } + o.layerStores = append(o.layerStores, additionalLayerStore{ + path: lstore, + withReference: withReference, + }) + } + case "mount_program": + logrus.Debugf("overlay: mount_program=%s", val) + if val != "" { + _, err := os.Stat(val) + if err != nil { + return nil, fmt.Errorf("overlay: can't stat program %q: %w", val, err) + } + } + o.mountProgram = val + case "skip_mount_home": + logrus.Debugf("overlay: skip_mount_home=%s", val) + o.skipMountHome, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + case "ignore_chown_errors": + logrus.Debugf("overlay: ignore_chown_errors=%s", val) + o.ignoreChownErrors, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + case "force_mask": + logrus.Debugf("overlay: force_mask=%s", val) + var mask int64 + switch val { + case "shared": + mask = 0o755 + case "private": + mask = 0o700 + default: + mask, err = strconv.ParseInt(val, 8, 32) + if err != nil { + return nil, err + } + } + m := os.FileMode(mask) + o.forceMask = &m + default: + return nil, fmt.Errorf("overlay: Unknown option %s", key) + } + } + return o, nil +} + +func cachedFeatureSet(feature string, set bool) string { + if set { + return fmt.Sprintf("%s-true", feature) + } + return fmt.Sprintf("%s-false", feature) +} + +func cachedFeatureCheck(runhome, feature string) (supported bool, text string, err error) { + content, err := os.ReadFile(filepath.Join(runhome, cachedFeatureSet(feature, true))) + if err == nil { + return true, string(content), nil + } + content, err = os.ReadFile(filepath.Join(runhome, cachedFeatureSet(feature, false))) + if err == nil { + return false, string(content), nil + } + return false, "", err +} + +func cachedFeatureRecord(runhome, feature string, supported bool, text string) (err error) { + f, err := os.Create(filepath.Join(runhome, cachedFeatureSet(feature, supported))) + if f != nil { + if text != "" { + fmt.Fprintf(f, "%s", text) + } + f.Close() + } + return err +} + +func SupportsNativeOverlay(home, runhome string) (bool, error) { + if os.Geteuid() != 0 || home == "" || runhome == "" { + return false, nil + } + + var contents string + flagContent, err := os.ReadFile(getMountProgramFlagFile(home)) + if err == nil { + contents = strings.TrimSpace(string(flagContent)) + } + switch contents { + case "true": + logrus.Debugf("overlay: storage already configured with a mount-program") + return false, nil + default: + needsMountProgram, err := scanForMountProgramIndicators(home) + if err != nil && !os.IsNotExist(err) { + return false, err + } + if err := os.WriteFile(getMountProgramFlagFile(home), []byte(fmt.Sprintf("%t", needsMountProgram)), 0o600); err != nil && !os.IsNotExist(err) { + return false, err + } + if needsMountProgram { + return false, nil + } + // fall through to check if we find ourselves needing to use a + // mount program now + case "false": + } + + for _, dir := range []string{home, 
runhome} { + if _, err := os.Stat(dir); err != nil { + _ = idtools.MkdirAllAs(dir, 0o700, 0, 0) + } + } + + fsMagic, err := graphdriver.GetFSMagic(home) + if err != nil { + return false, err + } + + supportsDType, _ := checkAndRecordOverlaySupport(fsMagic, home, runhome) + return supportsDType, nil +} + +func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGID int) (supportsDType bool, err error) { + // We can try to modprobe overlay first + + selinuxLabelTest := selinux.PrivContainerMountLabel() + + exec.Command("modprobe", "overlay").Run() + + logLevel := logrus.ErrorLevel + if unshare.IsRootless() { + logLevel = logrus.DebugLevel + } + + layerDir, err := os.MkdirTemp(home, "compat") + if err != nil { + patherr, ok := err.(*os.PathError) + if ok && patherr.Err == syscall.ENOSPC { + return false, err + } + } + if err == nil { + // Check if reading the directory's contents populates the d_type field, which is required + // for proper operation of the overlay filesystem. + supportsDType, err = fsutils.SupportsDType(layerDir) + if err != nil { + return false, err + } + if !supportsDType { + return false, overlayutils.ErrDTypeNotSupported("overlay", backingFs) + } + + // Try a test mount in the specific location we're looking at using. + mergedDir := filepath.Join(layerDir, "merged") + mergedSubdir := filepath.Join(mergedDir, "subdir") + lower1Dir := filepath.Join(layerDir, "lower1") + lower2Dir := filepath.Join(layerDir, "lower2") + lower2Subdir := filepath.Join(lower2Dir, "subdir") + lower2SubdirFile := filepath.Join(lower2Subdir, "file") + upperDir := filepath.Join(layerDir, "upper") + workDir := filepath.Join(layerDir, "work") + defer func() { + // Permitted to fail, since the various subdirectories + // can be empty or not even there, and the home might + // legitimately be not empty + _ = unix.Unmount(mergedDir, unix.MNT_DETACH) + _ = os.RemoveAll(layerDir) + _ = os.Remove(home) + }() + _ = idtools.MkdirAs(mergedDir, 0o700, rootUID, rootGID) + _ = idtools.MkdirAs(lower1Dir, 0o700, rootUID, rootGID) + _ = idtools.MkdirAs(lower2Dir, 0o700, rootUID, rootGID) + _ = idtools.MkdirAs(lower2Subdir, 0o700, rootUID, rootGID) + _ = idtools.MkdirAs(upperDir, 0o700, rootUID, rootGID) + _ = idtools.MkdirAs(workDir, 0o700, rootUID, rootGID) + f, err := os.Create(lower2SubdirFile) + if err != nil { + logrus.Debugf("Unable to create test file: %v", err) + return supportsDType, fmt.Errorf("unable to create test file: %w", err) + } + f.Close() + flags := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", lower1Dir, lower2Dir, upperDir, workDir) + if selinux.GetEnabled() && + selinux.SecurityCheckContext(selinuxLabelTest) == nil { + // Linux 5.11 introduced unprivileged overlay mounts but it has an issue + // when used together with selinux labels. + // Check that overlay supports selinux labels as well. 
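+ // The label ends up as a context= mount option, e.g. (illustrative):
+ // lowerdir=...,upperdir=...,workdir=...,context="system_u:object_r:container_file_t:s0"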
+ flags = label.FormatMountLabel(flags, selinuxLabelTest) + } + if unshare.IsRootless() { + flags = fmt.Sprintf("%s,userxattr", flags) + } + if err := syscall.Mknod(filepath.Join(upperDir, "whiteout"), syscall.S_IFCHR|0o600, int(unix.Mkdev(0, 0))); err != nil { + logrus.Debugf("Unable to create kernel-style whiteout: %v", err) + return supportsDType, fmt.Errorf("unable to create kernel-style whiteout: %w", err) + } + + if len(flags) < unix.Getpagesize() { + err := unix.Mount("overlay", mergedDir, "overlay", 0, flags) + if err == nil { + if err = os.RemoveAll(mergedSubdir); err != nil { + logrus.StandardLogger().Logf(logLevel, "overlay: removing an item from the merged directory failed: %v", err) + return supportsDType, fmt.Errorf("kernel returned %v when we tried to delete an item in the merged directory: %w", err, graphdriver.ErrNotSupported) + } + logrus.Debugf("overlay: test mount with multiple lowers succeeded") + return supportsDType, nil + } + logrus.Debugf("overlay: test mount with multiple lowers failed: %v", err) + } + flags = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower1Dir, upperDir, workDir) + if selinux.GetEnabled() { + flags = label.FormatMountLabel(flags, selinuxLabelTest) + } + if len(flags) < unix.Getpagesize() { + err := unix.Mount("overlay", mergedDir, "overlay", 0, flags) + if err == nil { + logrus.StandardLogger().Logf(logLevel, "overlay: test mount with multiple lowers failed, but succeeded with a single lower") + return supportsDType, fmt.Errorf("kernel too old to provide multiple lowers feature for overlay: %w", graphdriver.ErrNotSupported) + } + logrus.Debugf("overlay: test mount with a single lower failed: %v", err) + } + logrus.StandardLogger().Logf(logLevel, "'overlay' is not supported over %s at %q", backingFs, home) + return supportsDType, fmt.Errorf("'overlay' is not supported over %s at %q: %w", backingFs, home, graphdriver.ErrIncompatibleFS) + } + + logrus.StandardLogger().Logf(logLevel, "'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") + return supportsDType, fmt.Errorf("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.: %w", graphdriver.ErrNotSupported) +} + +func (d *Driver) useNaiveDiff() bool { + if d.useComposeFs() { + return true + } + + useNaiveDiffLock.Do(func() { + if d.options.mountProgram != "" { + useNaiveDiffOnly = true + return + } + feature := fmt.Sprintf("native-diff(%s)", d.options.mountOptions) + nativeDiffCacheResult, nativeDiffCacheText, err := cachedFeatureCheck(d.runhome, feature) + if err == nil { + if nativeDiffCacheResult { + logrus.Debugf("Cached value indicated that native-diff is usable") + } else { + logrus.Debugf("Cached value indicated that native-diff is not being used") + logrus.Info(nativeDiffCacheText) + } + useNaiveDiffOnly = !nativeDiffCacheResult + return + } + if err := doesSupportNativeDiff(d.home, d.options.mountOptions); err != nil { + nativeDiffCacheText = fmt.Sprintf("Not using native diff for overlay, this may cause degraded performance for building images: %v", err) + logrus.Info(nativeDiffCacheText) + useNaiveDiffOnly = true + } + cachedFeatureRecord(d.runhome, feature, !useNaiveDiffOnly, nativeDiffCacheText) + }) + return useNaiveDiffOnly +} + +func (d *Driver) String() string { + return d.name +} + +// Status returns current driver information in a two dimensional string array. 
+// Output contains "Backing Filesystem" used in this implementation. +func (d *Driver) Status() [][2]string { + supportsVolatile, err := d.getSupportsVolatile() + if err != nil { + supportsVolatile = false + } + return [][2]string{ + {"Backing Filesystem", backingFs}, + {"Supports d_type", strconv.FormatBool(d.supportsDType)}, + {"Native Overlay Diff", strconv.FormatBool(!d.useNaiveDiff())}, + {"Using metacopy", strconv.FormatBool(d.usingMetacopy)}, + {"Supports shifting", strconv.FormatBool(d.SupportsShifting())}, + {"Supports volatile", strconv.FormatBool(supportsVolatile)}, + } +} + +// Metadata returns meta data about the overlay driver such as +// LowerDir, UpperDir, WorkDir and MergeDir used to store data. +func (d *Driver) Metadata(id string) (map[string]string, error) { + dir, imagestore, _ := d.dir2(id) + if _, err := os.Stat(dir); err != nil { + return nil, err + } + workDirBase := dir + if imagestore != "" { + if _, err := os.Stat(dir); err != nil { + return nil, err + } + workDirBase = imagestore + } + + metadata := map[string]string{ + "WorkDir": path.Join(workDirBase, "work"), + "MergedDir": path.Join(workDirBase, "merged"), + "UpperDir": path.Join(workDirBase, "diff"), + } + + lowerDirs, err := d.getLowerDirs(id) + if err != nil { + return nil, err + } + if len(lowerDirs) > 0 { + metadata["LowerDir"] = strings.Join(lowerDirs, ":") + } + + return metadata, nil +} + +// Cleanup any state created by overlay which should be cleaned when daemon +// is being shutdown. For now, we just have to unmount the bind mounted +// we had created. +func (d *Driver) Cleanup() error { + _ = os.RemoveAll(d.getStagingDir()) + return mount.Unmount(d.home) +} + +// LookupAdditionalLayer looks up additional layer store by the specified +// digest and ref and returns an object representing that layer. +// This API is experimental and can be changed without bumping the major version number. +// TODO: to remove the comment once it's no longer experimental. +func (d *Driver) LookupAdditionalLayer(dgst digest.Digest, ref string) (graphdriver.AdditionalLayer, error) { + l, err := d.getAdditionalLayerPath(dgst, ref) + if err != nil { + return nil, err + } + // Tell the additional layer store that we use this layer. + // This will increase reference counter on the store's side. + // This will be decreased on Release() method. + notifyUseAdditionalLayer(l) + return &additionalLayer{ + path: l, + d: d, + }, nil +} + +// LookupAdditionalLayerByID looks up additional layer store by the specified +// ID and returns an object representing that layer. +// This API is experimental and can be changed without bumping the major version number. +// TODO: to remove the comment once it's no longer experimental. +func (d *Driver) LookupAdditionalLayerByID(id string) (graphdriver.AdditionalLayer, error) { + l, err := d.getAdditionalLayerPathByID(id) + if err != nil { + return nil, err + } + // Tell the additional layer store that we use this layer. + // This will increase reference counter on the store's side. + // This will be decreased on Release() method. + notifyUseAdditionalLayer(l) + return &additionalLayer{ + path: l, + d: d, + }, nil +} + +// CreateFromTemplate creates a layer with the same contents and parent as another layer. 
+func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error { + if readWrite { + return d.CreateReadWrite(id, template, opts) + } + return d.Create(id, template, opts) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + if opts != nil && len(opts.StorageOpt) != 0 && !projectQuotaSupported { + return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option") + } + + if opts == nil { + opts = &graphdriver.CreateOpts{ + StorageOpt: map[string]string{}, + } + } + + if _, ok := opts.StorageOpt["size"]; !ok { + if opts.StorageOpt == nil { + opts.StorageOpt = map[string]string{} + } + opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10) + } + + if _, ok := opts.StorageOpt["inodes"]; !ok { + if opts.StorageOpt == nil { + opts.StorageOpt = map[string]string{} + } + opts.StorageOpt["inodes"] = strconv.FormatUint(d.options.quota.Inodes, 10) + } + + return d.create(id, parent, opts, false) +} + +// Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. +// The parent filesystem is used to configure these directories for the overlay. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { + if opts != nil && len(opts.StorageOpt) != 0 { + if _, ok := opts.StorageOpt["size"]; ok { + return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers") + } + + if _, ok := opts.StorageOpt["inodes"]; ok { + return fmt.Errorf("--storage-opt inodes is only supported for ReadWrite Layers") + } + } + + return d.create(id, parent, opts, true) +} + +func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disableQuota bool) (retErr error) { + dir, imageStore, _ := d.dir2(id) + + uidMaps := d.uidMaps + gidMaps := d.gidMaps + + if opts != nil && opts.IDMappings != nil { + uidMaps = opts.IDMappings.UIDs() + gidMaps = opts.IDMappings.GIDs() + } + + // Make the link directory if it does not exist + if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0o755, 0, 0); err != nil { + return err + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return err + } + + idPair := idtools.IDPair{ + UID: rootUID, + GID: rootGID, + } + + if err := idtools.MkdirAllAndChownNew(path.Dir(dir), 0o755, idPair); err != nil { + return err + } + workDirBase := dir + if imageStore != "" { + workDirBase = imageStore + if err := idtools.MkdirAllAndChownNew(path.Dir(imageStore), 0o755, idPair); err != nil { + return err + } + } + if parent != "" { + parentBase, parentImageStore, inAdditionalStore := d.dir2(parent) + // If parentBase path is additional image store, select the image contained in parentBase. 
+ // See https://github.com/containers/podman/issues/19748 + if parentImageStore != "" && !inAdditionalStore { + parentBase = parentImageStore + } + st, err := system.Stat(filepath.Join(parentBase, "diff")) + if err != nil { + return err + } + rootUID = int(st.UID()) + rootGID = int(st.GID()) + } + + if _, err := system.Lstat(dir); err == nil { + logrus.Warnf("Trying to create a layer %#v while directory %q already exists; removing it first", id, dir) + // Don’t just os.RemoveAll(dir) here; d.Remove also removes the link in linkDir, + // so that we can’t end up with two symlinks in linkDir pointing to the same layer. + if err := d.Remove(id); err != nil { + return fmt.Errorf("removing a pre-existing layer directory %q: %w", dir, err) + } + } + + if err := idtools.MkdirAllAndChownNew(dir, 0o700, idPair); err != nil { + return err + } + if imageStore != "" { + if err := idtools.MkdirAllAndChownNew(imageStore, 0o700, idPair); err != nil { + return err + } + } + + defer func() { + // Clean up on failure + if retErr != nil { + if err2 := os.RemoveAll(dir); err2 != nil { + logrus.Errorf("While recovering from a failure creating a layer, error deleting %#v: %v", dir, err2) + } + if imageStore != "" { + if err2 := os.RemoveAll(workDirBase); err2 != nil { + logrus.Errorf("While recovering from a failure creating a layer, error deleting %#v: %v", workDirBase, err2) + } + } + } + }() + + if d.quotaCtl != nil && !disableQuota { + quota := quota.Quota{} + if opts != nil && len(opts.StorageOpt) > 0 { + driver := &Driver{} + if err := d.parseStorageOpt(opts.StorageOpt, driver); err != nil { + return err + } + if driver.options.quota.Size > 0 { + quota.Size = driver.options.quota.Size + } + if driver.options.quota.Inodes > 0 { + quota.Inodes = driver.options.quota.Inodes + } + } + // Set container disk quota limit + // If it is set to 0, we will track the disk usage, but not enforce a limit + if err := d.quotaCtl.SetQuota(dir, quota); err != nil { + return err + } + if imageStore != "" { + if err := d.quotaCtl.SetQuota(imageStore, quota); err != nil { + return err + } + } + } + + perms := defaultPerms + if d.options.forceMask != nil { + perms = *d.options.forceMask + } + + if parent != "" { + parentBase, parentImageStore, inAdditionalStore := d.dir2(parent) + // If parentBase path is additional image store, select the image contained in parentBase. 
+ // See https://github.com/containers/podman/issues/19748 + if parentImageStore != "" && !inAdditionalStore { + parentBase = parentImageStore + } + st, err := system.Stat(filepath.Join(parentBase, "diff")) + if err != nil { + return err + } + perms = os.FileMode(st.Mode()) + } + + if err := idtools.MkdirAs(path.Join(workDirBase, "diff"), perms, rootUID, rootGID); err != nil { + return err + } + + lid := generateID(idLength) + + linkBase := path.Join("..", id, "diff") + if imageStore != "" { + linkBase = path.Join(imageStore, "diff") + } + if err := os.Symlink(linkBase, path.Join(d.home, linkDir, lid)); err != nil { + return err + } + + // Write link id to link file + if err := os.WriteFile(path.Join(dir, "link"), []byte(lid), 0o644); err != nil { + return err + } + + if err := idtools.MkdirAs(path.Join(workDirBase, "work"), 0o700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(workDirBase, "merged"), 0o700, rootUID, rootGID); err != nil { + return err + } + + // if no parent directory, create a dummy lower directory and skip writing a "lowers" file + if parent == "" { + return idtools.MkdirAs(path.Join(dir, "empty"), 0o700, rootUID, rootGID) + } + + lower, err := d.getLower(parent) + if err != nil { + return err + } + if lower != "" { + if err := os.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0o666); err != nil { + return err + } + } + + return nil +} + +// Parse overlay storage options +func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { + // Read size to set the disk project quota per container + for key, val := range storageOpt { + key := strings.ToLower(key) + switch key { + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return err + } + driver.options.quota.Size = uint64(size) + case "inodes": + inodes, err := strconv.ParseUint(val, 10, 64) + if err != nil { + return err + } + driver.options.quota.Inodes = uint64(inodes) + default: + return fmt.Errorf("unknown option %s", key) + } + } + + return nil +} + +func (d *Driver) getLower(parent string) (string, error) { + parentDir := d.dir(parent) + + // Ensure parent exists + if _, err := os.Lstat(parentDir); err != nil { + return "", err + } + + // Read Parent link fileA + parentLink, err := os.ReadFile(path.Join(parentDir, "link")) + if err != nil { + if !os.IsNotExist(err) { + return "", err + } + logrus.Warnf("Can't read parent link %q because it does not exist. Going through storage to recreate the missing links.", path.Join(parentDir, "link")) + if err := d.recreateSymlinks(); err != nil { + return "", fmt.Errorf("recreating the links: %w", err) + } + parentLink, err = os.ReadFile(path.Join(parentDir, "link")) + if err != nil { + return "", err + } + } + lowers := []string{path.Join(linkDir, string(parentLink))} + + parentLower, err := os.ReadFile(path.Join(parentDir, lowerFile)) + if err == nil { + parentLowers := strings.Split(string(parentLower), ":") + lowers = append(lowers, parentLowers...) 
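+ // The parent's own link name comes first, then the parent's lowers, so
+ // the value returned here (and later written to this layer's "lower"
+ // file) looks like, illustratively, "l/ABC...:l/DEF...", ordered
+ // uppermost to lowermost.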
+ } + return strings.Join(lowers, ":"), nil +} + +func (d *Driver) dir(id string) string { + p, _, _ := d.dir2(id) + return p +} + +func (d *Driver) dir2(id string) (string, string, bool) { + newpath := path.Join(d.home, id) + imageStore := "" + if d.imageStore != "" { + imageStore = path.Join(d.imageStore, id) + } + if _, err := os.Stat(newpath); err != nil { + for _, p := range d.AdditionalImageStores() { + l := path.Join(p, d.name, id) + _, err = os.Stat(l) + if err == nil { + return l, imageStore, true + } + } + } + return newpath, imageStore, false +} + +func (d *Driver) getLowerDirs(id string) ([]string, error) { + var lowersArray []string + lowers, err := os.ReadFile(path.Join(d.dir(id), lowerFile)) + if err == nil { + for _, s := range strings.Split(string(lowers), ":") { + lower := d.dir(s) + lp, err := os.Readlink(lower) + // if the link does not exist, we lost the symlinks during a sudden reboot. + // Let's go ahead and recreate those symlinks. + if err != nil { + if os.IsNotExist(err) { + logrus.Warnf("Can't read link %q because it does not exist. A storage corruption might have occurred, attempting to recreate the missing symlinks. It might be best wipe the storage to avoid further errors due to storage corruption.", lower) + if err := d.recreateSymlinks(); err != nil { + return nil, fmt.Errorf("recreating the missing symlinks: %w", err) + } + // let's call Readlink on lower again now that we have recreated the missing symlinks + lp, err = os.Readlink(lower) + if err != nil { + return nil, err + } + } else { + return nil, err + } + } + lowersArray = append(lowersArray, path.Clean(d.dir(path.Join("link", lp)))) + } + } else if !os.IsNotExist(err) { + return nil, err + } + return lowersArray, nil +} + +func (d *Driver) optsAppendMappings(opts string, uidMaps, gidMaps []idtools.IDMap) string { + if uidMaps == nil { + uidMaps = d.uidMaps + } + if gidMaps == nil { + gidMaps = d.gidMaps + } + if uidMaps != nil { + var uids, gids bytes.Buffer + if len(uidMaps) == 1 && uidMaps[0].Size == 1 { + uids.WriteString(fmt.Sprintf("squash_to_uid=%d", uidMaps[0].HostID)) + } else { + uids.WriteString("uidmapping=") + for _, i := range uidMaps { + if uids.Len() > 0 { + uids.WriteString(":") + } + uids.WriteString(fmt.Sprintf("%d:%d:%d", i.ContainerID, i.HostID, i.Size)) + } + } + if len(gidMaps) == 1 && gidMaps[0].Size == 1 { + gids.WriteString(fmt.Sprintf("squash_to_gid=%d", gidMaps[0].HostID)) + } else { + gids.WriteString("gidmapping=") + for _, i := range gidMaps { + if gids.Len() > 0 { + gids.WriteString(":") + } + gids.WriteString(fmt.Sprintf("%d:%d:%d", i.ContainerID, i.HostID, i.Size)) + } + } + return fmt.Sprintf("%s,%s,%s", opts, uids.String(), gids.String()) + } + return opts +} + +// Remove cleans the directories that are created for this id. +func (d *Driver) Remove(id string) error { + dir := d.dir(id) + lid, err := os.ReadFile(path.Join(dir, "link")) + if err == nil { + if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil { + logrus.Debugf("Failed to remove link: %v", err) + } + } + + d.releaseAdditionalLayerByID(id) + + if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) { + return err + } + if d.quotaCtl != nil { + d.quotaCtl.ClearQuota(dir) + if d.imageStore != "" { + d.quotaCtl.ClearQuota(d.imageStore) + } + } + return nil +} + +// recreateSymlinks goes through the driver's home directory and checks if the diff directory +// under each layer has a symlink created for it under the linkDir. 
If the symlink does not +// exist, it creates them +func (d *Driver) recreateSymlinks() error { + // We have at most 3 corrective actions per layer, so 10 iterations is plenty. + const maxIterations = 10 + + // List all the directories under the home directory + dirs, err := os.ReadDir(d.home) + if err != nil { + return fmt.Errorf("reading driver home directory %q: %w", d.home, err) + } + // This makes the link directory if it doesn't exist + if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0o755, 0, 0); err != nil { + return err + } + // Keep looping as long as we take some corrective action in each iteration + var errs *multierror.Error + madeProgress := true + iterations := 0 + for madeProgress { + errs = nil + madeProgress = false + // Check that for each layer, there's a link in "l" with the name in + // the layer's "link" file that points to the layer's "diff" directory. + for _, dir := range dirs { + // Skip over the linkDir and anything that is not a directory + if dir.Name() == linkDir || !dir.IsDir() { + continue + } + // Read the "link" file under each layer to get the name of the symlink + data, err := os.ReadFile(path.Join(d.dir(dir.Name()), "link")) + if err != nil { + errs = multierror.Append(errs, fmt.Errorf("reading name of symlink for %q: %w", dir.Name(), err)) + continue + } + linkPath := path.Join(d.home, linkDir, strings.Trim(string(data), "\n")) + // Check if the symlink exists, and if it doesn't, create it again with the + // name we got from the "link" file + _, err = os.Lstat(linkPath) + if err != nil && os.IsNotExist(err) { + if err := os.Symlink(path.Join("..", dir.Name(), "diff"), linkPath); err != nil { + errs = multierror.Append(errs, err) + continue + } + madeProgress = true + } else if err != nil { + errs = multierror.Append(errs, err) + continue + } + } + + // linkDirFullPath is the full path to the linkDir + linkDirFullPath := filepath.Join(d.home, "l") + // Now check if we somehow lost a "link" file, by making sure + // that each symlink we have corresponds to one. + links, err := os.ReadDir(linkDirFullPath) + if err != nil { + errs = multierror.Append(errs, err) + continue + } + // Go through all of the symlinks in the "l" directory + for _, link := range links { + // Read the symlink's target, which should be "../$layer/diff" + target, err := os.Readlink(filepath.Join(linkDirFullPath, link.Name())) + if err != nil { + errs = multierror.Append(errs, err) + continue + } + targetComponents := strings.Split(target, string(os.PathSeparator)) + if len(targetComponents) != 3 || targetComponents[0] != ".." || targetComponents[2] != "diff" { + errs = multierror.Append(errs, fmt.Errorf("link target of %q looks weird: %q", link, target)) + // force the link to be recreated on the next pass + if err := os.Remove(filepath.Join(linkDirFullPath, link.Name())); err != nil { + if !os.IsNotExist(err) { + errs = multierror.Append(errs, fmt.Errorf("removing link %q: %w", link, err)) + } // else don’t report any error, but also don’t set madeProgress. + continue + } + madeProgress = true + continue + } + // Reconstruct the name of the target's link file and check that + // it has the basename of our symlink in it. + targetID := targetComponents[1] + linkFile := filepath.Join(d.dir(targetID), "link") + data, err := os.ReadFile(linkFile) + if err != nil || string(data) != link.Name() { + // NOTE: If two or more links point to the same target, we will update linkFile + // with every value of link.Name(), and set madeProgress = true every time. 
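+ // In that pathological case the outer loop cannot converge on its own
+ // and only terminates once maxIterations is reached.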
+ if err := os.WriteFile(linkFile, []byte(link.Name()), 0o644); err != nil { + errs = multierror.Append(errs, fmt.Errorf("correcting link for layer %s: %w", targetID, err)) + continue + } + madeProgress = true + } + } + iterations++ + if iterations >= maxIterations { + errs = multierror.Append(errs, fmt.Errorf("reached %d iterations in overlay graph driver’s recreateSymlink, giving up", iterations)) + break + } + } + if errs != nil { + return errs.ErrorOrNil() + } + return nil +} + +// Get creates and mounts the required file system for the given id and returns the mount path. +func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { + return d.get(id, false, options) +} + +func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts) (_ string, retErr error) { + dir, imageStore, inAdditionalStore := d.dir2(id) + if _, err := os.Stat(dir); err != nil { + return "", err + } + workDirBase := dir + if imageStore != "" { + workDirBase = imageStore + } + readWrite := !inAdditionalStore + + if !d.SupportsShifting() || options.DisableShifting { + disableShifting = true + } + + logLevel := logrus.WarnLevel + if unshare.IsRootless() { + logLevel = logrus.DebugLevel + } + optsList := options.Options + + needsIDMapping := !disableShifting && len(options.UidMaps) > 0 && len(options.GidMaps) > 0 && d.options.mountProgram == "" + + if len(optsList) == 0 { + if d.options.mountOptions != "" { + optsList = strings.Split(d.options.mountOptions, ",") + } + } else { + // If metacopy=on is present in d.options.mountOptions it must be present in the mount + // options otherwise the kernel refuses to follow the metacopy xattr. + if hasMetacopyOption(strings.Split(d.options.mountOptions, ",")) && !hasMetacopyOption(options.Options) { + if d.usingMetacopy { + logrus.StandardLogger().Logf(logrus.DebugLevel, "Adding metacopy option, configured globally") + optsList = append(optsList, "metacopy=on") + } + } + } + if !d.usingMetacopy { + if hasMetacopyOption(optsList) { + if d.options.mountProgram == "" { + release := "" + var uts unix.Utsname + if err := unix.Uname(&uts); err == nil { + release = " " + string(uts.Release[:]) + " " + string(uts.Version[:]) + } + logrus.StandardLogger().Logf(logLevel, "Ignoring global metacopy option, not supported with booted kernel"+release) + } else { + logrus.Debugf("Ignoring global metacopy option, the mount program doesn't support it") + } + } + optsList = stripOption(optsList, "metacopy=on") + } + + for _, o := range optsList { + if o == "ro" { + readWrite = false + break + } + } + + lowers, err := os.ReadFile(path.Join(dir, lowerFile)) + if err != nil && !os.IsNotExist(err) { + return "", err + } + splitLowers := strings.Split(string(lowers), ":") + if len(splitLowers) > maxDepth { + return "", errors.New("max depth exceeded") + } + + // absLowers is the list of lowers as absolute paths. 
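+ // It is seeded just below with this layer's own extra diff1, diff2, ...
+ // directories, if any, before the parents listed in the "lower" file are
+ // resolved and appended.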
+ absLowers := []string{} + + diffN := 1 + perms := defaultPerms + if d.options.forceMask != nil { + perms = *d.options.forceMask + } + permsKnown := false + st, err := os.Stat(filepath.Join(dir, nameWithSuffix("diff", diffN))) + if err == nil { + perms = os.FileMode(st.Mode()) + permsKnown = true + } + for err == nil { + absLowers = append(absLowers, filepath.Join(dir, nameWithSuffix("diff", diffN))) + diffN++ + st, err = os.Stat(filepath.Join(dir, nameWithSuffix("diff", diffN))) + if err == nil && !permsKnown { + perms = os.FileMode(st.Mode()) + permsKnown = true + } + } + + idmappedMountProcessPid := -1 + if needsIDMapping { + pid, cleanupFunc, err := idmap.CreateUsernsProcess(options.UidMaps, options.GidMaps) + if err != nil { + return "", err + } + idmappedMountProcessPid = int(pid) + defer cleanupFunc() + } + + skipIDMappingLayers := make(map[string]string) + + composefsMounts := []string{} + defer func() { + for _, m := range composefsMounts { + defer unix.Unmount(m, unix.MNT_DETACH) + } + }() + + composeFsLayers := []string{} + composeFsLayersDir := filepath.Join(workDirBase, "composefs-layers") + maybeAddComposefsMount := func(lowerID string, i int, readWrite bool) (string, error) { + composefsBlob := d.getComposefsData(lowerID) + _, err = os.Stat(composefsBlob) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + logrus.Debugf("overlay: using composefs blob %s for lower %s", composefsBlob, lowerID) + + if readWrite && i == 0 { + return "", fmt.Errorf("cannot mount a composefs layer as writeable") + } + + dest := filepath.Join(composeFsLayersDir, fmt.Sprintf("%d", i)) + if err := os.MkdirAll(dest, 0o700); err != nil { + return "", err + } + + if err := mountComposefsBlob(composefsBlob, dest); err != nil { + return "", err + } + composefsMounts = append(composefsMounts, dest) + composeFsPath, err := d.getDiffPath(lowerID) + if err != nil { + return "", err + } + composeFsLayers = append(composeFsLayers, composeFsPath) + skipIDMappingLayers[composeFsPath] = composeFsPath + return dest, nil + } + + diffDir := path.Join(workDirBase, "diff") + + if dest, err := maybeAddComposefsMount(id, 0, readWrite); err != nil { + return "", err + } else if dest != "" { + diffDir = dest + } + + // For each lower, resolve its path, and append it and any additional diffN + // directories to the lowers list. + for i, l := range splitLowers { + if l == "" { + continue + } + + lower := "" + newpath := path.Join(d.home, l) + if st, err := os.Stat(newpath); err != nil { + for _, p := range d.AdditionalImageStores() { + lower = path.Join(p, d.name, l) + if st2, err2 := os.Stat(lower); err2 == nil { + if !permsKnown { + perms = os.FileMode(st2.Mode()) + permsKnown = true + } + break + } + lower = "" + } + // if it is a "not found" error, that means the symlinks were lost in a sudden reboot + // so call the recreateSymlinks function to go through all the layer dirs and recreate + // the symlinks with the name from their respective "link" files + if lower == "" && os.IsNotExist(err) { + logrus.Warnf("Can't stat lower layer %q because it does not exist. 
Going through storage to recreate the missing symlinks.", newpath) + if err := d.recreateSymlinks(); err != nil { + return "", fmt.Errorf("recreating the missing symlinks: %w", err) + } + lower = newpath + } else if lower == "" { + return "", fmt.Errorf("can't stat lower layer %q: %w", newpath, err) + } + } else { + if !permsKnown { + perms = os.FileMode(st.Mode()) + permsKnown = true + } + lower = newpath + } + + linkContent, err := os.Readlink(lower) + if err != nil { + return "", err + } + lowerID := filepath.Base(filepath.Dir(linkContent)) + composefsMount, err := maybeAddComposefsMount(lowerID, i+1, readWrite) + if err != nil { + return "", err + } + if composefsMount != "" { + if needsIDMapping { + if err := idmap.CreateIDMappedMount(composefsMount, composefsMount, idmappedMountProcessPid); err != nil { + return "", fmt.Errorf("create mapped mount for %q: %w", composefsMount, err) + } + skipIDMappingLayers[composefsMount] = composefsMount + // overlay takes a reference on the mount, so it is safe to unmount + // the mapped idmounts as soon as the final overlay file system is mounted. + defer unix.Unmount(composefsMount, unix.MNT_DETACH) + } + absLowers = append(absLowers, composefsMount) + continue + } + + absLowers = append(absLowers, lower) + diffN = 1 + _, err = os.Stat(dumbJoin(lower, "..", nameWithSuffix("diff", diffN))) + for err == nil { + absLowers = append(absLowers, dumbJoin(lower, "..", nameWithSuffix("diff", diffN))) + diffN++ + _, err = os.Stat(dumbJoin(lower, "..", nameWithSuffix("diff", diffN))) + } + } + + if len(composeFsLayers) > 0 { + optsList = append(optsList, "metacopy=on", "redirect_dir=on") + } + + if len(absLowers) == 0 { + absLowers = append(absLowers, path.Join(dir, "empty")) + } + + // user namespace requires this to move a directory from lower to upper. + rootUID, rootGID, err := idtools.GetRootUIDGID(options.UidMaps, options.GidMaps) + if err != nil { + return "", err + } + + if err := idtools.MkdirAllAs(diffDir, perms, rootUID, rootGID); err != nil { + return "", err + } + + mergedDir := path.Join(workDirBase, "merged") + // Create the driver merged dir + if err := idtools.MkdirAs(mergedDir, 0o700, rootUID, rootGID); err != nil && !os.IsExist(err) { + return "", err + } + if count := d.ctr.Increment(mergedDir); count > 1 { + return mergedDir, nil + } + defer func() { + if retErr != nil { + if c := d.ctr.Decrement(mergedDir); c <= 0 { + if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil { + logrus.Errorf("Unmounting %v: %v", mergedDir, mntErr) + } + } + } + }() + + workdir := path.Join(workDirBase, "work") + + if d.options.mountProgram == "" && unshare.IsRootless() { + optsList = append(optsList, "userxattr") + } + + if options.Volatile && !hasVolatileOption(optsList) { + supported, err := d.getSupportsVolatile() + if err != nil { + return "", err + } + // If "volatile" is not supported by the file system, just ignore the request + if supported { + optsList = append(optsList, "volatile") + } + } + + if needsIDMapping { + var newAbsDir []string + idMappedMounts := make(map[string]string) + + mappedRoot := filepath.Join(d.home, id, "mapped") + if err := os.MkdirAll(mappedRoot, 0o700); err != nil { + return "", err + } + + // rewrite the lower dirs to their idmapped mount. 
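+ // Lowers that share the same parent directory reuse a single idmapped
+ // mount, created under mapped/0, mapped/1, ..., and are then
+ // re-expressed as paths relative to that mount.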
+ c := 0
+ for _, absLower := range absLowers {
+ mappedMountSrc := getMappedMountRoot(absLower)
+
+ if _, ok := skipIDMappingLayers[absLower]; ok {
+ newAbsDir = append(newAbsDir, absLower)
+ continue
+ }
+
+ root, found := idMappedMounts[mappedMountSrc]
+ if !found {
+ root = filepath.Join(mappedRoot, fmt.Sprintf("%d", c))
+ c++
+ if err := idmap.CreateIDMappedMount(mappedMountSrc, root, idmappedMountProcessPid); err != nil {
+ return "", fmt.Errorf("create mapped mount for %q on %q: %w", mappedMountSrc, root, err)
+ }
+ idMappedMounts[mappedMountSrc] = root
+
+ // overlay takes a reference on the mount, so it is safe to unmount
+ // the mapped idmounts as soon as the final overlay file system is mounted.
+ defer unix.Unmount(root, unix.MNT_DETACH)
+ }
+
+ // relative path to the layer through the id mapped mount
+ rel, err := filepath.Rel(mappedMountSrc, absLower)
+ if err != nil {
+ return "", err
+ }
+
+ newAbsDir = append(newAbsDir, filepath.Join(root, rel))
+ }
+ absLowers = newAbsDir
+ }
+
+ lowerDirs := strings.Join(absLowers, ":")
+ if len(composeFsLayers) > 0 {
+ composeFsLayersLowerDirs := strings.Join(composeFsLayers, "::")
+ lowerDirs = lowerDirs + "::" + composeFsLayersLowerDirs
+ }
+ // absLowers is not valid anymore now as we have added composeFsLayers to it, so prevent
+ // its usage.
+ absLowers = nil //nolint:ineffassign
+
+ var opts string
+ if readWrite {
+ opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDirs, diffDir, workdir)
+ } else {
+ opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, lowerDirs)
+ }
+ if len(optsList) > 0 {
+ opts = fmt.Sprintf("%s,%s", opts, strings.Join(optsList, ","))
+ }
+
+ mountData := label.FormatMountLabel(opts, options.MountLabel)
+ mountFunc := unix.Mount
+ mountTarget := mergedDir
+
+ pageSize := unix.Getpagesize()
+
+ if d.options.mountProgram != "" {
+ mountFunc = func(source string, target string, mType string, flags uintptr, label string) error {
+ if !disableShifting {
+ label = d.optsAppendMappings(label, options.UidMaps, options.GidMaps)
+ }
+
+ // if forceMask is in place, tell fuse-overlayfs to write the permissions mask to an unprivileged xattr as well.
+ if d.options.forceMask != nil {
+ label = label + ",xattr_permissions=2"
+ }
+
+ mountProgram := exec.Command(d.options.mountProgram, "-o", label, target)
+ mountProgram.Dir = d.home
+ var b bytes.Buffer
+ mountProgram.Stderr = &b
+ err := mountProgram.Run()
+ if err != nil {
+ output := b.String()
+ if output == "" {
+ output = "<stderr empty>"
+ }
+ return fmt.Errorf("using mount program %s: %s: %w", d.options.mountProgram, output, err)
+ }
+ return nil
+ }
+ } else if len(mountData) >= pageSize {
+ // Use mountFrom when the mount data has exceeded the page size. The mount syscall fails if
+ // the mount data cannot fit within a page and relative links make the mount data much
+ // smaller at the expense of requiring a fork exec to chdir(). 
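+ // Only upperdir, workdir and the mount target are shortened here, to
+ // paths relative to d.home (illustratively, <id>/diff and <id>/work);
+ // the re-exec'ed storage-mountfrom helper chdir()s to d.home, and
+ // mountOverlayFromMain above further shrinks lowerdir to open file
+ // descriptors if the data is still too large for one page.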
+ if readWrite { + diffDir := path.Join(id, "diff") + workDir := path.Join(id, "work") + opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDirs, diffDir, workDir) + } else { + opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, lowerDirs) + } + if len(optsList) > 0 { + opts = strings.Join(append([]string{opts}, optsList...), ",") + } + mountData = label.FormatMountLabel(opts, options.MountLabel) + mountFunc = func(source string, target string, mType string, flags uintptr, label string) error { + return mountOverlayFrom(d.home, source, target, mType, flags, label) + } + mountTarget = path.Join(id, "merged") + } + + // overlay has a check in place to prevent mounting the same file system twice + // if volatile was already specified. Yes, the kernel repeats the "work" component. + err = os.RemoveAll(filepath.Join(workdir, "work", "incompat", "volatile")) + if err != nil && !errors.Is(err, os.ErrNotExist) { + return "", err + } + + flags, data := mount.ParseOptions(mountData) + logrus.Debugf("overlay: mount_data=%s", mountData) + if err := mountFunc("overlay", mountTarget, "overlay", uintptr(flags), data); err != nil { + return "", fmt.Errorf("creating overlay mount to %s, mount_data=%q: %w", mountTarget, mountData, err) + } + + return mergedDir, nil +} + +// Put unmounts the mount path created for the give id. +func (d *Driver) Put(id string) error { + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return err + } + mountpoint := path.Join(dir, "merged") + if count := d.ctr.Decrement(mountpoint); count > 0 { + return nil + } + if _, err := os.ReadFile(path.Join(dir, lowerFile)); err != nil && !os.IsNotExist(err) { + return err + } + + unmounted := false + + mappedRoot := filepath.Join(d.home, id, "mapped") + // It should not happen, but cleanup any mapped mount if it was leaked. + if _, err := os.Stat(mappedRoot); err == nil { + mounts, err := os.ReadDir(mappedRoot) + if err == nil { + // Go through all of the mapped mounts. + for _, m := range mounts { + _ = unix.Unmount(filepath.Join(mappedRoot, m.Name()), unix.MNT_DETACH) + } + } + } + + if d.options.mountProgram != "" { + // Attempt to unmount the FUSE mount using either fusermount or fusermount3. + // If they fail, fallback to unix.Unmount + for _, v := range []string{"fusermount3", "fusermount"} { + err := exec.Command(v, "-u", mountpoint).Run() + if err != nil && !errors.Is(err, exec.ErrNotFound) { + logrus.Debugf("Error unmounting %s with %s - %v", mountpoint, v, err) + } + if err == nil { + unmounted = true + break + } + } + // If fusermount|fusermount3 failed to unmount the FUSE file system, make sure all + // pending changes are propagated to the file system + if !unmounted { + fd, err := unix.Open(mountpoint, unix.O_DIRECTORY, 0) + if err == nil { + if err := unix.Syncfs(fd); err != nil { + logrus.Debugf("Error Syncfs(%s) - %v", mountpoint, err) + } + unix.Close(fd) + } + } + } + + if !unmounted { + if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil && !os.IsNotExist(err) { + logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) + if !errors.Is(err, unix.EINVAL) { + return fmt.Errorf("unmounting %q: %w", mountpoint, err) + } + } + } + + if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { + logrus.Debugf("Failed to remove mountpoint %s overlay: %s - %v", id, mountpoint, err) + return fmt.Errorf("removing mount point %q: %w", mountpoint, err) + } + + return nil +} + +// Exists checks to see if the id is already mounted. 
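+// (More precisely, it reports whether the layer's directory exists on
+// disk; it does not check for an active mount.)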
+func (d *Driver) Exists(id string) bool {
+ _, err := os.Stat(d.dir(id))
+ return err == nil
+}
+
+// List layers (not including additional image stores)
+func (d *Driver) ListLayers() ([]string, error) {
+ entries, err := os.ReadDir(d.home)
+ if err != nil {
+ return nil, err
+ }
+ layers := make([]string, 0)
+
+ for _, entry := range entries {
+ id := entry.Name()
+ switch id {
+ case linkDir, stagingDir, quota.BackingFsBlockDeviceLink, mountProgramFlagFile:
+ // expected, but not a layer. skip it
+ continue
+ default:
+ // Does it look like a datadir directory?
+ if !entry.IsDir() {
+ continue
+ }
+ layers = append(layers, id)
+ }
+ }
+ return layers, nil
+}
+
+// isParent returns whether the passed-in parent is the direct parent of the passed-in layer
+func (d *Driver) isParent(id, parent string) bool {
+ lowers, err := d.getLowerDirs(id)
+ if err != nil {
+ return false
+ }
+ if parent == "" && len(lowers) > 0 {
+ return false
+ }
+
+ parentDir := d.dir(parent)
+ var ld string
+ if len(lowers) > 0 {
+ ld = filepath.Dir(lowers[0])
+ }
+ if ld == "" && parent == "" {
+ return true
+ }
+ return ld == parentDir
+}
+
+func (d *Driver) getWhiteoutFormat() archive.WhiteoutFormat {
+ whiteoutFormat := archive.OverlayWhiteoutFormat
+ if d.options.mountProgram != "" {
+ // If we are using a mount program, we are most likely running
+ // as an unprivileged user that cannot use mknod, so fall back to the
+ // AUFS whiteout format.
+ whiteoutFormat = archive.AUFSWhiteoutFormat
+ }
+ return whiteoutFormat
+}
+
+type overlayFileGetter struct {
+ diffDirs []string
+}
+
+func (g *overlayFileGetter) Get(path string) (io.ReadCloser, error) {
+ for _, d := range g.diffDirs {
+ f, err := os.Open(filepath.Join(d, path))
+ if err == nil {
+ return f, nil
+ }
+ }
+ if len(g.diffDirs) > 0 {
+ return os.Open(filepath.Join(g.diffDirs[0], path))
+ }
+ return nil, fmt.Errorf("%s: %w", path, os.ErrNotExist)
+}
+
+func (g *overlayFileGetter) Close() error {
+ return nil
+}
+
+func (d *Driver) getStagingDir() string {
+ return filepath.Join(d.home, stagingDir)
+}
+
+// DiffGetter returns a FileGetCloser that can read files from the directory that
+// contains files for the layer differences, either for this layer, or one of our
+// lowers if we're just a template directory. Used for direct access for tar-split.
+func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
+ p, err := d.getDiffPath(id)
+ if err != nil {
+ return nil, err
+ }
+ paths, err := d.getLowerDiffPaths(id)
+ if err != nil {
+ return nil, err
+ }
+ return &overlayFileGetter{diffDirs: append([]string{p}, paths...)}, nil
+}
+
+// CleanupStagingDirectory cleans up the staging directory.
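+//
+// A hypothetical sweep of leftover staging directories could look like:
+//
+//	entries, _ := os.ReadDir(d.getStagingDir())
+//	for _, e := range entries {
+//		_ = d.CleanupStagingDirectory(filepath.Join(d.getStagingDir(), e.Name()))
+//	}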
+func (d *Driver) CleanupStagingDirectory(stagingDirectory string) error {
+ return os.RemoveAll(stagingDirectory)
+}
+
+func (d *Driver) supportsDataOnlyLayers() (bool, error) {
+ feature := "dataonly-layers"
+ overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(d.runhome, feature)
+ if err == nil {
+ if overlayCacheResult {
+ logrus.Debugf("Cached value indicated that data-only layers for overlay are supported")
+ return true, nil
+ }
+ logrus.Debugf("Cached value indicated that data-only layers for overlay are not supported")
+ return false, errors.New(overlayCacheText)
+ }
+ supportsDataOnly, err := supportsDataOnlyLayers(d.home)
+ if err2 := cachedFeatureRecord(d.runhome, feature, supportsDataOnly, ""); err2 != nil {
+ return false, fmt.Errorf("recording overlay data-only layers support status: %w", err2)
+ }
+ return supportsDataOnly, err
+}
+
+func (d *Driver) useComposeFs() bool {
+ if !composeFsSupported() || unshare.IsRootless() {
+ return false
+ }
+ supportsDataOnlyLayers, err := d.supportsDataOnlyLayers()
+ if err != nil {
+ logrus.Debugf("Check for data-only layers failed with: %v", err)
+ return false
+ }
+ return supportsDataOnlyLayers
+}
+
+// ApplyDiffWithDiffer applies the changes in the new layer using the specified differ
+func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.ApplyDiffOpts, differ graphdriver.Differ) (output graphdriver.DriverWithDifferOutput, err error) {
+ var idMappings *idtools.IDMappings
+ if options != nil {
+ idMappings = options.Mappings
+ }
+ if idMappings == nil {
+ idMappings = &idtools.IDMappings{}
+ }
+
+ var applyDir string
+
+ if id == "" {
+ err := os.MkdirAll(d.getStagingDir(), 0o700)
+ if err != nil && !os.IsExist(err) {
+ return graphdriver.DriverWithDifferOutput{}, err
+ }
+ applyDir, err = os.MkdirTemp(d.getStagingDir(), "")
+ if err != nil {
+ return graphdriver.DriverWithDifferOutput{}, err
+ }
+
+ } else {
+ var err error
+ applyDir, err = d.getDiffPath(id)
+ if err != nil {
+ return graphdriver.DriverWithDifferOutput{}, err
+ }
+ }
+
+ logrus.Debugf("Applying differ in %s", applyDir)
+
+ differOptions := graphdriver.DifferOptions{
+ Format: graphdriver.DifferOutputFormatDir,
+ }
+ if d.useComposeFs() {
+ differOptions.Format = graphdriver.DifferOutputFormatFlat
+ }
+ out, err := differ.ApplyDiff(applyDir, &archive.TarOptions{
+ UIDMaps: idMappings.UIDs(),
+ GIDMaps: idMappings.GIDs(),
+ IgnoreChownErrors: d.options.ignoreChownErrors,
+ WhiteoutFormat: d.getWhiteoutFormat(),
+ InUserNS: unshare.IsRootless(),
+ }, &differOptions)
+
+ out.Target = applyDir
+
+ return out, err
+}
+
+// ApplyDiffFromStagingDirectory applies the changes using the specified staging directory.
+func (d *Driver) ApplyDiffFromStagingDirectory(id, parent, stagingDirectory string, diffOutput *graphdriver.DriverWithDifferOutput, options *graphdriver.ApplyDiffOpts) error {
+ if filepath.Dir(stagingDirectory) != d.getStagingDir() {
+ return fmt.Errorf("%q is not a staging directory", stagingDirectory)
+ }
+
+ if d.useComposeFs() {
+ // FIXME: move this logic into the differ so we don't have to open
+ // the file twice.
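+ //
+ // (Assumption from context: enableVerityRecursive walks the staging tree
+ // collecting per-file fs-verity digests; ENOTSUP/ENOTTY from filesystems
+ // without fs-verity are ignored outright, and any other failure is only
+ // logged below, not treated as fatal.)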
+ verityDigests, err := enableVerityRecursive(stagingDirectory) + if err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) { + logrus.Warningf("%s", err) + } + toc := diffOutput.Artifacts[tocArtifact] + if err := generateComposeFsBlob(verityDigests, toc, d.getComposefsData(id)); err != nil { + return err + } + } + diffPath, err := d.getDiffPath(id) + if err != nil { + return err + } + if err := os.RemoveAll(diffPath); err != nil && !os.IsNotExist(err) { + return err + } + + diffOutput.UncompressedDigest = diffOutput.TOCDigest + + return os.Rename(stagingDirectory, diffPath) +} + +// DifferTarget gets the location where files are stored for the layer. +func (d *Driver) DifferTarget(id string) (string, error) { + return d.getDiffPath(id) +} + +// ApplyDiff applies the new layer into a root +func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts) (size int64, err error) { + if !d.isParent(id, parent) { + if d.options.ignoreChownErrors { + options.IgnoreChownErrors = d.options.ignoreChownErrors + } + if d.options.forceMask != nil { + options.ForceMask = d.options.forceMask + } + return d.naiveDiff.ApplyDiff(id, parent, options) + } + + idMappings := options.Mappings + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + + applyDir, err := d.getDiffPath(id) + if err != nil { + return 0, err + } + + logrus.Debugf("Applying tar in %s", applyDir) + // Overlay doesn't need the parent id to apply the diff + if err := untar(options.Diff, applyDir, &archive.TarOptions{ + UIDMaps: idMappings.UIDs(), + GIDMaps: idMappings.GIDs(), + IgnoreChownErrors: d.options.ignoreChownErrors, + ForceMask: d.options.forceMask, + WhiteoutFormat: d.getWhiteoutFormat(), + InUserNS: unshare.IsRootless(), + }); err != nil { + return 0, err + } + + return directory.Size(applyDir) +} + +func (d *Driver) getComposefsData(id string) string { + dir := d.dir(id) + return path.Join(dir, "composefs-data") +} + +func (d *Driver) getDiffPath(id string) (string, error) { + dir, imagestore, _ := d.dir2(id) + base := dir + if imagestore != "" { + base = imagestore + } + return redirectDiffIfAdditionalLayer(path.Join(base, "diff")) +} + +func (d *Driver) getLowerDiffPaths(id string) ([]string, error) { + layers, err := d.getLowerDirs(id) + if err != nil { + return nil, err + } + for i, l := range layers { + layers[i], err = redirectDiffIfAdditionalLayer(l) + if err != nil { + return nil, err + } + } + return layers, nil +} + +// DiffSize calculates the changes between the specified id +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (d *Driver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) { + if !d.isParent(id, parent) { + return d.naiveDiff.DiffSize(id, idMappings, parent, parentMappings, mountLabel) + } + + p, err := d.getDiffPath(id) + if err != nil { + return 0, err + } + return directory.Size(p) +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". 
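+//
+// A minimal consumption sketch (hypothetical caller):
+//
+//	rc, err := d.Diff(id, nil, parent, nil, "")
+//	if err == nil {
+//		defer rc.Close()
+//		_, _ = io.Copy(io.Discard, rc)
+//	}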
+func (d *Driver) Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error) {
+ if d.useNaiveDiff() || !d.isParent(id, parent) {
+ return d.naiveDiff.Diff(id, idMappings, parent, parentMappings, mountLabel)
+ }
+
+ if idMappings == nil {
+ idMappings = &idtools.IDMappings{}
+ }
+
+ lowerDirs, err := d.getLowerDiffPaths(id)
+ if err != nil {
+ return nil, err
+ }
+
+ diffPath, err := d.getDiffPath(id)
+ if err != nil {
+ return nil, err
+ }
+ logrus.Debugf("Tar with options on %s", diffPath)
+ return archive.TarWithOptions(diffPath, &archive.TarOptions{
+ Compression: archive.Uncompressed,
+ UIDMaps: idMappings.UIDs(),
+ GIDMaps: idMappings.GIDs(),
+ WhiteoutFormat: d.getWhiteoutFormat(),
+ WhiteoutData: lowerDirs,
+ })
+}
+
+// Changes produces a list of changes between the specified layer
+// and its parent layer. If parent is "", then all changes will be ADD changes.
+func (d *Driver) Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) {
+ if d.useNaiveDiff() || !d.isParent(id, parent) {
+ return d.naiveDiff.Changes(id, idMappings, parent, parentMappings, mountLabel)
+ }
+ // Overlay doesn't have snapshots, so we need to get changes from all parent
+ // layers.
+ diffPath, err := d.getDiffPath(id)
+ if err != nil {
+ return nil, err
+ }
+ layers, err := d.getLowerDiffPaths(id)
+ if err != nil {
+ return nil, err
+ }
+
+ return archive.OverlayChanges(layers, diffPath)
+}
+
+// AdditionalImageStores returns additional image stores supported by the driver
+func (d *Driver) AdditionalImageStores() []string {
+ return d.options.imageStores
+}
+
+// UpdateLayerIDMap updates the ID mappings of the layer with the given id,
+// remapping ownership from the mappings specified by toContainer to those
+// specified by toHost.
+func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error {
+ var err error
+ dir, imagestore, _ := d.dir2(id)
+ base := dir
+ if imagestore != "" {
+ base = imagestore
+ }
+ diffDir := filepath.Join(base, "diff")
+
+ rootUID, rootGID := 0, 0
+ if toHost != nil {
+ rootUID, rootGID, err = idtools.GetRootUIDGID(toHost.UIDs(), toHost.GIDs())
+ if err != nil {
+ return err
+ }
+ }
+
+ // Mount the new layer and handle ownership changes and possible copy_ups in it.
+ options := graphdriver.MountOpts{
+ MountLabel: mountLabel,
+ Options: strings.Split(d.options.mountOptions, ","),
+ }
+ layerFs, err := d.get(id, true, options)
+ if err != nil {
+ return err
+ }
+ err = graphdriver.ChownPathByMaps(layerFs, toContainer, toHost)
+ if err != nil {
+ if err2 := d.Put(id); err2 != nil {
+ logrus.Errorf("%v; unmounting %v: %v", err, id, err2)
+ }
+ return err
+ }
+ if err = d.Put(id); err != nil {
+ return err
+ }
+
+ // Rotate the diff directories.
+ i := 0
+ perms := defaultPerms
+ st, err := os.Stat(nameWithSuffix(diffDir, i))
+ if d.options.forceMask != nil {
+ perms = *d.options.forceMask
+ } else {
+ if err == nil {
+ perms = os.FileMode(st.Mode())
+ }
+ }
+ for err == nil {
+ i++
+ _, err = os.Stat(nameWithSuffix(diffDir, i))
+ }
+
+ for i > 0 {
+ err = os.Rename(nameWithSuffix(diffDir, i-1), nameWithSuffix(diffDir, i))
+ if err != nil {
+ return err
+ }
+ i--
+ }
+
+ // We need to re-create the work directory as it might keep a reference
+ // to the old upper layer in the index.
+ workDir := filepath.Join(dir, "work")
+ if err := os.RemoveAll(workDir); err == nil {
+ if err := idtools.MkdirAs(workDir, defaultPerms, rootUID, rootGID); err != nil {
+ return err
+ }
+ }
+
+ // Re-create the directory that we're going to use as the upper layer.
+ if err := idtools.MkdirAs(diffDir, perms, rootUID, rootGID); err != nil {
+ return err
+ }
+ return nil
+}
+
+// supportsIDmappedMounts returns whether the kernel supports using idmapped mounts with
+// overlay lower layers.
+func (d *Driver) supportsIDmappedMounts() bool {
+ if d.supportsIDMappedMounts != nil {
+ return *d.supportsIDMappedMounts
+ }
+
+ supportsIDMappedMounts, err := checkAndRecordIDMappedSupport(d.home, d.runhome)
+ d.supportsIDMappedMounts = &supportsIDMappedMounts
+ if err == nil {
+ return supportsIDMappedMounts
+ }
+ logrus.Debugf("Check for idmapped mounts support %v", err)
+ return false
+}
+
+// SupportsShifting tells whether the driver supports shifting of the UIDs/GIDs in a userNS
+func (d *Driver) SupportsShifting() bool {
+ if os.Getenv("_CONTAINERS_OVERLAY_DISABLE_IDMAP") == "yes" {
+ return false
+ }
+ if d.options.mountProgram != "" {
+ return true
+ }
+ return d.supportsIDmappedMounts()
+}
+
+// dumbJoin is more or less a dumber version of filepath.Join, but one which
+// won't Clean() the path, allowing us to append ".." as a component and trust
+// pathname resolution to do some non-obvious work.
+func dumbJoin(names ...string) string {
+ if len(names) == 0 {
+ return string(os.PathSeparator)
+ }
+ return strings.Join(names, string(os.PathSeparator))
+}
+
+func nameWithSuffix(name string, number int) string {
+ if number == 0 {
+ return name
+ }
+ return fmt.Sprintf("%s%d", name, number)
+}
+
+func (d *Driver) getAdditionalLayerPath(dgst digest.Digest, ref string) (string, error) {
+ refElem := base64.StdEncoding.EncodeToString([]byte(ref))
+ for _, ls := range d.options.layerStores {
+ ref := ""
+ if ls.withReference {
+ ref = refElem
+ }
+ target := path.Join(ls.path, ref, dgst.String())
+ // Check if all necessary files exist
+ for _, p := range []string{
+ filepath.Join(target, "diff"),
+ filepath.Join(target, "info"),
+ filepath.Join(target, "blob"),
+ } {
+ if _, err := os.Stat(p); err != nil {
+ wrapped := fmt.Errorf("failed to stat additional layer %q: %w", p, err)
+ return "", fmt.Errorf("%v: %w", wrapped, graphdriver.ErrLayerUnknown)
+ }
+ }
+ return target, nil
+ }
+
+ return "", fmt.Errorf("additional layer (%q, %q) not found: %w", dgst, ref, graphdriver.ErrLayerUnknown)
+}
+
+func (d *Driver) releaseAdditionalLayerByID(id string) {
+ if al, err := d.getAdditionalLayerPathByID(id); err == nil {
+ notifyReleaseAdditionalLayer(al)
+ } else if !os.IsNotExist(err) {
+ logrus.Warnf("Unexpected error on reading Additional Layer Store pointer %v", err)
+ }
+}
+
+// additionalLayer represents a layer in Additional Layer Store.
+type additionalLayer struct {
+ path string
+ d *Driver
+ releaseOnce sync.Once
+}
+
+// Info returns arbitrary information stored along with this layer (i.e. `info` file).
+// This API is experimental and can be changed without bumping the major version number.
+// TODO: to remove the comment once it's no longer experimental.
+func (al *additionalLayer) Info() (io.ReadCloser, error) {
+ return os.Open(filepath.Join(al.path, "info"))
+}
+
+// Blob returns a reader of the raw contents of this layer.
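+//
+// A hypothetical consumer (this API is experimental, like Info above):
+//
+//	r, err := al.Blob()
+//	if err == nil {
+//		defer r.Close()
+//		// stream the raw layer blob to its destination
+//	}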
+func (al *additionalLayer) Blob() (io.ReadCloser, error) { + return os.Open(filepath.Join(al.path, "blob")) +} + +// CreateAs creates a new layer from this additional layer. +// This API is experimental and can be changed without bumping the major version number. +// TODO: to remove the comment once it's no longer experimental. +func (al *additionalLayer) CreateAs(id, parent string) error { + // TODO: support opts + if err := al.d.Create(id, parent, nil); err != nil { + return err + } + dir := al.d.dir(id) + diffDir := path.Join(dir, "diff") + if err := os.RemoveAll(diffDir); err != nil { + return err + } + // tell the additional layer store that we use this layer. + // mark this layer as "additional layer" + if err := os.WriteFile(path.Join(dir, "additionallayer"), []byte(al.path), 0o644); err != nil { + return err + } + notifyUseAdditionalLayer(al.path) + return os.Symlink(filepath.Join(al.path, "diff"), diffDir) +} + +func (d *Driver) getAdditionalLayerPathByID(id string) (string, error) { + al, err := os.ReadFile(path.Join(d.dir(id), "additionallayer")) + if err != nil { + return "", err + } + return string(al), nil +} + +// Release tells the additional layer store that we don't use this handler. +// This API is experimental and can be changed without bumping the major version number. +// TODO: to remove the comment once it's no longer experimental. +func (al *additionalLayer) Release() { + // Tell the additional layer store that we don't use this layer handler. + // This will decrease the reference counter on the store's side, which was + // increased in LookupAdditionalLayer (so this must be called only once). + al.releaseOnce.Do(func() { + notifyReleaseAdditionalLayer(al.path) + }) +} + +// notifyUseAdditionalLayer notifies Additional Layer Store that we use the specified layer. +// This is done by creating "use" file in the layer directory. This is useful for +// Additional Layer Store to consider when to perform GC. Notification-aware Additional +// Layer Store must return ENOENT. +func notifyUseAdditionalLayer(al string) { + if !path.IsAbs(al) { + logrus.Warnf("additionallayer must be absolute (got: %v)", al) + return + } + useFile := path.Join(al, "use") + f, err := os.Create(useFile) + if os.IsNotExist(err) { + return + } else if err == nil { + f.Close() + if err := os.Remove(useFile); err != nil { + logrus.Warnf("Failed to remove use file") + } + } + logrus.Warnf("Unexpected error by Additional Layer Store %v during use; GC doesn't seem to be supported", err) +} + +// notifyReleaseAdditionalLayer notifies Additional Layer Store that we don't use the specified +// layer anymore. This is done by rmdir-ing the layer directory. This is useful for +// Additional Layer Store to consider when to perform GC. Notification-aware Additional +// Layer Store must return ENOENT. +func notifyReleaseAdditionalLayer(al string) { + if !path.IsAbs(al) { + logrus.Warnf("additionallayer must be absolute (got: %v)", al) + return + } + // tell the additional layer store that we don't use this layer anymore. + err := unix.Rmdir(al) + if os.IsNotExist(err) { + return + } + logrus.Warnf("Unexpected error by Additional Layer Store %v during release; GC doesn't seem to be supported", err) +} + +// redirectDiffIfAdditionalLayer checks if the passed diff path is Additional Layer and +// returns the redirected path. If the passed diff is not the one in Additional Layer +// Store, it returns the original path without changes. 
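+//
+// For example, if diffPath is a symlink into an Additional Layer Store
+// (a hypothetical "/als/<digest>/diff"), the link target is returned; for a
+// regular directory, os.Readlink fails with EINVAL and the original path is
+// returned unchanged.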
+func redirectDiffIfAdditionalLayer(diffPath string) (string, error) { + if ld, err := os.Readlink(diffPath); err == nil { + // diff is the link to Additional Layer Store + if !path.IsAbs(ld) { + return "", fmt.Errorf("linkpath must be absolute (got: %q)", ld) + } + diffPath = ld + } else if err.(*os.PathError).Err != syscall.EINVAL { + return "", err + } + return diffPath, nil +} + +// getMappedMountRoot is a heuristic that calculates the parent directory where +// the idmapped mount should be applied. +// It is useful to minimize the number of idmapped mounts and at the same time use +// a common path as long as possible to reduce the length of the mount data argument. +func getMappedMountRoot(path string) string { + dirName := filepath.Dir(path) + if filepath.Base(dirName) == linkDir { + return filepath.Dir(dirName) + } + return dirName +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_cgo.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_cgo.go new file mode 100644 index 0000000000..88bfbf9c74 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay_cgo.go @@ -0,0 +1,22 @@ +//go:build linux && cgo +// +build linux,cgo + +package overlay + +import ( + "path" + + "github.com/containers/storage/pkg/directory" +) + +// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID. +// For Overlay, it attempts to check the XFS quota for size, and falls back to +// finding the size of the "diff" directory. +func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) { + usage := &directory.DiskUsage{} + if d.quotaCtl != nil { + err := d.quotaCtl.GetDiskUsage(d.dir(id), usage) + return usage, err + } + return directory.Usage(path.Join(d.dir(id), "diff")) +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go new file mode 100644 index 0000000000..2a7a307a29 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go @@ -0,0 +1,17 @@ +//go:build linux && !cgo +// +build linux,!cgo + +package overlay + +import ( + "path" + + "github.com/containers/storage/pkg/directory" +) + +// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID. +// For Overlay, it attempts to check the XFS quota for size, and falls back to +// finding the size of the "diff" directory. 
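+// (In this non-cgo build the quota machinery is compiled out, so only the
+// directory-walk fallback below is available.)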
+func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) { + return directory.Usage(path.Join(d.dir(id), "diff")) +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go new file mode 100644 index 0000000000..33b163a8c2 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go @@ -0,0 +1,8 @@ +//go:build !linux +// +build !linux + +package overlay + +func SupportsNativeOverlay(graphroot, rundir string) (bool, error) { + return false, nil +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/randomid.go b/vendor/github.com/containers/storage/drivers/overlay/randomid.go new file mode 100644 index 0000000000..6519900893 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/randomid.go @@ -0,0 +1,82 @@ +//go:build linux +// +build linux + +package overlay + +import ( + "crypto/rand" + "encoding/base32" + "fmt" + "io" + "os" + "syscall" + "time" + + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// generateID creates a new random string identifier with the given length +func generateID(l int) string { + const ( + // ensures we backoff for less than 450ms total. Use the following to + // select new value, in units of 10ms: + // n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2 + maxretries = 9 + backoff = time.Millisecond * 10 + ) + + var ( + totalBackoff time.Duration + count int + retries int + size = (l*5 + 7) / 8 + u = make([]byte, size) + ) + // TODO: Include time component, counter component, random component + + for { + // This should never block but the read may fail. Because of this, + // we just try to read the random number generator until we get + // something. This is a very rare condition but may happen. + b := time.Duration(retries) * backoff + time.Sleep(b) + totalBackoff += b + + n, err := io.ReadFull(rand.Reader, u[count:]) + if err != nil { + if retryOnError(err) && retries < maxretries { + count += n + retries++ + logrus.Errorf("Generating version 4 uuid, retrying: %v", err) + continue + } + + // Any other errors represent a system problem. What did someone + // do to /dev/urandom? + panic(fmt.Errorf("reading random number generator, retried for %v: %w", totalBackoff.String(), err)) + } + + break + } + + s := base32.StdEncoding.EncodeToString(u) + + return s[:l] +} + +// retryOnError tries to detect whether or not retrying would be fruitful. +func retryOnError(err error) bool { + switch err := err.(type) { + case *os.PathError: + return retryOnError(err.Err) // unpack the target error + case syscall.Errno: + if err == unix.EPERM { + // EPERM represents an entropy pool exhaustion, a condition under + // which we backoff and retry. + return true + } + } + + return false +} diff --git a/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go b/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go new file mode 100644 index 0000000000..2670ef3df9 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go @@ -0,0 +1,20 @@ +//go:build linux +// +build linux + +package overlayutils + +import ( + "fmt" + + graphdriver "github.com/containers/storage/drivers" +) + +// ErrDTypeNotSupported denotes that the backing filesystem doesn't support d_type. 
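+//
+// For example, for the overlay driver on xfs formatted with ftype=0, the
+// message reads: "overlay: the backing xfs filesystem is formatted without
+// d_type support, which leads to incorrect behavior. Reformat the filesystem
+// with ftype=1 to enable d_type support. Running without d_type is not
+// supported.", wrapped with graphdriver.ErrNotSupported.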
+func ErrDTypeNotSupported(driver, backingFs string) error {
+ msg := fmt.Sprintf("%s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.", driver, backingFs)
+ if backingFs == "xfs" {
+ msg += " Reformat the filesystem with ftype=1 to enable d_type support."
+ }
+ msg += " Running without d_type is not supported."
+ return fmt.Errorf("%s: %w", msg, graphdriver.ErrNotSupported)
+}
diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota.go b/vendor/github.com/containers/storage/drivers/quota/projectquota.go
new file mode 100644
index 0000000000..2be79698d3
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/quota/projectquota.go
@@ -0,0 +1,453 @@
+//go:build linux && !exclude_disk_quota && cgo
+// +build linux,!exclude_disk_quota,cgo
+
+//
+// projectquota.go - implements XFS project quota controls
+// for setting quota limits on a newly created directory.
+// It currently supports the legacy XFS specific ioctls.
+//
+// TODO: use generic quota control ioctl FS_IOC_FS{GET,SET}XATTR
+// for both xfs/ext4 for kernel version >= v4.5
+//
+
+package quota
+
+/*
+#include <stdlib.h>
+#include <dirent.h>
+#include <linux/fs.h>
+#include <linux/quota.h>
+#include <linux/dqblk_xfs.h>
+
+#ifndef FS_XFLAG_PROJINHERIT
+struct fsxattr {
+ __u32 fsx_xflags;
+ __u32 fsx_extsize;
+ __u32 fsx_nextents;
+ __u32 fsx_projid;
+ unsigned char fsx_pad[12];
+};
+#define FS_XFLAG_PROJINHERIT 0x00000200
+#endif
+#ifndef FS_IOC_FSGETXATTR
+#define FS_IOC_FSGETXATTR _IOR ('X', 31, struct fsxattr)
+#endif
+#ifndef FS_IOC_FSSETXATTR
+#define FS_IOC_FSSETXATTR _IOW ('X', 32, struct fsxattr)
+#endif
+
+#ifndef PRJQUOTA
+#define PRJQUOTA 2
+#endif
+#ifndef FS_PROJ_QUOTA
+#define FS_PROJ_QUOTA 2
+#endif
+#ifndef Q_XSETPQLIM
+#define Q_XSETPQLIM QCMD(Q_XSETQLIM, PRJQUOTA)
+#endif
+#ifndef Q_XGETPQUOTA
+#define Q_XGETPQUOTA QCMD(Q_XGETQUOTA, PRJQUOTA)
+#endif
+*/
+import "C"
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "os"
+ "path"
+ "path/filepath"
+ "sync"
+ "syscall"
+ "unsafe"
+
+ "github.com/containers/storage/pkg/directory"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
+const projectIDsAllocatedPerQuotaHome = 10000
+
+// BackingFsBlockDeviceLink is the name of a file that we place in
+// the home directory of a driver that uses this package.
+const BackingFsBlockDeviceLink = "backingFsBlockDev"
+
+// Quota limit params - currently we only control blocks hard limit and inodes
+type Quota struct {
+ Size uint64
+ Inodes uint64
+}
+
+// Control - Context to be used by storage driver (e.g. overlay)
+// who wants to apply project quotas to container dirs
+type Control struct {
+ backingFsBlockDev string
+ nextProjectID uint32
+ quotas *sync.Map
+ basePath string
+}
+
+// Attempt to generate a unique project id. Multiple directories
+// per file system can have quotas and they need a group of unique
+// ids. This function attempts to allocate at least projectIDsAllocatedPerQuotaHome (10000)
+// unique project ids, based on the inode of the basepath.
+func generateUniqueProjectID(path string) (uint32, error) {
+ fileinfo, err := os.Stat(path)
+ if err != nil {
+ return 0, err
+ }
+ stat, ok := fileinfo.Sys().(*syscall.Stat_t)
+ if !ok {
+ return 0, fmt.Errorf("not a syscall.Stat_t %s", path)
+ }
+ projectID := projectIDsAllocatedPerQuotaHome + (stat.Ino*projectIDsAllocatedPerQuotaHome)%(math.MaxUint32-projectIDsAllocatedPerQuotaHome)
+ return uint32(projectID), nil
+}
+
+// NewControl - initialize project quota support.
+// Test to make sure that quota can be set on a test dir and find +// the first project id to be used for the next container create. +// +// Returns nil (and error) if project quota is not supported. +// +// First get the project id of the basePath directory. +// This test will fail if the backing fs is not xfs. +// +// xfs_quota tool can be used to assign a project id to the driver home directory, e.g.: +// echo 100000:/var/lib/containers/storage/overlay >> /etc/projects +// echo 200000:/var/lib/containers/storage/volumes >> /etc/projects +// echo storage:100000 >> /etc/projid +// echo volumes:200000 >> /etc/projid +// xfs_quota -x -c 'project -s storage volumes' / +// +// In the example above, the storage directory project id will be used as a +// "start offset" and all containers will be assigned larger project ids +// (e.g. >= 100000). Then the volumes directory project id will be used as a +// "start offset" and all volumes will be assigned larger project ids +// (e.g. >= 200000). +// This is a way to prevent xfs_quota management from conflicting with +// containers/storage. + +// Then try to create a test directory with the next project id and set a quota +// on it. If that works, continue to scan existing containers to map allocated +// project ids. +func NewControl(basePath string) (*Control, error) { + // + // Get project id of parent dir as minimal id to be used by driver + // + minProjectID, err := getProjectID(basePath) + if err != nil { + return nil, err + } + if minProjectID == 0 { + // Indicates the storage was never initialized + // Generate a unique range of Projectids for this basepath + minProjectID, err = generateUniqueProjectID(basePath) + if err != nil { + return nil, err + } + + } + // + // create backing filesystem device node + // + backingFsBlockDev, err := makeBackingFsDev(basePath) + if err != nil { + return nil, err + } + + // + // Test if filesystem supports project quotas by trying to set + // a quota on the first available project id + // + quota := Quota{ + Size: 0, + Inodes: 0, + } + + q := Control{ + backingFsBlockDev: backingFsBlockDev, + nextProjectID: minProjectID + 1, + quotas: &sync.Map{}, + basePath: basePath, + } + + if err := q.setProjectQuota(minProjectID, quota); err != nil { + return nil, err + } + + // + // get first project id to be used for next container + // + err = q.findNextProjectID() + if err != nil { + return nil, err + } + + logrus.Debugf("NewControl(%s): nextProjectID = %d", basePath, q.nextProjectID) + return &q, nil +} + +// SetQuota - assign a unique project id to directory and set the quota limits +// for that project id +func (q *Control) SetQuota(targetPath string, quota Quota) error { + var projectID uint32 + value, ok := q.quotas.Load(targetPath) + if ok { + projectID, ok = value.(uint32) + } + if !ok { + projectID = q.nextProjectID + + // + // assign project id to new container directory + // + err := setProjectID(targetPath, projectID) + if err != nil { + return err + } + + q.quotas.Store(targetPath, projectID) + q.nextProjectID++ + } + + // + // set the quota limit for the container's project id + // + logrus.Debugf("SetQuota path=%s, size=%d, inodes=%d, projectID=%d", targetPath, quota.Size, quota.Inodes, projectID) + return q.setProjectQuota(projectID, quota) +} + +// ClearQuota removes the map entry in the quotas map for targetPath. +// It does so to prevent the map leaking entries as directories are deleted. 
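+//
+// A hypothetical pairing with directory removal:
+//
+//	if err := os.RemoveAll(dir); err == nil {
+//		q.ClearQuota(dir)
+//	}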
+func (q *Control) ClearQuota(targetPath string) { + q.quotas.Delete(targetPath) +} + +// setProjectQuota - set the quota for project id on xfs block device +func (q *Control) setProjectQuota(projectID uint32, quota Quota) error { + var d C.fs_disk_quota_t + d.d_version = C.FS_DQUOT_VERSION + d.d_id = C.__u32(projectID) + d.d_flags = C.FS_PROJ_QUOTA + + if quota.Size > 0 { + d.d_fieldmask = d.d_fieldmask | C.FS_DQ_BHARD | C.FS_DQ_BSOFT + d.d_blk_hardlimit = C.__u64(quota.Size / 512) + d.d_blk_softlimit = d.d_blk_hardlimit + } + if quota.Inodes > 0 { + d.d_fieldmask = d.d_fieldmask | C.FS_DQ_IHARD | C.FS_DQ_ISOFT + d.d_ino_hardlimit = C.__u64(quota.Inodes) + d.d_ino_softlimit = d.d_ino_hardlimit + } + + cs := C.CString(q.backingFsBlockDev) + defer C.free(unsafe.Pointer(cs)) + + runQuotactl := func() syscall.Errno { + _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM, + uintptr(unsafe.Pointer(cs)), uintptr(d.d_id), + uintptr(unsafe.Pointer(&d)), 0, 0) + return errno + } + + errno := runQuotactl() + + // If the backingFsBlockDev does not exist any more then try to recreate it. + if errors.Is(errno, unix.ENOENT) { + if _, err := makeBackingFsDev(q.basePath); err != nil { + return fmt.Errorf( + "failed to recreate missing backingFsBlockDev %s for projid %d: %w", + q.backingFsBlockDev, projectID, err, + ) + } + + if errno := runQuotactl(); errno != 0 { + return fmt.Errorf("failed to set quota limit for projid %d on %s after backingFsBlockDev recreation: %w", + projectID, q.backingFsBlockDev, errno) + } + + } else if errno != 0 { + return fmt.Errorf("failed to set quota limit for projid %d on %s: %w", + projectID, q.backingFsBlockDev, errno) + } + + return nil +} + +// GetQuota - get the quota limits of a directory that was configured with SetQuota +func (q *Control) GetQuota(targetPath string, quota *Quota) error { + d, err := q.fsDiskQuotaFromPath(targetPath) + if err != nil { + return err + } + quota.Size = uint64(d.d_blk_hardlimit) * 512 + quota.Inodes = uint64(d.d_ino_hardlimit) + return nil +} + +// GetDiskUsage - get the current disk usage of a directory that was configured with SetQuota +func (q *Control) GetDiskUsage(targetPath string, usage *directory.DiskUsage) error { + d, err := q.fsDiskQuotaFromPath(targetPath) + if err != nil { + return err + } + usage.Size = int64(d.d_bcount) * 512 + usage.InodeCount = int64(d.d_icount) + + return nil +} + +func (q *Control) fsDiskQuotaFromPath(targetPath string) (C.fs_disk_quota_t, error) { + var d C.fs_disk_quota_t + var projectID uint32 + value, ok := q.quotas.Load(targetPath) + if ok { + projectID, ok = value.(uint32) + } + if !ok { + return d, fmt.Errorf("quota not found for path : %s", targetPath) + } + + // + // get the quota limit for the container's project id + // + cs := C.CString(q.backingFsBlockDev) + defer C.free(unsafe.Pointer(cs)) + + _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XGETPQUOTA, + uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)), + uintptr(unsafe.Pointer(&d)), 0, 0) + if errno != 0 { + return d, fmt.Errorf("failed to get quota limit for projid %d on %s: %w", + projectID, q.backingFsBlockDev, errno) + } + + return d, nil +} + +// getProjectID - get the project id of path on xfs +func getProjectID(targetPath string) (uint32, error) { + dir, err := openDir(targetPath) + if err != nil { + return 0, err + } + defer closeDir(dir) + + var fsx C.struct_fsxattr + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return 
0, fmt.Errorf("failed to get projid for %s: %w", targetPath, errno) + } + + return uint32(fsx.fsx_projid), nil +} + +// setProjectID - set the project id of path on xfs +func setProjectID(targetPath string, projectID uint32) error { + dir, err := openDir(targetPath) + if err != nil { + return err + } + defer closeDir(dir) + + var fsx C.struct_fsxattr + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return fmt.Errorf("failed to get projid for %s: %w", targetPath, errno) + } + fsx.fsx_projid = C.__u32(projectID) + fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT + _, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return fmt.Errorf("failed to set projid for %s: %w", targetPath, errno) + } + + return nil +} + +// findNextProjectID - find the next project id to be used for containers +// by scanning driver home directory to find used project ids +func (q *Control) findNextProjectID() error { + files, err := os.ReadDir(q.basePath) + if err != nil { + return fmt.Errorf("read directory failed : %s", q.basePath) + } + for _, file := range files { + if !file.IsDir() { + continue + } + path := filepath.Join(q.basePath, file.Name()) + projid, err := getProjectID(path) + if err != nil { + return err + } + if projid > 0 { + q.quotas.Store(path, projid) + } + if q.nextProjectID <= projid { + q.nextProjectID = projid + 1 + } + } + + return nil +} + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func openDir(path string) (*C.DIR, error) { + Cpath := C.CString(path) + defer free(Cpath) + + dir, errno := C.opendir(Cpath) + if dir == nil { + return nil, fmt.Errorf("can't open dir %v: %w", Cpath, errno) + } + return dir, nil +} + +func closeDir(dir *C.DIR) { + if dir != nil { + C.closedir(dir) + } +} + +func getDirFd(dir *C.DIR) uintptr { + return uintptr(C.dirfd(dir)) +} + +// Get the backing block device of the driver home directory +// and create a block device node under the home directory +// to be used by quotactl commands +func makeBackingFsDev(home string) (string, error) { + var stat unix.Stat_t + if err := unix.Stat(home, &stat); err != nil { + return "", err + } + + backingFsBlockDev := path.Join(home, BackingFsBlockDeviceLink) + backingFsBlockDevTmp := backingFsBlockDev + ".tmp" + // Re-create just in case someone copied the home directory over to a new device + if err := unix.Mknod(backingFsBlockDevTmp, unix.S_IFBLK|0o600, int(stat.Dev)); err != nil { + if !errors.Is(err, unix.EEXIST) { + return "", fmt.Errorf("failed to mknod %s: %w", backingFsBlockDevTmp, err) + } + // On EEXIST, try again after unlinking any potential leftover. 
+ _ = unix.Unlink(backingFsBlockDevTmp) + if err := unix.Mknod(backingFsBlockDevTmp, unix.S_IFBLK|0o600, int(stat.Dev)); err != nil { + return "", fmt.Errorf("failed to mknod %s: %w", backingFsBlockDevTmp, err) + } + } + if err := unix.Rename(backingFsBlockDevTmp, backingFsBlockDev); err != nil { + return "", fmt.Errorf("failed to rename %s to %s: %w", backingFsBlockDevTmp, backingFsBlockDev, err) + } + + return backingFsBlockDev, nil +} diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go b/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go new file mode 100644 index 0000000000..648fd3379c --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go @@ -0,0 +1,37 @@ +//go:build !linux || exclude_disk_quota || !cgo +// +build !linux exclude_disk_quota !cgo + +package quota + +import ( + "errors" +) + +// Quota limit params - currently we only control blocks hard limit +type Quota struct { + Size uint64 + Inodes uint64 +} + +// Control - Context to be used by storage driver (e.g. overlay) +// who wants to apply project quotas to container dirs +type Control struct{} + +func NewControl(basePath string) (*Control, error) { + return nil, errors.New("filesystem does not support, or has not enabled quotas") +} + +// SetQuota - assign a unique project id to directory and set the quota limits +// for that project id +func (q *Control) SetQuota(targetPath string, quota Quota) error { + return errors.New("filesystem does not support, or has not enabled quotas") +} + +// GetQuota - get the quota limits of a directory that was configured with SetQuota +func (q *Control) GetQuota(targetPath string, quota *Quota) error { + return errors.New("filesystem does not support, or has not enabled quotas") +} + +// ClearQuota removes the map entry in the quotas map for targetPath. +// It does so to prevent the map leaking entries as directories are deleted. 
+func (q *Control) ClearQuota(targetPath string) {} diff --git a/vendor/github.com/containers/storage/drivers/register/register_aufs.go b/vendor/github.com/containers/storage/drivers/register/register_aufs.go new file mode 100644 index 0000000000..bbb9cb6570 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/register/register_aufs.go @@ -0,0 +1,9 @@ +//go:build !exclude_graphdriver_aufs && linux +// +build !exclude_graphdriver_aufs,linux + +package register + +import ( + // register the aufs graphdriver + _ "github.com/containers/storage/drivers/aufs" +) diff --git a/vendor/github.com/containers/storage/drivers/register/register_btrfs.go b/vendor/github.com/containers/storage/drivers/register/register_btrfs.go new file mode 100644 index 0000000000..425ebd798e --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/register/register_btrfs.go @@ -0,0 +1,9 @@ +//go:build !exclude_graphdriver_btrfs && linux +// +build !exclude_graphdriver_btrfs,linux + +package register + +import ( + // register the btrfs graphdriver + _ "github.com/containers/storage/drivers/btrfs" +) diff --git a/vendor/github.com/containers/storage/drivers/register/register_devicemapper.go b/vendor/github.com/containers/storage/drivers/register/register_devicemapper.go new file mode 100644 index 0000000000..a744eaea17 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/register/register_devicemapper.go @@ -0,0 +1,9 @@ +//go:build !exclude_graphdriver_devicemapper && linux && cgo +// +build !exclude_graphdriver_devicemapper,linux,cgo + +package register + +import ( + // register the devmapper graphdriver + _ "github.com/containers/storage/drivers/devmapper" +) diff --git a/vendor/github.com/containers/storage/drivers/register/register_overlay.go b/vendor/github.com/containers/storage/drivers/register/register_overlay.go new file mode 100644 index 0000000000..95b77b73e2 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/register/register_overlay.go @@ -0,0 +1,9 @@ +//go:build !exclude_graphdriver_overlay && linux && cgo +// +build !exclude_graphdriver_overlay,linux,cgo + +package register + +import ( + // register the overlay graphdriver + _ "github.com/containers/storage/drivers/overlay" +) diff --git a/vendor/github.com/containers/storage/drivers/register/register_vfs.go b/vendor/github.com/containers/storage/drivers/register/register_vfs.go new file mode 100644 index 0000000000..691ce85929 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/register/register_vfs.go @@ -0,0 +1,6 @@ +package register + +import ( + // register vfs + _ "github.com/containers/storage/drivers/vfs" +) diff --git a/vendor/github.com/containers/storage/drivers/register/register_windows.go b/vendor/github.com/containers/storage/drivers/register/register_windows.go new file mode 100644 index 0000000000..048b27097d --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/register/register_windows.go @@ -0,0 +1,6 @@ +package register + +import ( + // register the windows graph driver + _ "github.com/containers/storage/drivers/windows" +) diff --git a/vendor/github.com/containers/storage/drivers/register/register_zfs.go b/vendor/github.com/containers/storage/drivers/register/register_zfs.go new file mode 100644 index 0000000000..8e5788a437 --- /dev/null +++ 
b/vendor/github.com/containers/storage/drivers/register/register_zfs.go @@ -0,0 +1,9 @@ +//go:build (!exclude_graphdriver_zfs && linux) || (!exclude_graphdriver_zfs && freebsd) || solaris +// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd solaris + +package register + +import ( + // register the zfs driver + _ "github.com/containers/storage/drivers/zfs" +) diff --git a/vendor/github.com/containers/storage/drivers/template.go b/vendor/github.com/containers/storage/drivers/template.go new file mode 100644 index 0000000000..66ab89f7f7 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/template.go @@ -0,0 +1,52 @@ +package graphdriver + +import ( + "github.com/containers/storage/pkg/idtools" + "github.com/sirupsen/logrus" +) + +// TemplateDriver is just barely enough of a driver that we can implement a +// naive version of CreateFromTemplate on top of it. +type TemplateDriver interface { + DiffDriver + CreateReadWrite(id, parent string, opts *CreateOpts) error + Create(id, parent string, opts *CreateOpts) error + Remove(id string) error +} + +// CreateFromTemplate creates a layer with the same contents and parent as +// another layer. Internally, it may even depend on that other layer +// continuing to exist, as if it were actually a child of the child layer. +func NaiveCreateFromTemplate(d TemplateDriver, id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *CreateOpts, readWrite bool) error { + var err error + if readWrite { + err = d.CreateReadWrite(id, parent, opts) + } else { + err = d.Create(id, parent, opts) + } + if err != nil { + return err + } + diff, err := d.Diff(template, templateIDMappings, parent, parentIDMappings, opts.MountLabel) + if err != nil { + if err2 := d.Remove(id); err2 != nil { + logrus.Errorf("Removing layer %q: %v", id, err2) + } + return err + } + defer diff.Close() + + applyOptions := ApplyDiffOpts{ + Diff: diff, + Mappings: templateIDMappings, + MountLabel: opts.MountLabel, + IgnoreChownErrors: opts.ignoreChownErrors, + } + if _, err = d.ApplyDiff(id, parent, applyOptions); err != nil { + if err2 := d.Remove(id); err2 != nil { + logrus.Errorf("Removing layer %q: %v", id, err2) + } + return err + } + return nil +} diff --git a/vendor/github.com/containers/storage/drivers/vfs/copy_linux.go b/vendor/github.com/containers/storage/drivers/vfs/copy_linux.go new file mode 100644 index 0000000000..bf22a5f6fd --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/vfs/copy_linux.go @@ -0,0 +1,7 @@ +package vfs + +import "github.com/containers/storage/drivers/copy" + +func dirCopy(srcDir, dstDir string) error { + return copy.DirCopy(srcDir, dstDir, copy.Content, true) +} diff --git a/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go b/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go new file mode 100644 index 0000000000..d94756bdd1 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go @@ -0,0 +1,10 @@ +//go:build !linux +// +build !linux + +package vfs // import "github.com/containers/storage/drivers/vfs" + +import "github.com/containers/storage/pkg/chrootarchive" + +func dirCopy(srcDir, dstDir string) error { + return chrootarchive.NewArchiver(nil).CopyWithTar(srcDir, dstDir) +} diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go 
b/vendor/github.com/containers/storage/drivers/vfs/driver.go
new file mode 100644
index 0000000000..599cf095d2
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go
@@ -0,0 +1,330 @@
+package vfs
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+
+ graphdriver "github.com/containers/storage/drivers"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/directory"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/parsers"
+ "github.com/containers/storage/pkg/system"
+ "github.com/opencontainers/selinux/go-selinux/label"
+ "github.com/sirupsen/logrus"
+ "github.com/vbatts/tar-split/tar/storage"
+)
+
+const defaultPerms = os.FileMode(0o555)
+
+func init() {
+ graphdriver.MustRegister("vfs", Init)
+}
+
+// Init returns a new VFS driver.
+// This sets the home directory for the driver and returns NaiveDiffDriver.
+func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) {
+ d := &Driver{
+ name: "vfs",
+ homes: []string{home},
+ idMappings: idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps),
+ }
+
+ rootIDs := d.idMappings.RootPair()
+ if err := idtools.MkdirAllAndChown(filepath.Join(home, "dir"), 0o700, rootIDs); err != nil {
+ return nil, err
+ }
+ for _, option := range options.DriverOptions {
+ key, val, err := parsers.ParseKeyValueOpt(option)
+ if err != nil {
+ return nil, err
+ }
+ key = strings.ToLower(key)
+ switch key {
+ case "vfs.imagestore", ".imagestore":
+ d.homes = append(d.homes, strings.Split(val, ",")...)
+ continue
+ case "vfs.mountopt":
+ return nil, fmt.Errorf("vfs driver does not support mount options")
+ case ".ignore_chown_errors", "vfs.ignore_chown_errors":
+ logrus.Debugf("vfs: ignore_chown_errors=%s", val)
+ var err error
+ d.ignoreChownErrors, err = strconv.ParseBool(val)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("vfs driver does not support %s options", key)
+ }
+ }
+ // If --imagestore is provided, let's add the writable graphRoot
+ // to vfs's additional image stores, as is done for
+ // the `overlay` driver.
+ if options.ImageStore != "" {
+ d.homes = append(d.homes, options.ImageStore)
+ }
+ d.updater = graphdriver.NewNaiveLayerIDMapUpdater(d)
+ d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, d.updater)
+
+ return d, nil
+}
+
+// Driver holds information about the driver, including the home directory of the driver.
+// Driver implements graphdriver.ProtoDriver. It uses only basic vfs operations.
+// In order to support layering, files are copied from the parent layer into the new layer. There is no copy-on-write support.
+// Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver
+type Driver struct {
+ name string
+ homes []string
+ idMappings *idtools.IDMappings
+ ignoreChownErrors bool
+ naiveDiff graphdriver.DiffDriver
+ updater graphdriver.LayerIDMapUpdater
+}
+
+func (d *Driver) String() string {
+ return "vfs"
+}
+
+// Status is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any status information.
+func (d *Driver) Status() [][2]string {
+ return nil
+}
+
+// Metadata is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any metadata.
+func (d *Driver) Metadata(id string) (map[string]string, error) { + return nil, nil //nolint: nilnil +} + +// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver. +func (d *Driver) Cleanup() error { + return nil +} + +type fileGetNilCloser struct { + storage.FileGetter +} + +func (f fileGetNilCloser) Close() error { + return nil +} + +// DiffGetter returns a FileGetCloser that can read files from the directory that +// contains files for the layer differences. Used for direct access for tar-split. +func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { + p := d.dir(id) + return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil +} + +// CreateFromTemplate creates a layer with the same contents and parent as another layer. +func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error { + if readWrite { + return d.CreateReadWrite(id, template, opts) + } + return d.Create(id, template, opts) +} + +// ApplyDiff applies the new layer into a root +func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts) (size int64, err error) { + if d.ignoreChownErrors { + options.IgnoreChownErrors = d.ignoreChownErrors + } + return d.naiveDiff.ApplyDiff(id, parent, options) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.create(id, parent, opts, false) +} + +// Create prepares the filesystem for the VFS driver and copies the directory for the given id under the parent. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + return d.create(id, parent, opts, true) +} + +func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool) (retErr error) { + if opts != nil && len(opts.StorageOpt) != 0 { + return fmt.Errorf("--storage-opt is not supported for vfs") + } + + idMappings := d.idMappings + if opts != nil && opts.IDMappings != nil { + idMappings = opts.IDMappings + } + + dir := d.dir(id) + rootIDs := idMappings.RootPair() + if err := idtools.MkdirAllAndChown(filepath.Dir(dir), 0o700, rootIDs); err != nil { + return err + } + + defer func() { + if retErr != nil { + os.RemoveAll(dir) + } + }() + + rootPerms := defaultPerms + if runtime.GOOS == "darwin" { + rootPerms = os.FileMode(0o700) + } + + if parent != "" { + st, err := system.Stat(d.dir(parent)) + if err != nil { + return err + } + rootPerms = os.FileMode(st.Mode()) + rootIDs.UID = int(st.UID()) + rootIDs.GID = int(st.GID()) + } + if err := idtools.MkdirAndChown(dir, rootPerms, rootIDs); err != nil { + return err + } + labelOpts := []string{"level:s0"} + if _, mountLabel, err := label.InitLabels(labelOpts); err == nil { + label.SetFileLabel(dir, mountLabel) + } + if parent != "" { + parentDir, err := d.Get(parent, graphdriver.MountOpts{}) + if err != nil { + return fmt.Errorf("%s: %w", parent, err) + } + if err := dirCopy(parentDir, dir); err != nil { + return err + } + } + + return nil +} + +func (d *Driver) dir(id string) string { + for i, home := range d.homes { + if i > 0 { + home = filepath.Join(home, d.String()) + } + candidate := filepath.Join(home, "dir", filepath.Base(id)) + fi, err := os.Stat(candidate) + if err == nil && fi.IsDir() { + return candidate + } + } + return filepath.Join(d.homes[0], "dir", 
filepath.Base(id))
+}
+
+// Remove deletes the content from the directory for a given id.
+func (d *Driver) Remove(id string) error {
+ return system.EnsureRemoveAll(d.dir(id))
+}
+
+// Get returns the directory for the given id.
+func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) {
+ dir := d.dir(id)
+
+ for _, opt := range options.Options {
+ if opt == "ro" {
+ // ignore "ro" option
+ continue
+ }
+ return "", fmt.Errorf("vfs driver does not support mount options")
+ }
+ if st, err := os.Stat(dir); err != nil {
+ return "", err
+ } else if !st.IsDir() {
+ return "", fmt.Errorf("%s: not a directory", dir)
+ }
+ return dir, nil
+}
+
+// Put is a noop for vfs that returns nil for the error, since this driver has no runtime resources to clean up.
+func (d *Driver) Put(id string) error {
+ // The vfs driver has no runtime resources (e.g. mounts)
+ // to clean up, so we don't need anything here
+ return nil
+}
+
+// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID.
+// For VFS, it queries the directory for this ID.
+func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) {
+ return directory.Usage(d.dir(id))
+}
+
+// Exists checks to see if the directory exists for the given id.
+func (d *Driver) Exists(id string) bool {
+ _, err := os.Stat(d.dir(id))
+ return err == nil
+}
+
+// List layers (not including additional image stores)
+func (d *Driver) ListLayers() ([]string, error) {
+ entries, err := os.ReadDir(filepath.Join(d.homes[0], "dir"))
+ if err != nil {
+ return nil, err
+ }
+
+ layers := make([]string, 0)
+
+ for _, entry := range entries {
+ id := entry.Name()
+ // Does it look like a datadir directory?
+ if !entry.IsDir() {
+ continue
+ }
+
+ layers = append(layers, id)
+ }
+
+ return layers, err
+}
+
+// AdditionalImageStores returns additional image stores supported by the driver
+func (d *Driver) AdditionalImageStores() []string {
+ if len(d.homes) > 1 {
+ return d.homes[1:]
+ }
+ return nil
+}
+
+// SupportsShifting tells whether the driver supports shifting of the UIDs/GIDs in a userNS
+func (d *Driver) SupportsShifting() bool {
+ return d.updater.SupportsShifting()
+}
+
+// UpdateLayerIDMap updates the ID mappings of the layer with the given id,
+// remapping ownership from the mappings specified by toContainer to those
+// specified by toHost.
+func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error {
+ if err := d.updater.UpdateLayerIDMap(id, toContainer, toHost, mountLabel); err != nil {
+ return err
+ }
+ dir := d.dir(id)
+ rootIDs, err := toHost.ToHost(idtools.IDPair{UID: 0, GID: 0})
+ if err != nil {
+ return err
+ }
+ return os.Chown(dir, rootIDs.UID, rootIDs.GID)
+}
+
+// Changes produces a list of changes between the specified layer
+// and its parent layer. If parent is "", then all changes will be ADD changes.
+func (d *Driver) Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) {
+ return d.naiveDiff.Changes(id, idMappings, parent, parentMappings, mountLabel)
+}
+
+// Diff produces an archive of the changes between the specified
+// layer and its parent layer which may be "".
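+// (vfs has no native diff support; like Changes above, this delegates to the
+// NaiveDiffDriver, which walks and compares the two directory trees.)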
+func (d *Driver) Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error) { + return d.naiveDiff.Diff(id, idMappings, parent, parentMappings, mountLabel) +} + +// DiffSize calculates the changes between the specified id +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (d *Driver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) { + return d.naiveDiff.DiffSize(id, idMappings, parent, parentMappings, mountLabel) +} diff --git a/vendor/github.com/containers/storage/drivers/windows/jsoniter_windows.go b/vendor/github.com/containers/storage/drivers/windows/jsoniter_windows.go new file mode 100644 index 0000000000..fa14126301 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/windows/jsoniter_windows.go @@ -0,0 +1,5 @@ +package windows + +import jsoniter "github.com/json-iterator/go" + +var json = jsoniter.ConfigCompatibleWithStandardLibrary diff --git a/vendor/github.com/containers/storage/drivers/windows/windows.go b/vendor/github.com/containers/storage/drivers/windows/windows.go new file mode 100644 index 0000000000..8c2dc18aec --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/windows/windows.go @@ -0,0 +1,1010 @@ +//go:build windows +// +build windows + +package windows + +import ( + "archive/tar" + "bufio" + "bytes" + "errors" + "fmt" + "io" + "os" + "path" + "path/filepath" + "strconv" + "strings" + "sync" + "syscall" + "time" + "unsafe" + + "github.com/Microsoft/go-winio" + "github.com/Microsoft/go-winio/backuptar" + "github.com/Microsoft/hcsshim" + graphdriver "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/longpath" + "github.com/containers/storage/pkg/reexec" + "github.com/containers/storage/pkg/system" + units "github.com/docker/go-units" + "github.com/sirupsen/logrus" + "golang.org/x/sys/windows" +) + +// filterDriver is an HCSShim driver type for the Windows Filter driver. +const filterDriver = 1 + +var ( + // mutatedFiles is a list of files that are mutated by the import process + // and must be backed up and restored. + mutatedFiles = map[string]string{ + "UtilityVM/Files/EFI/Microsoft/Boot/BCD": "bcd.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG": "bcd.log.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG1": "bcd.log1.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG2": "bcd.log2.bak", + } + noreexec = false +) + +// init registers the windows graph drivers to the register. +func init() { + graphdriver.MustRegister("windowsfilter", InitFilter) + // DOCKER_WINDOWSFILTER_NOREEXEC allows for inline processing which makes + // debugging issues in the re-exec codepath significantly easier. + if os.Getenv("DOCKER_WINDOWSFILTER_NOREEXEC") != "" { + logrus.Warnf("WindowsGraphDriver is set to not re-exec. 
This is intended for debugging purposes only.") + noreexec = true + } else { + reexec.Register("docker-windows-write-layer", writeLayerReexec) + } +} + +type checker struct{} + +func (c *checker) IsMounted(path string) bool { + return false +} + +// Driver represents a windows graph driver. +type Driver struct { + // info stores the shim driver information + info hcsshim.DriverInfo + ctr *graphdriver.RefCounter + // it is safe for windows to use a cache here because it does not support + // restoring containers when the daemon dies. + cacheMu sync.Mutex + cache map[string]string +} + +// InitFilter returns a new Windows storage filter driver. +func InitFilter(home string, options graphdriver.Options) (graphdriver.Driver, error) { + logrus.Debugf("WindowsGraphDriver InitFilter at %s", home) + + for _, option := range options.DriverOptions { + if strings.HasPrefix(option, "windows.mountopt=") { + return nil, fmt.Errorf("windows driver does not support mount options") + } else { + return nil, fmt.Errorf("option %s not supported", option) + } + } + + fsType, err := getFileSystemType(string(home[0])) + if err != nil { + return nil, err + } + if strings.ToLower(fsType) == "refs" { + return nil, fmt.Errorf("%s is on an ReFS volume - ReFS volumes are not supported", home) + } + + if err := idtools.MkdirAllAs(home, 0o700, 0, 0); err != nil { + return nil, fmt.Errorf("windowsfilter failed to create '%s': %w", home, err) + } + + d := &Driver{ + info: hcsshim.DriverInfo{ + HomeDir: home, + Flavour: filterDriver, + }, + cache: make(map[string]string), + ctr: graphdriver.NewRefCounter(&checker{}), + } + return d, nil +} + +// win32FromHresult is a helper function to get the win32 error code from an HRESULT +func win32FromHresult(hr uintptr) uintptr { + if hr&0x1fff0000 == 0x00070000 { + return hr & 0xffff + } + return hr +} + +// getFileSystemType obtains the type of a file system through GetVolumeInformation +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa364993(v=vs.85).aspx +func getFileSystemType(drive string) (fsType string, hr error) { + var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + procGetVolumeInformation = modkernel32.NewProc("GetVolumeInformationW") + buf = make([]uint16, 255) + size = windows.MAX_PATH + 1 + ) + if len(drive) != 1 { + hr = errors.New("getFileSystemType must be called with a drive letter") + return + } + drive += `:\` + n := uintptr(unsafe.Pointer(nil)) + r0, _, _ := syscall.Syscall9(procGetVolumeInformation.Addr(), 8, uintptr(unsafe.Pointer(windows.StringToUTF16Ptr(drive))), n, n, n, n, n, uintptr(unsafe.Pointer(&buf[0])), uintptr(size), 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + fsType = windows.UTF16ToString(buf) + return +} + +// String returns the string representation of a driver. This should match +// the name the graph driver has been registered with. +func (d *Driver) String() string { + return "windowsfilter" +} + +// Status returns the status of the driver. +func (d *Driver) Status() [][2]string { + return [][2]string{ + {"Windows", ""}, + } +} + +// panicIfUsedByLcow does exactly what it says. +// TODO @jhowardmsft - this is a temporary measure for the bring-up of +// Linux containers on Windows. It is a failsafe to ensure that the right +// graphdriver is used. +func panicIfUsedByLcow() { + if system.LCOWSupported() { + panic("inconsistency - windowsfilter graphdriver should not be used when in LCOW mode") + } +} + +// Exists returns true if the given id is registered with this driver. 
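+// The id is first resolved through the layerID indirection file (see
+// resolveID below) before hcsshim is queried; any error during the lookup is
+// reported as the layer not existing.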
+func (d *Driver) Exists(id string) bool { + panicIfUsedByLcow() + rID, err := d.resolveID(id) + if err != nil { + return false + } + result, err := hcsshim.LayerExists(d.info, rID) + if err != nil { + return false + } + return result +} + +// List layers (not including additional image stores) +func (d *Driver) ListLayers() ([]string, error) { + return nil, graphdriver.ErrNotSupported +} + +// CreateFromTemplate creates a layer with the same contents and parent as another layer. +func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error { + return graphdriver.NaiveCreateFromTemplate(d, id, template, templateIDMappings, parent, parentIDMappings, opts, readWrite) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + panicIfUsedByLcow() + if opts != nil { + return d.create(id, parent, opts.MountLabel, false, opts.StorageOpt) + } + return d.create(id, parent, "", false, nil) +} + +// Create creates a new read-only layer with the given id. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + panicIfUsedByLcow() + if opts != nil { + return d.create(id, parent, opts.MountLabel, true, opts.StorageOpt) + } + return d.create(id, parent, "", true, nil) +} + +func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt map[string]string) error { + rPId, err := d.resolveID(parent) + if err != nil { + return err + } + + parentChain, err := d.getLayerChain(rPId) + if err != nil { + return err + } + + var layerChain []string + + if rPId != "" { + parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId) + if err != nil { + return err + } + if _, err := os.Stat(filepath.Join(parentPath, "Files")); err == nil { + // This is a legitimate parent layer (not the empty "-init" layer), + // so include it in the layer chain. + layerChain = []string{parentPath} + } + } + + layerChain = append(layerChain, parentChain...) + + if readOnly { + if err := hcsshim.CreateLayer(d.info, id, rPId); err != nil { + return err + } + } else { + var parentPath string + if len(layerChain) != 0 { + parentPath = layerChain[0] + } + + if err := hcsshim.CreateSandboxLayer(d.info, id, parentPath, layerChain); err != nil { + return err + } + + storageOptions, err := parseStorageOpt(storageOpt) + if err != nil { + return fmt.Errorf("failed to parse storage options - %s", err) + } + + if storageOptions.size != 0 { + if err := hcsshim.ExpandSandboxSize(d.info, id, storageOptions.size); err != nil { + return err + } + } + } + + if _, err := os.Lstat(d.dir(parent)); err != nil { + if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { + logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) + } + return fmt.Errorf("cannot create layer with missing parent %s: %s", parent, err) + } + + if err := d.setLayerChain(id, layerChain); err != nil { + if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { + logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) + } + return err + } + + return nil +} + +// dir returns the absolute path to the layer. +func (d *Driver) dir(id string) string { + return filepath.Join(d.info.HomeDir, filepath.Base(id)) +} + +// Remove unmounts and removes the dir information. 
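+// The layer directory is first renamed to "<id>-removing", so that a removal
+// interrupted by a crash can still be finished later by Cleanup().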
+func (d *Driver) Remove(id string) error { + panicIfUsedByLcow() + rID, err := d.resolveID(id) + if err != nil { + return err + } + + // This retry loop is due to a bug in Windows (Internal bug #9432268) + // if GetContainers fails with ErrVmcomputeOperationInvalidState + // it is a transient error. Retry until it succeeds. + var computeSystems []hcsshim.ContainerProperties + retryCount := 0 + osv := system.GetOSVersion() + for { + // Get and terminate any template VMs that are currently using the layer. + // Note: It is unfortunate that we end up in the graphdrivers Remove() call + // for both containers and images, but the logic for template VMs is only + // needed for images - specifically we are looking to see if a base layer + // is in use by a template VM as a result of having started a Hyper-V + // container at some point. + // + // We have a retry loop for ErrVmcomputeOperationInvalidState and + // ErrVmcomputeOperationAccessIsDenied as there is a race condition + // in RS1 and RS2 building during enumeration when a silo is going away + // for example under it, in HCS. AccessIsDenied added to fix 30278. + // + // TODO @jhowardmsft - For RS3, we can remove the retries. Also consider + // using platform APIs (if available) to get this more succinctly. Also + // consider enhancing the Remove() interface to have context of why + // the remove is being called - that could improve efficiency by not + // enumerating compute systems during a remove of a container as it's + // not required. + computeSystems, err = hcsshim.GetContainers(hcsshim.ComputeSystemQuery{}) + if err != nil { + if (osv.Build < 15139) && + ((err == hcsshim.ErrVmcomputeOperationInvalidState) || (err == hcsshim.ErrVmcomputeOperationAccessIsDenied)) { + if retryCount >= 500 { + break + } + retryCount++ + time.Sleep(10 * time.Millisecond) + continue + } + return err + } + break + } + + for _, computeSystem := range computeSystems { + if strings.Contains(computeSystem.RuntimeImagePath, id) && computeSystem.IsRuntimeTemplate { + container, err := hcsshim.OpenContainer(computeSystem.ID) + if err != nil { + return err + } + defer container.Close() + err = container.Terminate() + if hcsshim.IsPending(err) { + err = container.Wait() + } else if hcsshim.IsAlreadyStopped(err) { + err = nil + } + + if err != nil { + return err + } + } + } + + layerPath := filepath.Join(d.info.HomeDir, rID) + tmpID := fmt.Sprintf("%s-removing", rID) + tmpLayerPath := filepath.Join(d.info.HomeDir, tmpID) + if err := os.Rename(layerPath, tmpLayerPath); err != nil && !os.IsNotExist(err) { + return err + } + if err := hcsshim.DestroyLayer(d.info, tmpID); err != nil { + logrus.Errorf("Failed to DestroyLayer %s: %s", id, err) + } + + return nil +} + +// Get returns the rootfs path for the id. This will mount the dir at its given path. +func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { + panicIfUsedByLcow() + logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, options.MountLabel) + var dir string + + switch len(options.Options) { + case 0: + case 1: + if options.Options[0] == "ro" { + // ignore "ro" option + break + } + fallthrough + default: + return "", fmt.Errorf("windows driver does not support mount options") + } + rID, err := d.resolveID(id) + if err != nil { + return "", err + } + if count := d.ctr.Increment(rID); count > 1 { + return d.cache[rID], nil + } + + // Getting the layer paths must be done outside of the lock. 
+	layerChain, err := d.getLayerChain(rID)
+	if err != nil {
+		d.ctr.Decrement(rID)
+		return "", err
+	}
+
+	if err := hcsshim.ActivateLayer(d.info, rID); err != nil {
+		d.ctr.Decrement(rID)
+		return "", err
+	}
+	if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil {
+		d.ctr.Decrement(rID)
+		if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
+			logrus.Warnf("Failed to Deactivate %s: %s", id, err2)
+		}
+		return "", err
+	}
+
+	mountPath, err := hcsshim.GetLayerMountPath(d.info, rID)
+	if err != nil {
+		d.ctr.Decrement(rID)
+		if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
+			logrus.Warnf("Failed to Unprepare %s: %s", id, err)
+		}
+		if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
+			logrus.Warnf("Failed to Deactivate %s: %s", id, err2)
+		}
+		return "", err
+	}
+	d.cacheMu.Lock()
+	d.cache[rID] = mountPath
+	d.cacheMu.Unlock()
+
+	// If the layer has a mount path, use that. Otherwise, use the
+	// folder path.
+	if mountPath != "" {
+		dir = mountPath
+	} else {
+		dir = d.dir(id)
+	}
+
+	return dir, nil
+}
+
+// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID.
+// For the windows driver, it queries the directory for this ID.
+func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) {
+	return directory.Usage(d.dir(id))
+}
+
+// Put releases a reference on the layer, unpreparing and deactivating it once
+// the last user is done with it.
+func (d *Driver) Put(id string) error {
+	panicIfUsedByLcow()
+	logrus.Debugf("WindowsGraphDriver Put() id %s", id)
+
+	rID, err := d.resolveID(id)
+	if err != nil {
+		return err
+	}
+	if count := d.ctr.Decrement(rID); count > 0 {
+		return nil
+	}
+	d.cacheMu.Lock()
+	_, exists := d.cache[rID]
+	delete(d.cache, rID)
+	d.cacheMu.Unlock()
+
+	// If the cache was not populated, then the layer was left unprepared and deactivated
+	if !exists {
+		return nil
+	}
+
+	if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
+		return err
+	}
+	return hcsshim.DeactivateLayer(d.info, rID)
+}
+
+// Cleanup ensures the information the driver stores is properly removed.
+// We use this opportunity to clean up any -removing folders which may be
+// still left if the daemon was killed while it was removing a layer.
+func (d *Driver) Cleanup() error {
+	items, err := os.ReadDir(d.info.HomeDir)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil
+		}
+		return err
+	}
+
+	// Note we don't return an error below - it's possible the files
+	// are locked. However, next time around after the daemon exits,
+	// we will likely be able to clean up successfully. Instead we log
+	// warnings if there are errors.
+	for _, item := range items {
+		if item.IsDir() && strings.HasSuffix(item.Name(), "-removing") {
+			if err := hcsshim.DestroyLayer(d.info, item.Name()); err != nil {
+				logrus.Warnf("Failed to cleanup %s: %s", item.Name(), err)
+			} else {
+				logrus.Infof("Cleaned up %s", item.Name())
+			}
+		}
+	}
+
+	return nil
+}
+
+// Diff produces an archive of the changes between the specified
+// layer and its parent layer which may be "".
+// The layer should be mounted when calling this function.
+func (d *Driver) Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (_ io.ReadCloser, err error) {
+	panicIfUsedByLcow()
+	rID, err := d.resolveID(id)
+	if err != nil {
+		return
+	}
+
+	layerChain, err := d.getLayerChain(rID)
+	if err != nil {
+		return
+	}
+
+	// The layer is expected to be mounted (see the comment above); unprepare
+	// it so that it can be exported, and re-prepare it when done.
+	if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
+		return nil, err
+	}
+	prepare := func() {
+		if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil {
+			logrus.Warnf("Failed to re-PrepareLayer %s: %s", rID, err)
+		}
+	}
+
+	arch, err := d.exportLayer(rID, layerChain)
+	if err != nil {
+		prepare()
+		return
+	}
+	return ioutils.NewReadCloserWrapper(arch, func() error {
+		err := arch.Close()
+		prepare()
+		return err
+	}), nil
+}
+
+// Changes produces a list of changes between the specified layer
+// and its parent layer. If parent is "", then all changes will be ADD changes.
+// The layer should not be mounted when calling this function.
+func (d *Driver) Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) {
+	panicIfUsedByLcow()
+	rID, err := d.resolveID(id)
+	if err != nil {
+		return nil, err
+	}
+	parentChain, err := d.getLayerChain(rID)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := hcsshim.ActivateLayer(d.info, rID); err != nil {
+		return nil, err
+	}
+	defer func() {
+		if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
+			logrus.Errorf("changes() failed to DeactivateLayer %s %s: %s", id, rID, err2)
+		}
+	}()
+
+	var changes []archive.Change
+	err = winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
+		r, err := hcsshim.NewLayerReader(d.info, id, parentChain)
+		if err != nil {
+			return err
+		}
+		defer r.Close()
+
+		for {
+			name, _, fileInfo, err := r.Next()
+			if err == io.EOF {
+				return nil
+			}
+			if err != nil {
+				return err
+			}
+			name = filepath.ToSlash(name)
+			if fileInfo == nil {
+				changes = append(changes, archive.Change{Path: name, Kind: archive.ChangeDelete})
+			} else {
+				// Currently there is no way to tell an add apart from a modify.
+				changes = append(changes, archive.Change{Path: name, Kind: archive.ChangeModify})
+			}
+		}
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return changes, nil
+}
+
+// ApplyDiff extracts the changeset from the given diff into the
+// layer with the specified id and parent, returning the size of the
+// new layer in bytes.
+// The layer should not be mounted when calling this function.
+func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts) (int64, error) {
+	panicIfUsedByLcow()
+	var layerChain []string
+	if parent != "" {
+		rPId, err := d.resolveID(parent)
+		if err != nil {
+			return 0, err
+		}
+		parentChain, err := d.getLayerChain(rPId)
+		if err != nil {
+			return 0, err
+		}
+		parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId)
+		if err != nil {
+			return 0, err
+		}
+		layerChain = append(layerChain, parentPath)
+		layerChain = append(layerChain, parentChain...)
+	}
+
+	size, err := d.importLayer(id, options.Diff, layerChain)
+	if err != nil {
+		return 0, err
+	}
+
+	if err = d.setLayerChain(id, layerChain); err != nil {
+		return 0, err
+	}
+
+	return size, nil
+}
+
+// DiffSize calculates the changes between the specified layer
+// and its parent and returns the size in bytes of the changes
+// relative to its base filesystem directory.
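+// The size is computed by enumerating Changes() against the mounted layer, so
+// the layer is mounted via Get() for the duration and released with Put().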
+func (d *Driver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) { + panicIfUsedByLcow() + rPId, err := d.resolveID(parent) + if err != nil { + return + } + + changes, err := d.Changes(id, idMappings, rPId, parentMappings, mountLabel) + if err != nil { + return + } + + layerFs, err := d.Get(id, graphdriver.MountOpts{}) + if err != nil { + return + } + defer d.Put(id) + + return archive.ChangesSize(layerFs, changes), nil +} + +// Metadata returns custom driver information. +func (d *Driver) Metadata(id string) (map[string]string, error) { + panicIfUsedByLcow() + m := make(map[string]string) + m["dir"] = d.dir(id) + return m, nil +} + +func writeTarFromLayer(r hcsshim.LayerReader, w io.Writer) error { + t := tar.NewWriter(w) + for { + name, size, fileInfo, err := r.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + if fileInfo == nil { + // Write a whiteout file. + hdr := &tar.Header{ + Name: filepath.ToSlash(filepath.Join(filepath.Dir(name), archive.WhiteoutPrefix+filepath.Base(name))), + } + err := t.WriteHeader(hdr) + if err != nil { + return err + } + } else { + err = backuptar.WriteTarFileFromBackupStream(t, r, name, size, fileInfo) + if err != nil { + return err + } + } + } + return t.Close() +} + +// exportLayer generates an archive from a layer based on the given ID. +func (d *Driver) exportLayer(id string, parentLayerPaths []string) (io.ReadCloser, error) { + archive, w := io.Pipe() + go func() { + err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error { + r, err := hcsshim.NewLayerReader(d.info, id, parentLayerPaths) + if err != nil { + return err + } + + err = writeTarFromLayer(r, w) + cerr := r.Close() + if err == nil { + err = cerr + } + return err + }) + w.CloseWithError(err) + }() + + return archive, nil +} + +// writeBackupStreamFromTarAndSaveMutatedFiles reads data from a tar stream and +// writes it to a backup stream, and also saves any files that will be mutated +// by the import layer process to a backup location. 
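+// The files in question are the BCD boot files listed in mutatedFiles above;
+// the saved copies are what DiffGetter later serves for those paths.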
+func writeBackupStreamFromTarAndSaveMutatedFiles(buf *bufio.Writer, w io.Writer, t *tar.Reader, hdr *tar.Header, root string) (nextHdr *tar.Header, err error) { + var bcdBackup *os.File + var bcdBackupWriter *winio.BackupFileWriter + if backupPath, ok := mutatedFiles[hdr.Name]; ok { + bcdBackup, err = os.Create(filepath.Join(root, backupPath)) + if err != nil { + return nil, err + } + defer func() { + cerr := bcdBackup.Close() + if err == nil { + err = cerr + } + }() + + bcdBackupWriter = winio.NewBackupFileWriter(bcdBackup, false) + defer func() { + cerr := bcdBackupWriter.Close() + if err == nil { + err = cerr + } + }() + + buf.Reset(io.MultiWriter(w, bcdBackupWriter)) + } else { + buf.Reset(w) + } + + defer func() { + ferr := buf.Flush() + if err == nil { + err = ferr + } + }() + + return backuptar.WriteBackupStreamFromTarFile(buf, t, hdr) +} + +func writeLayerFromTar(r io.Reader, w hcsshim.LayerWriter, root string) (int64, error) { + t := tar.NewReader(r) + hdr, err := t.Next() + totalSize := int64(0) + buf := bufio.NewWriter(nil) + for err == nil { + base := path.Base(hdr.Name) + if strings.HasPrefix(base, archive.WhiteoutPrefix) { + name := path.Join(path.Dir(hdr.Name), base[len(archive.WhiteoutPrefix):]) + err = w.Remove(filepath.FromSlash(name)) + if err != nil { + return 0, err + } + hdr, err = t.Next() + } else if hdr.Typeflag == tar.TypeLink { + err = w.AddLink(filepath.FromSlash(hdr.Name), filepath.FromSlash(hdr.Linkname)) + if err != nil { + return 0, err + } + hdr, err = t.Next() + } else { + var ( + name string + size int64 + fileInfo *winio.FileBasicInfo + ) + name, size, fileInfo, err = backuptar.FileInfoFromHeader(hdr) + if err != nil { + return 0, err + } + err = w.Add(filepath.FromSlash(name), fileInfo) + if err != nil { + return 0, err + } + hdr, err = writeBackupStreamFromTarAndSaveMutatedFiles(buf, w, t, hdr, root) + totalSize += size + } + } + if err != io.EOF { + return 0, err + } + return totalSize, nil +} + +// importLayer adds a new layer to the tag and graph store based on the given data. +func (d *Driver) importLayer(id string, layerData io.Reader, parentLayerPaths []string) (size int64, err error) { + if !noreexec { + cmd := reexec.Command(append([]string{"docker-windows-write-layer", d.info.HomeDir, id}, parentLayerPaths...)...) + output := bytes.NewBuffer(nil) + cmd.Stdin = layerData + cmd.Stdout = output + cmd.Stderr = output + + if err = cmd.Start(); err != nil { + return + } + + if err = cmd.Wait(); err != nil { + return 0, fmt.Errorf("re-exec output: %s: error: %w", output, err) + } + + return strconv.ParseInt(output.String(), 10, 64) + } + return writeLayer(layerData, d.info.HomeDir, id, parentLayerPaths...) +} + +// writeLayerReexec is the re-exec entry point for writing a layer from a tar file +func writeLayerReexec() { + size, err := writeLayer(os.Stdin, os.Args[1], os.Args[2], os.Args[3:]...) + if err != nil { + fmt.Fprint(os.Stderr, err) + os.Exit(1) + } + fmt.Fprint(os.Stdout, size) +} + +// writeLayer writes a layer from a tar file. +func writeLayer(layerData io.Reader, home string, id string, parentLayerPaths ...string) (int64, error) { + err := winio.EnableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}) + if err != nil { + return 0, err + } + if noreexec { + defer func() { + if err := winio.DisableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}); err != nil { + // This should never happen, but just in case when in debugging mode. 
+				// See https://github.com/docker/docker/pull/28002#discussion_r86259241 for rationale.
+				panic("Failed to disable process privileges while in non re-exec mode")
+			}
+		}()
+	}
+
+	info := hcsshim.DriverInfo{
+		Flavour: filterDriver,
+		HomeDir: home,
+	}
+
+	w, err := hcsshim.NewLayerWriter(info, id, parentLayerPaths)
+	if err != nil {
+		return 0, err
+	}
+
+	size, err := writeLayerFromTar(layerData, w, filepath.Join(home, id))
+	if err != nil {
+		return 0, err
+	}
+
+	err = w.Close()
+	if err != nil {
+		return 0, err
+	}
+
+	return size, nil
+}
+
+// resolveID computes the layerID information based on the given id.
+func (d *Driver) resolveID(id string) (string, error) {
+	content, err := os.ReadFile(filepath.Join(d.dir(id), "layerID"))
+	if os.IsNotExist(err) {
+		return id, nil
+	} else if err != nil {
+		return "", err
+	}
+	return string(content), nil
+}
+
+// setID stores the layerId on disk.
+func (d *Driver) setID(id, altID string) error {
+	return os.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0o600)
+}
+
+// getLayerChain returns the layer chain information.
+func (d *Driver) getLayerChain(id string) ([]string, error) {
+	jPath := filepath.Join(d.dir(id), "layerchain.json")
+	content, err := os.ReadFile(jPath)
+	if os.IsNotExist(err) {
+		return nil, nil
+	} else if err != nil {
+		return nil, fmt.Errorf("unable to read layerchain file - %s", err)
+	}
+
+	var layerChain []string
+	err = json.Unmarshal(content, &layerChain)
+	if err != nil {
+		return nil, fmt.Errorf("failed to unmarshal layerchain json - %s", err)
+	}
+
+	return layerChain, nil
+}
+
+// setLayerChain stores the layer chain information on disk.
+func (d *Driver) setLayerChain(id string, chain []string) error {
+	content, err := json.Marshal(&chain)
+	if err != nil {
+		return fmt.Errorf("failed to marshal layerchain json - %s", err)
+	}
+
+	jPath := filepath.Join(d.dir(id), "layerchain.json")
+	err = os.WriteFile(jPath, content, 0o600)
+	if err != nil {
+		return fmt.Errorf("unable to write layerchain file - %s", err)
+	}
+
+	return nil
+}
+
+type fileGetCloserWithBackupPrivileges struct {
+	path string
+}
+
+func (fg *fileGetCloserWithBackupPrivileges) Get(filename string) (io.ReadCloser, error) {
+	if backupPath, ok := mutatedFiles[filename]; ok {
+		return os.Open(filepath.Join(fg.path, backupPath))
+	}
+
+	var f *os.File
+	// Open the file while holding the Windows backup privilege. This ensures that the
+	// file can be opened even if the caller does not actually have access to it according
+	// to the security descriptor. Also use sequential file access to avoid depleting the
+	// standby list - Microsoft VSO Bug Tracker #9900466
+	err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
+		path := longpath.AddPrefix(filepath.Join(fg.path, filename))
+		p, err := windows.UTF16FromString(path)
+		if err != nil {
+			return err
+		}
+		const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN
+		h, err := windows.CreateFile(&p[0], windows.GENERIC_READ, windows.FILE_SHARE_READ, nil, windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS|fileFlagSequentialScan, 0)
+		if err != nil {
+			return &os.PathError{Op: "open", Path: path, Err: err}
+		}
+		f = os.NewFile(uintptr(h), path)
+		return nil
+	})
+	return f, err
+}
+
+func (fg *fileGetCloserWithBackupPrivileges) Close() error {
+	return nil
+}
+
+// DiffGetter returns a FileGetCloser that can read files from the directory that
+// contains files for the layer differences. Used for direct access for tar-split.
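+// Files are opened while holding the Windows backup privilege, so they can be
+// read even when their security descriptors would otherwise deny access.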
+func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
+	panicIfUsedByLcow()
+	id, err := d.resolveID(id)
+	if err != nil {
+		return nil, err
+	}
+
+	return &fileGetCloserWithBackupPrivileges{d.dir(id)}, nil
+}
+
+// AdditionalImageStores returns additional image stores supported by the driver
+func (d *Driver) AdditionalImageStores() []string {
+	return nil
+}
+
+// UpdateLayerIDMap changes ownerships in the layer's filesystem tree from
+// matching those in toContainer to matching those in toHost.
+func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error {
+	return fmt.Errorf("windows doesn't support changing ID mappings")
+}
+
+// SupportsShifting tells whether the driver supports shifting of the UIDs/GIDs in a userNS
+func (d *Driver) SupportsShifting() bool {
+	return false
+}
+
+type storageOptions struct {
+	size uint64
+}
+
+func parseStorageOpt(storageOpt map[string]string) (*storageOptions, error) {
+	options := storageOptions{}
+
+	// Read size to change the block device size per container.
+	for key, val := range storageOpt {
+		key := strings.ToLower(key)
+		switch key {
+		case "size":
+			size, err := units.RAMInBytes(val)
+			if err != nil {
+				return nil, err
+			}
+			options.size = uint64(size)
+		default:
+			return nil, fmt.Errorf("unknown storage option: %s", key)
+		}
+	}
+	return &options, nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/zfs/MAINTAINERS b/vendor/github.com/containers/storage/drivers/zfs/MAINTAINERS
new file mode 100644
index 0000000000..9c270c541f
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/zfs/MAINTAINERS
@@ -0,0 +1,2 @@
+Jörg Thalheim (@Mic92)
+Arthur Gautier (@baloose)
diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs.go b/vendor/github.com/containers/storage/drivers/zfs/zfs.go
new file mode 100644
index 0000000000..e022897848
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/zfs/zfs.go
@@ -0,0 +1,518 @@
+//go:build linux || freebsd
+// +build linux freebsd
+
+package zfs
+
+import (
+	"fmt"
+	"os"
+	"os/exec"
+	"path"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	graphdriver "github.com/containers/storage/drivers"
+	"github.com/containers/storage/pkg/directory"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/mount"
+	"github.com/containers/storage/pkg/parsers"
+	zfs "github.com/mistifyio/go-zfs/v3"
+	"github.com/opencontainers/selinux/go-selinux/label"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
+)
+
+type zfsOptions struct {
+	fsName       string
+	mountPath    string
+	mountOptions string
+}
+
+const defaultPerms = os.FileMode(0o555)
+
+func init() {
+	graphdriver.MustRegister("zfs", Init)
+}
+
+// Logger is a zfs logger implementation.
+type Logger struct{}
+
+// Log wraps log messages from the ZFS driver with the storage-driver=zfs field.
+func (*Logger) Log(cmd []string) {
+	logrus.WithField("storage-driver", "zfs").Debugf("%s", strings.Join(cmd, " "))
+}
+
+// Init returns a new ZFS driver.
+// It takes a base mount path and an array of options which are represented as key value pairs.
+// Each option is in the form key=value. 'zfs.fsname' is expected to be a valid key in the options.
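+// When zfs.fsname is not supplied, the dataset is auto-detected by statting
+// the parent of the base directory and matching its device against the
+// mounted zfs filesystems.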
+func Init(base string, opt graphdriver.Options) (graphdriver.Driver, error) { + var err error + + logger := logrus.WithField("storage-driver", "zfs") + + if _, err := exec.LookPath("zfs"); err != nil { + logger.Debugf("zfs command is not available: %v", err) + return nil, fmt.Errorf("the 'zfs' command is not available: %w", graphdriver.ErrPrerequisites) + } + + file, err := unix.Open("/dev/zfs", unix.O_RDWR, 0o600) + if err != nil { + logger.Debugf("cannot open /dev/zfs: %v", err) + return nil, fmt.Errorf("could not open /dev/zfs: %v: %w", err, graphdriver.ErrPrerequisites) + } + defer unix.Close(file) + + options, err := parseOptions(opt.DriverOptions) + if err != nil { + return nil, err + } + options.mountPath = base + + rootdir := path.Dir(base) + + if options.fsName == "" { + err = checkRootdirFs(rootdir) + if err != nil { + return nil, err + } + } + + if options.fsName == "" { + options.fsName, err = lookupZfsDataset(rootdir) + if err != nil { + return nil, err + } + } + + zfs.SetLogger(new(Logger)) + + filesystems, err := zfs.Filesystems(options.fsName) + if err != nil { + return nil, fmt.Errorf("cannot find root filesystem %s: %w", options.fsName, err) + } + + filesystemsCache := make(map[string]bool, len(filesystems)) + var rootDataset *zfs.Dataset + for _, fs := range filesystems { + if fs.Name == options.fsName { + rootDataset = fs + } + filesystemsCache[fs.Name] = true + } + + if rootDataset == nil { + return nil, fmt.Errorf("zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName) + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(opt.UIDMaps, opt.GIDMaps) + if err != nil { + return nil, fmt.Errorf("failed to get root uid/gid: %w", err) + } + if err := idtools.MkdirAllAs(base, 0o700, rootUID, rootGID); err != nil { + return nil, fmt.Errorf("failed to create '%s': %w", base, err) + } + + d := &Driver{ + dataset: rootDataset, + options: options, + filesystemsCache: filesystemsCache, + uidMaps: opt.UIDMaps, + gidMaps: opt.GIDMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()), + } + return graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d)), nil +} + +func parseOptions(opt []string) (zfsOptions, error) { + var options zfsOptions + options.fsName = "" + for _, option := range opt { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return options, err + } + key = strings.ToLower(key) + switch key { + case "zfs.fsname": + options.fsName = val + case "zfs.mountopt": + options.mountOptions = val + default: + return options, fmt.Errorf("unknown option %s", key) + } + } + return options, nil +} + +func lookupZfsDataset(rootdir string) (string, error) { + var stat unix.Stat_t + if err := unix.Stat(rootdir, &stat); err != nil { + return "", fmt.Errorf("failed to access '%s': %w", rootdir, err) + } + wantedDev := stat.Dev + + mounts, err := mount.GetMounts() + if err != nil { + return "", err + } + for _, m := range mounts { + if err := unix.Stat(m.Mountpoint, &stat); err != nil { + logrus.WithField("storage-driver", "zfs").Debugf("failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err) + continue // may fail on fuse file systems + } + + if stat.Dev == wantedDev && m.FSType == "zfs" { + return m.Source, nil + } + } + + return "", fmt.Errorf("failed to find zfs dataset mounted on '%s' in /proc/mounts", rootdir) +} + +// Driver holds information about the driver, such as zfs dataset, options and cache. 
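+// The filesystem cache is seeded from the initial dataset listing in Init and
+// kept up to date on create, clone and remove, so Exists() can answer without
+// shelling out to zfs.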
+type Driver struct {
+	dataset          *zfs.Dataset
+	options          zfsOptions
+	sync.Mutex       // protects filesystem cache against concurrent access
+	filesystemsCache map[string]bool
+	uidMaps          []idtools.IDMap
+	gidMaps          []idtools.IDMap
+	ctr              *graphdriver.RefCounter
+}
+
+func (d *Driver) String() string {
+	return "zfs"
+}
+
+// Cleanup is called when the program exits; it is a no-op for ZFS.
+func (d *Driver) Cleanup() error {
+	return nil
+}
+
+// Status returns information about the ZFS filesystem. It returns a two-dimensional array of information
+// such as pool name, dataset name, disk usage, parent quota and compression used.
+// Currently it returns 'Zpool', 'Zpool Health', 'Parent Dataset', 'Space Used By Parent',
+// 'Space Available', 'Parent Quota' and 'Compression'.
+func (d *Driver) Status() [][2]string {
+	parts := strings.Split(d.dataset.Name, "/")
+	pool, err := zfs.GetZpool(parts[0])
+
+	var poolName, poolHealth string
+	if err == nil {
+		poolName = pool.Name
+		poolHealth = pool.Health
+	} else {
+		poolName = fmt.Sprintf("error while getting pool information %v", err)
+		poolHealth = "not available"
+	}
+
+	quota := "no"
+	if d.dataset.Quota != 0 {
+		quota = strconv.FormatUint(d.dataset.Quota, 10)
+	}
+
+	return [][2]string{
+		{"Zpool", poolName},
+		{"Zpool Health", poolHealth},
+		{"Parent Dataset", d.dataset.Name},
+		{"Space Used By Parent", strconv.FormatUint(d.dataset.Used, 10)},
+		{"Space Available", strconv.FormatUint(d.dataset.Avail, 10)},
+		{"Parent Quota", quota},
+		{"Compression", d.dataset.Compression},
+	}
+}
+
+// Metadata returns image/container metadata related to the graph driver.
+func (d *Driver) Metadata(id string) (map[string]string, error) {
+	return map[string]string{
+		"Mountpoint": d.mountPath(id),
+		"Dataset":    d.zfsPath(id),
+	}, nil
+}
+
+func (d *Driver) cloneFilesystem(name, parentName string) error {
+	snapshotName := fmt.Sprintf("%d", time.Now().Nanosecond())
+	parentDataset := zfs.Dataset{Name: parentName}
+	snapshot, err := parentDataset.Snapshot(snapshotName, false /* recursive */)
+	if err != nil {
+		return err
+	}
+
+	_, err = snapshot.Clone(name, map[string]string{"mountpoint": "legacy"})
+	if err == nil {
+		d.Lock()
+		d.filesystemsCache[name] = true
+		d.Unlock()
+	}
+
+	if err != nil {
+		snapshot.Destroy(zfs.DestroyDeferDeletion)
+		return err
+	}
+	return snapshot.Destroy(zfs.DestroyDeferDeletion)
+}
+
+func (d *Driver) zfsPath(id string) string {
+	return d.options.fsName + "/" + id
+}
+
+func (d *Driver) mountPath(id string) string {
+	return path.Join(d.options.mountPath, "graph", getMountpoint(id))
+}
+
+// CreateFromTemplate creates a layer with the same contents and parent as another layer.
+func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error {
+	return d.Create(id, template, opts)
+}
+
+// CreateReadWrite creates a layer that is writable for use as a container
+// file system.
+func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
+	return d.Create(id, parent, opts)
+}
+
+// Create prepares the dataset and filesystem for the ZFS driver for the given id under the parent.
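+// If the dataset already exists (for example, left over from an aborted
+// build), it is destroyed together with its clones and the creation is
+// retried once.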
+func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + err := d.create(id, parent, opts) + if err == nil { + return nil + } + if zfsError, ok := err.(*zfs.Error); ok { + if !strings.HasSuffix(zfsError.Stderr, "dataset already exists\n") { + return err + } + // aborted build -> cleanup + } else { + return err + } + + dataset := zfs.Dataset{Name: d.zfsPath(id)} + if err := dataset.Destroy(zfs.DestroyRecursiveClones); err != nil { + return err + } + + // retry + return d.create(id, parent, opts) +} + +func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) error { + var storageOpt map[string]string + if opts != nil { + storageOpt = opts.StorageOpt + } + + name := d.zfsPath(id) + mountpoint := d.mountPath(id) + quota, err := parseStorageOpt(storageOpt) + if err != nil { + return err + } + if parent == "" { + var rootUID, rootGID int + var mountLabel string + if opts != nil { + rootUID, rootGID, err = idtools.GetRootUIDGID(opts.UIDs(), opts.GIDs()) + if err != nil { + return fmt.Errorf("failed to get root uid/gid: %w", err) + } + mountLabel = opts.MountLabel + } + mountoptions := map[string]string{"mountpoint": "legacy"} + fs, err := zfs.CreateFilesystem(name, mountoptions) + if err == nil { + err = setQuota(name, quota) + if err == nil { + d.Lock() + d.filesystemsCache[fs.Name] = true + d.Unlock() + } + + if err := idtools.MkdirAllAs(mountpoint, defaultPerms, rootUID, rootGID); err != nil { + return err + } + defer func() { + if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { + logrus.Debugf("Failed to remove %s mount point %s: %v", id, mountpoint, err) + } + }() + + mountOpts := label.FormatMountLabel(d.options.mountOptions, mountLabel) + + if err := mount.Mount(name, mountpoint, "zfs", mountOpts); err != nil { + return fmt.Errorf("creating zfs mount: %w", err) + } + defer func() { + if err := detachUnmount(mountpoint); err != nil { + logrus.Warnf("failed to unmount %s mount %s: %v", id, mountpoint, err) + } + }() + + if err := os.Chmod(mountpoint, defaultPerms); err != nil { + return fmt.Errorf("setting permissions on zfs mount: %w", err) + } + + // this is our first mount after creation of the filesystem, and the root dir may still have root + // permissions instead of the remapped root uid:gid (if user namespaces are enabled): + if err := os.Chown(mountpoint, rootUID, rootGID); err != nil { + return fmt.Errorf("modifying zfs mountpoint (%s) ownership: %w", mountpoint, err) + } + + } + return err + } + err = d.cloneFilesystem(name, d.zfsPath(parent)) + if err == nil { + err = setQuota(name, quota) + } + return err +} + +func parseStorageOpt(storageOpt map[string]string) (string, error) { + // Read size to change the disk quota per container + for k, v := range storageOpt { + key := strings.ToLower(k) + switch key { + case "size": + return v, nil + default: + return "0", fmt.Errorf("unknown option %s", key) + } + } + return "0", nil +} + +func setQuota(name string, quota string) error { + if quota == "0" { + return nil + } + fs, err := zfs.GetDataset(name) + if err != nil { + return err + } + return fs.SetProperty("quota", quota) +} + +// Remove deletes the dataset, filesystem and the cache for the given id. 
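+// Destruction is recursive (`zfs destroy -r`), so child datasets and
+// snapshots under the dataset are removed with it.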
+func (d *Driver) Remove(id string) error { + name := d.zfsPath(id) + dataset := zfs.Dataset{Name: name} + err := dataset.Destroy(zfs.DestroyRecursive) + if err == nil { + d.Lock() + delete(d.filesystemsCache, name) + d.Unlock() + } + return err +} + +// Get returns the mountpoint for the given id after creating the target directories if necessary. +func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) { + mountpoint := d.mountPath(id) + if count := d.ctr.Increment(mountpoint); count > 1 { + return mountpoint, nil + } + defer func() { + if retErr != nil { + if c := d.ctr.Decrement(mountpoint); c <= 0 { + if mntErr := unix.Unmount(mountpoint, 0); mntErr != nil { + logrus.WithField("storage-driver", "zfs").Errorf("Error unmounting %v: %v", mountpoint, mntErr) + } + if rmErr := unix.Rmdir(mountpoint); rmErr != nil && !os.IsNotExist(rmErr) { + logrus.WithField("storage-driver", "zfs").Debugf("Failed to remove %s: %v", id, rmErr) + } + + } + } + }() + + // In the case of a read-only mount we first mount read-write so we can set the + // correct permissions on the mount point and remount read-only afterwards. + remountReadOnly := false + mountOptions := d.options.mountOptions + if len(options.Options) > 0 { + var newOptions []string + for _, option := range options.Options { + if option == "ro" { + // Filter out read-only mount option but remember for later remounting. + remountReadOnly = true + } else { + newOptions = append(newOptions, option) + } + } + mountOptions = strings.Join(newOptions, ",") + } + + filesystem := d.zfsPath(id) + opts := label.FormatMountLabel(mountOptions, options.MountLabel) + logrus.WithField("storage-driver", "zfs").Debugf(`mount("%s", "%s", "%s")`, filesystem, mountpoint, opts) + + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return "", err + } + // Create the target directories if they don't exist + if err := idtools.MkdirAllAs(mountpoint, 0o755, rootUID, rootGID); err != nil { + return "", err + } + + if err := mount.Mount(filesystem, mountpoint, "zfs", opts); err != nil { + return "", fmt.Errorf("creating zfs mount: %w", err) + } + + if remountReadOnly { + opts = label.FormatMountLabel("remount,ro", options.MountLabel) + if err := mount.Mount(filesystem, mountpoint, "zfs", opts); err != nil { + return "", fmt.Errorf("remounting zfs mount read-only: %w", err) + } + } + + return mountpoint, nil +} + +// Put removes the existing mountpoint for the given id if it exists. +func (d *Driver) Put(id string) error { + mountpoint := d.mountPath(id) + if count := d.ctr.Decrement(mountpoint); count > 0 { + return nil + } + + logger := logrus.WithField("storage-driver", "zfs") + + logger.Debugf(`unmount("%s")`, mountpoint) + + if err := detachUnmount(mountpoint); err != nil { + logger.Warnf("Failed to unmount %s mount %s: %v", id, mountpoint, err) + } + if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { + logger.Debugf("Failed to remove %s mount point %s: %v", id, mountpoint, err) + } + + return nil +} + +// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID. +// For ZFS, it queries the full mount path for this ID. +func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) { + return directory.Usage(d.mountPath(id)) +} + +// Exists checks to see if the cache entry exists for the given id. 
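+// Only the in-memory cache is consulted, so datasets created or destroyed
+// behind the driver's back will not be reflected here.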
+func (d *Driver) Exists(id string) bool { + d.Lock() + defer d.Unlock() + return d.filesystemsCache[d.zfsPath(id)] +} + +// List layers (not including additional image stores). Our layers aren't all +// dependent on a single well-known dataset, so we can't reliably tell which +// datasets are ours and which ones just look like they could be ours. +func (d *Driver) ListLayers() ([]string, error) { + return nil, graphdriver.ErrNotSupported +} + +// AdditionalImageStores returns additional image stores supported by the driver +func (d *Driver) AdditionalImageStores() []string { + return nil +} diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go new file mode 100644 index 0000000000..c3c73c61e7 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go @@ -0,0 +1,33 @@ +package zfs + +import ( + "fmt" + + graphdriver "github.com/containers/storage/drivers" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +func checkRootdirFs(rootdir string) error { + var buf unix.Statfs_t + if err := unix.Statfs(rootdir, &buf); err != nil { + return fmt.Errorf("failed to access '%s': %s", rootdir, err) + } + + // on FreeBSD buf.Fstypename contains ['z', 'f', 's', 0 ... ] + if (buf.Fstypename[0] != 122) || (buf.Fstypename[1] != 102) || (buf.Fstypename[2] != 115) || (buf.Fstypename[3] != 0) { + logrus.WithField("storage-driver", "zfs").Debugf("no zfs dataset found for rootdir '%s'", rootdir) + return fmt.Errorf("no zfs dataset found for rootdir '%s': %w", rootdir, graphdriver.ErrPrerequisites) + } + + return nil +} + +func getMountpoint(id string) string { + return id +} + +func detachUnmount(mountpoint string) error { + // FreeBSD's MNT_FORCE is roughly equivalent to MNT_DETACH + return unix.Unmount(mountpoint, unix.MNT_FORCE) +} diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go new file mode 100644 index 0000000000..d43ba5c2b1 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go @@ -0,0 +1,35 @@ +package zfs + +import ( + "fmt" + + graphdriver "github.com/containers/storage/drivers" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +func checkRootdirFs(rootDir string) error { + fsMagic, err := graphdriver.GetFSMagic(rootDir) + if err != nil { + return err + } + backingFS := "unknown" + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFS = fsName + } + + if fsMagic != graphdriver.FsMagicZfs { + logrus.WithField("root", rootDir).WithField("backingFS", backingFS).WithField("storage-driver", "zfs").Error("No zfs dataset found for root") + return fmt.Errorf("no zfs dataset found for rootdir '%s': %w", rootDir, graphdriver.ErrPrerequisites) + } + + return nil +} + +func getMountpoint(id string) string { + return id +} + +func detachUnmount(mountpoint string) error { + return unix.Unmount(mountpoint, unix.MNT_DETACH) +} diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go new file mode 100644 index 0000000000..738b0ae1be --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go @@ -0,0 +1,4 @@ +//go:build !linux && !freebsd +// +build !linux,!freebsd + +package zfs diff --git 
a/vendor/github.com/containers/storage/errors.go b/vendor/github.com/containers/storage/errors.go new file mode 100644 index 0000000000..de6e377541 --- /dev/null +++ b/vendor/github.com/containers/storage/errors.go @@ -0,0 +1,65 @@ +package storage + +import ( + "errors" + + "github.com/containers/storage/types" +) + +var ( + // ErrContainerUnknown indicates that there was no container with the specified name or ID. + ErrContainerUnknown = types.ErrContainerUnknown + // ErrDigestUnknown indicates that we were unable to compute the digest of a specified item. + ErrDigestUnknown = types.ErrDigestUnknown + // ErrDuplicateID indicates that an ID which is to be assigned to a new item is already being used. + ErrDuplicateID = types.ErrDuplicateID + // ErrDuplicateImageNames indicates that the read-only store uses the same name for multiple images. + ErrDuplicateImageNames = types.ErrDuplicateImageNames + // ErrDuplicateLayerNames indicates that the read-only store uses the same name for multiple layers. + ErrDuplicateLayerNames = types.ErrDuplicateLayerNames + // ErrDuplicateName indicates that a name which is to be assigned to a new item is already being used. + ErrDuplicateName = types.ErrDuplicateName + // ErrImageUnknown indicates that there was no image with the specified name or ID. + ErrImageUnknown = types.ErrImageUnknown + // ErrImageUsedByContainer is returned when the caller attempts to delete an image that is a container's image. + ErrImageUsedByContainer = types.ErrImageUsedByContainer + // ErrIncompleteOptions is returned when the caller attempts to initialize a Store without providing required information. + ErrIncompleteOptions = types.ErrIncompleteOptions + // ErrInvalidBigDataName indicates that the name for a big data item is not acceptable; it may be empty. + ErrInvalidBigDataName = types.ErrInvalidBigDataName + // ErrLayerHasChildren is returned when the caller attempts to delete a layer that has children. + ErrLayerHasChildren = types.ErrLayerHasChildren + // ErrLayerNotMounted is returned when the requested information can only be computed for a mounted layer, and the layer is not mounted. + ErrLayerNotMounted = types.ErrLayerNotMounted + // ErrLayerUnknown indicates that there was no layer with the specified name or ID. + ErrLayerUnknown = types.ErrLayerUnknown + // ErrLayerUsedByContainer is returned when the caller attempts to delete a layer that is a container's layer. + ErrLayerUsedByContainer = types.ErrLayerUsedByContainer + // ErrLayerUsedByImage is returned when the caller attempts to delete a layer that is an image's top layer. + ErrLayerUsedByImage = types.ErrLayerUsedByImage + // ErrLoadError indicates that there was an initialization error. + ErrLoadError = types.ErrLoadError + // ErrNotAContainer is returned when the caller attempts to delete a container that isn't a container. + ErrNotAContainer = types.ErrNotAContainer + // ErrNotALayer is returned when the caller attempts to delete a layer that isn't a layer. + ErrNotALayer = types.ErrNotALayer + // ErrNotAnID is returned when the caller attempts to read or write metadata from an item that doesn't exist. + ErrNotAnID = types.ErrNotAnID + // ErrNotAnImage is returned when the caller attempts to delete an image that isn't an image. + ErrNotAnImage = types.ErrNotAnImage + // ErrParentIsContainer is returned when a caller attempts to create a layer as a child of a container's layer. 
+	ErrParentIsContainer = types.ErrParentIsContainer
+	// ErrParentUnknown indicates that we didn't record the ID of the parent of the specified layer.
+	ErrParentUnknown = types.ErrParentUnknown
+	// ErrSizeUnknown is returned when the caller asks for the size of a big data item, but the Store couldn't determine the answer.
+	ErrSizeUnknown = types.ErrSizeUnknown
+	// ErrStoreIsReadOnly is returned when the caller makes a call to a read-only store that would require modifying its contents.
+	ErrStoreIsReadOnly = types.ErrStoreIsReadOnly
+	// ErrNotSupported is returned when the requested functionality is not supported.
+	ErrNotSupported = types.ErrNotSupported
+	// ErrInvalidMappings is returned when the specified mappings are invalid.
+	ErrInvalidMappings = types.ErrInvalidMappings
+	// ErrInvalidNameOperation is returned when updateName is called with an invalid operation.
+	// Internal error
+	errInvalidUpdateNameOperation = errors.New("invalid update name operation")
+)
diff --git a/vendor/github.com/containers/storage/idset.go b/vendor/github.com/containers/storage/idset.go
new file mode 100644
index 0000000000..be9e45cfd0
--- /dev/null
+++ b/vendor/github.com/containers/storage/idset.go
@@ -0,0 +1,265 @@
+package storage
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/types"
+	"github.com/google/go-intervals/intervalset"
+)
+
+// idSet represents a set of integer IDs. It is stored as an ordered set of intervals.
+type idSet struct {
+	set *intervalset.ImmutableSet
+}
+
+func newIDSet(intervals []interval) *idSet {
+	s := intervalset.Empty()
+	for _, i := range intervals {
+		s.Add(intervalset.NewSet([]intervalset.Interval{i}))
+	}
+	return &idSet{set: s.ImmutableSet()}
+}
+
+// getHostIDs returns all the host ids in the id map.
+func getHostIDs(idMaps []idtools.IDMap) *idSet {
+	var intervals []interval
+	for _, m := range idMaps {
+		intervals = append(intervals, interval{start: m.HostID, end: m.HostID + m.Size})
+	}
+	return newIDSet(intervals)
+}
+
+// getContainerIDs returns all the container ids in the id map.
+func getContainerIDs(idMaps []idtools.IDMap) *idSet {
+	var intervals []interval
+	for _, m := range idMaps {
+		intervals = append(intervals, interval{start: m.ContainerID, end: m.ContainerID + m.Size})
+	}
+	return newIDSet(intervals)
+}
+
+// subtract returns the set difference `s` minus `t`. `s` and `t` are unchanged.
+func (s *idSet) subtract(t *idSet) *idSet {
+	if s == nil || t == nil {
+		return s
+	}
+	return &idSet{set: s.set.Sub(t.set)}
+}
+
+// union returns the union of `s` and `t`. `s` and `t` are unchanged.
+func (s *idSet) union(t *idSet) *idSet {
+	if s == nil {
+		return t
+	} else if t == nil {
+		return s
+	}
+	return &idSet{set: s.set.Union(t.set)}
+}
+
+// Methods to iterate over the intervals of the idSet. intervalset doesn't provide one :-(
+
+// iteratorFn iterates over the intervals of an idSet. It returns nil once iteration finishes.
+type iteratorFn func() *interval
+
+// cancelFn must be called exactly once unless iteratorFn returns nil, otherwise the producing
+// goroutine might leak.
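+// The iterator pumps intervals from the underlying set through a channel; the
+// cancel function closes a side channel so the producing goroutine can stop
+// early.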
+type cancelFn func() + +func (s *idSet) iterator() (iteratorFn, cancelFn) { + if s == nil { + return func() *interval { return nil }, func() {} + } + cancelCh := make(chan byte) + dataCh := make(chan interval) + go func() { + s.set.Intervals(func(ii intervalset.Interval) bool { + select { + case <-cancelCh: + return false + case dataCh <- ii.(interval): + return true + } + }) + close(dataCh) + }() + iterator := func() *interval { + i, ok := <-dataCh + if !ok { + return nil + } + return &i + } + return iterator, func() { close(cancelCh) } +} + +// size returns the total number of ids in the ID set. +func (s *idSet) size() int { + var size int + iterator, cancel := s.iterator() + defer cancel() + for i := iterator(); i != nil; i = iterator() { + size += i.length() + } + return size +} + +// findAvailable finds the `n` ids from `s`. +func (s *idSet) findAvailable(n int) (*idSet, error) { + var intervals []intervalset.Interval + iterator, cancel := s.iterator() + defer cancel() + for i := iterator(); n > 0 && i != nil; i = iterator() { + i.end = minInt(i.end, i.start+n) + intervals = append(intervals, *i) + n -= i.length() + } + if n > 0 { + return nil, types.ErrNoAvailableIDs + } + return &idSet{set: intervalset.NewImmutableSet(intervals)}, nil +} + +// zip creates an id map from `s` (host ids) and container ids. +func (s *idSet) zip(container *idSet) []idtools.IDMap { + hostIterator, hostCancel := s.iterator() + defer hostCancel() + containerIterator, containerCancel := container.iterator() + defer containerCancel() + var out []idtools.IDMap + for h, c := hostIterator(), containerIterator(); h != nil && c != nil; { + if n := minInt(h.length(), c.length()); n > 0 { + out = append(out, idtools.IDMap{ + ContainerID: c.start, + HostID: h.start, + Size: n, + }) + h.start += n + c.start += n + } + if h.IsZero() { + h = hostIterator() + } + if c.IsZero() { + c = containerIterator() + } + } + return out +} + +// interval represents an interval of integers [start, end). Note it is allowed to have +// start >= end, in which case it is treated as an empty interval. It implements interface +// intervalset.Interval. +type interval struct { + // Start of the interval (inclusive). + start int + // End of the interval (exclusive). + end int +} + +func (i interval) length() int { + return maxInt(0, i.end-i.start) +} + +func (i interval) Intersect(other intervalset.Interval) intervalset.Interval { + j := other.(interval) + return interval{start: maxInt(i.start, j.start), end: minInt(i.end, j.end)} +} + +func (i interval) Before(other intervalset.Interval) bool { + j := other.(interval) + return !i.IsZero() && !j.IsZero() && i.end < j.start +} + +func (i interval) IsZero() bool { + return i.length() <= 0 +} + +func (i interval) Bisect(other intervalset.Interval) (intervalset.Interval, intervalset.Interval) { + j := other.(interval) + if j.IsZero() { + return i, interval{} + } + // Subtracting [j.start, j.end) is equivalent to the union of intersecting (-inf, j.start) and + // [j.end, +inf). 
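+	// left is the part of i strictly before j; right is the part of i at or
+	// after j.end.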
+ left := interval{start: i.start, end: minInt(i.end, j.start)} + right := interval{start: maxInt(i.start, j.end), end: i.end} + return left, right +} + +func (i interval) Adjoin(other intervalset.Interval) intervalset.Interval { + j := other.(interval) + if !i.IsZero() && !j.IsZero() && (i.end == j.start || j.end == i.start) { + return interval{start: minInt(i.start, j.start), end: maxInt(i.end, j.end)} + } + return interval{} +} + +func (i interval) Encompass(other intervalset.Interval) intervalset.Interval { + j := other.(interval) + switch { + case i.IsZero(): + return j + case j.IsZero(): + return i + default: + return interval{start: minInt(i.start, j.start), end: maxInt(i.end, j.end)} + } +} + +func minInt(a, b int) int { + if a < b { + return a + } + return b +} + +func maxInt(a, b int) int { + if a < b { + return b + } + return a +} + +func hasOverlappingRanges(mappings []idtools.IDMap) error { + hostIntervals := intervalset.Empty() + containerIntervals := intervalset.Empty() + + var conflicts []string + + for _, m := range mappings { + c := interval{start: m.ContainerID, end: m.ContainerID + m.Size} + h := interval{start: m.HostID, end: m.HostID + m.Size} + + added := false + overlaps := false + + containerIntervals.IntervalsBetween(c, func(x intervalset.Interval) bool { + overlaps = true + return false + }) + if overlaps { + conflicts = append(conflicts, fmt.Sprintf("%v:%v:%v", m.ContainerID, m.HostID, m.Size)) + added = true + } + containerIntervals.Add(intervalset.NewSet([]intervalset.Interval{c})) + + hostIntervals.IntervalsBetween(h, func(x intervalset.Interval) bool { + overlaps = true + return false + }) + if overlaps && !added { + conflicts = append(conflicts, fmt.Sprintf("%v:%v:%v", m.ContainerID, m.HostID, m.Size)) + } + hostIntervals.Add(intervalset.NewSet([]intervalset.Interval{h})) + } + + if conflicts != nil { + if len(conflicts) == 1 { + return fmt.Errorf("the specified UID and/or GID mapping %s conflicts with other mappings: %w", conflicts[0], ErrInvalidMappings) + } + return fmt.Errorf("the specified UID and/or GID mappings %s conflict with other mappings: %w", strings.Join(conflicts, ", "), ErrInvalidMappings) + } + return nil +} diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go new file mode 100644 index 0000000000..d71eab08bb --- /dev/null +++ b/vendor/github.com/containers/storage/images.go @@ -0,0 +1,1097 @@ +package storage + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/lockfile" + "github.com/containers/storage/pkg/stringid" + "github.com/containers/storage/pkg/stringutils" + "github.com/containers/storage/pkg/truncindex" + digest "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" +) + +const ( + // ImageDigestManifestBigDataNamePrefix is a prefix of big data item + // names which we consider to be manifests, used for computing a + // "digest" value for the image as a whole, by which we can locate the + // image later. + ImageDigestManifestBigDataNamePrefix = "manifest" + // ImageDigestBigDataKey is provided for compatibility with older + // versions of the image library. It will be removed in the future. + ImageDigestBigDataKey = "manifest" +) + +// An Image is a reference to a layer and an associated metadata string. 
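+// Image records are serialized as a JSON array and stored in images.json in
+// the image store's directory (see imagespath and load further below).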
+type Image struct { + // ID is either one which was specified at create-time, or a random + // value which was generated by the library. + ID string `json:"id"` + + // Digest is a digest value that we can use to locate the image, if one + // was specified at creation-time. + Digest digest.Digest `json:"digest,omitempty"` + + // Digests is a list of digest values of the image's manifests, and + // possibly a manually-specified value, that we can use to locate the + // image. If Digest is set, its value is also in this list. + Digests []digest.Digest `json:"-"` + + // Names is an optional set of user-defined convenience values. The + // image can be referred to by its ID or any of its names. Names are + // unique among images, and are often the text representation of tagged + // or canonical references. + Names []string `json:"names,omitempty"` + + // NamesHistory is an optional set of Names the image had in the past. The + // contained names are free from any duplicates, whereas the newest entry + // is the first one. + NamesHistory []string `json:"names-history,omitempty"` + + // TopLayer is the ID of the topmost layer of the image itself, if the + // image contains one or more layers. Multiple images can refer to the + // same top layer. + TopLayer string `json:"layer,omitempty"` + + // MappedTopLayers are the IDs of alternate versions of the top layer + // which have the same contents and parent, and which differ from + // TopLayer only in which ID mappings they use. When the image is + // to be removed, they should be removed before the TopLayer, as the + // graph driver may depend on that. + MappedTopLayers []string `json:"mapped-layers,omitempty"` + + // Metadata is data we keep for the convenience of the caller. It is not + // expected to be large, since it is kept in memory. + Metadata string `json:"metadata,omitempty"` + + // BigDataNames is a list of names of data items that we keep for the + // convenience of the caller. They can be large, and are only in + // memory when being read from or written to disk. + BigDataNames []string `json:"big-data-names,omitempty"` + + // BigDataSizes maps the names in BigDataNames to the sizes of the data + // that has been stored, if they're known. + BigDataSizes map[string]int64 `json:"big-data-sizes,omitempty"` + + // BigDataDigests maps the names in BigDataNames to the digests of the + // data that has been stored, if they're known. + BigDataDigests map[string]digest.Digest `json:"big-data-digests,omitempty"` + + // Created is the datestamp for when this image was created. Older + // versions of the library did not track this information, so callers + // will likely want to use the IsZero() method to verify that a value + // is set before using it. + Created time.Time `json:"created,omitempty"` + + // ReadOnly is true if this image resides in a read-only layer store. + ReadOnly bool `json:"-"` + + Flags map[string]interface{} `json:"flags,omitempty"` +} + +// roImageStore provides bookkeeping for information about Images. +type roImageStore interface { + roMetadataStore + roBigDataStore + + // startReading makes sure the store is fresh, and locks it for reading. + // If this succeeds, the caller MUST call stopReading(). + startReading() error + + // stopReading releases locks obtained by startReading. + stopReading() + + // Exists checks if there is an image with the given ID or name. + Exists(id string) bool + + // Get retrieves information about an image given an ID or name. 
+	Get(id string) (*Image, error)
+
+	// Images returns a slice enumerating the known images.
+	Images() ([]Image, error)
+
+	// ByDigest returns a slice enumerating the images which have either an
+	// explicitly-set digest, or a big data item with a name that starts
+	// with ImageDigestManifestBigDataNamePrefix, which matches the
+	// specified digest.
+	ByDigest(d digest.Digest) ([]*Image, error)
+}
+
+// rwImageStore provides bookkeeping for information about Images.
+type rwImageStore interface {
+	roImageStore
+	rwMetadataStore
+	rwImageBigDataStore
+	flaggableStore
+
+	// startWriting makes sure the store is fresh, and locks it for writing.
+	// If this succeeds, the caller MUST call stopWriting().
+	startWriting() error
+
+	// stopWriting releases locks obtained by startWriting.
+	stopWriting()
+
+	// create creates an image that has a specified ID (or a random one) and
+	// optional names, using the specified layer as its topmost (hopefully
+	// read-only) layer. That layer can be referenced by multiple images.
+	create(id string, names []string, layer string, options ImageOptions) (*Image, error)
+
+	// updateNames modifies names associated with an image based on (op, names).
+	// The values are expected to be valid normalized named image references.
+	updateNames(id string, names []string, op updateNameOperation) error
+
+	// Delete removes the record of the image.
+	Delete(id string) error
+
+	addMappedTopLayer(id, layer string) error
+	removeMappedTopLayer(id, layer string) error
+
+	// Clean up unreferenced per-image data.
+	GarbageCollect() error
+
+	// Wipe removes records of all images.
+	Wipe() error
+}
+
+type imageStore struct {
+	// The following fields are only set when constructing imageStore, and must never be modified afterwards.
+	// They are safe to access without any other locking.
+	lockfile *lockfile.LockFile // lockfile.IsReadWrite can be used to distinguish between read-write and read-only image stores.
+	dir      string
+
+	inProcessLock sync.RWMutex // Can _only_ be obtained with lockfile held.
+	// The following fields can only be read/written with read/write ownership of inProcessLock, respectively.
+	// Almost all users should use startReading() or startWriting().
+	lastWrite lockfile.LastWrite
+	images    []*Image
+	idindex   *truncindex.TruncIndex
+	byid      map[string]*Image
+	byname    map[string]*Image
+	bydigest  map[digest.Digest][]*Image
+}
+
+func copyImage(i *Image) *Image {
+	return &Image{
+		ID:              i.ID,
+		Digest:          i.Digest,
+		Digests:         copyDigestSlice(i.Digests),
+		Names:           copyStringSlice(i.Names),
+		NamesHistory:    copyStringSlice(i.NamesHistory),
+		TopLayer:        i.TopLayer,
+		MappedTopLayers: copyStringSlice(i.MappedTopLayers),
+		Metadata:        i.Metadata,
+		BigDataNames:    copyStringSlice(i.BigDataNames),
+		BigDataSizes:    copyStringInt64Map(i.BigDataSizes),
+		BigDataDigests:  copyStringDigestMap(i.BigDataDigests),
+		Created:         i.Created,
+		ReadOnly:        i.ReadOnly,
+		Flags:           copyStringInterfaceMap(i.Flags),
+	}
+}
+
+func copyImageSlice(slice []*Image) []*Image {
+	if len(slice) > 0 {
+		cp := make([]*Image, len(slice))
+		for i := range slice {
+			cp[i] = copyImage(slice[i])
+		}
+		return cp
+	}
+	return nil
+}
+
+// startWritingWithReload makes sure the store is fresh if canReload, and locks it for writing.
+// If this succeeds, the caller MUST call stopWriting().
+//
+// This is an internal implementation detail of imageStore construction, every other caller
+// should use startWriting() instead.
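+//
+// Lock ordering, as implemented below: the on-disk r.lockfile is acquired
+// before the in-process r.inProcessLock, and the two are released in the
+// reverse order.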
+func (r *imageStore) startWritingWithReload(canReload bool) error { + r.lockfile.Lock() + r.inProcessLock.Lock() + succeeded := false + defer func() { + if !succeeded { + r.inProcessLock.Unlock() + r.lockfile.Unlock() + } + }() + + if canReload { + if _, err := r.reloadIfChanged(true); err != nil { + return err + } + } + + succeeded = true + return nil +} + +// startWriting makes sure the store is fresh, and locks it for writing. +// If this succeeds, the caller MUST call stopWriting(). +func (r *imageStore) startWriting() error { + return r.startWritingWithReload(true) +} + +// stopWriting releases locks obtained by startWriting. +func (r *imageStore) stopWriting() { + r.inProcessLock.Unlock() + r.lockfile.Unlock() +} + +// startReadingWithReload makes sure the store is fresh if canReload, and locks it for reading. +// If this succeeds, the caller MUST call stopReading(). +// +// This is an internal implementation detail of imageStore construction, every other caller +// should use startReading() instead. +func (r *imageStore) startReadingWithReload(canReload bool) error { + // inProcessLocked calls the nested function with r.inProcessLock held for writing. + inProcessLocked := func(fn func() error) error { + r.inProcessLock.Lock() + defer r.inProcessLock.Unlock() + return fn() + } + + r.lockfile.RLock() + unlockFn := r.lockfile.Unlock // A function to call to clean up, or nil + defer func() { + if unlockFn != nil { + unlockFn() + } + }() + r.inProcessLock.RLock() + unlockFn = r.stopReading + + if canReload { + // If we are lucky, we can just hold the read locks, check that we are fresh, and continue. + _, modified, err := r.modified() + if err != nil { + return err + } + if modified { + // We are unlucky, and need to reload. + // NOTE: Multiple goroutines can get to this place approximately simultaneously. + r.inProcessLock.RUnlock() + unlockFn = r.lockfile.Unlock + + // r.lastWrite can change at this point if another goroutine reloads the store before us. That’s why we don’t unconditionally + // trigger a load below; we (lock and) reloadIfChanged() again. + + // First try reloading with r.lockfile held for reading. + // r.inProcessLock will serialize all goroutines that got here; + // each will re-check on-disk state vs. r.lastWrite, and the first one will actually reload the data. + var tryLockedForWriting bool + if err := inProcessLocked(func() error { + // We could optimize this further: The r.lockfile.GetLastWrite() value shouldn’t change as long as we hold r.lockfile, + // so if r.lastWrite was already updated, we don’t need to actually read the on-filesystem lock. + var err error + tryLockedForWriting, err = r.reloadIfChanged(false) + return err + }); err != nil { + if !tryLockedForWriting { + return err + } + // Not good enough, we need r.lockfile held for writing. So, let’s do that. + unlockFn() + unlockFn = nil + + r.lockfile.Lock() + unlockFn = r.lockfile.Unlock + if err := inProcessLocked(func() error { + _, err := r.reloadIfChanged(true) + return err + }); err != nil { + return err + } + unlockFn() + unlockFn = nil + + r.lockfile.RLock() + unlockFn = r.lockfile.Unlock + // We need to check for a reload once more because the on-disk state could have been modified + // after we released the lock. + // If that, _again_, finds inconsistent state, just give up. + // We could, plausibly, retry a few times, but that inconsistent state (duplicate image names) + // shouldn’t be saved (by correct implementations) in the first place. 
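+				// Final attempt: reload while holding only the read lock; if the
+				// store is still inconsistent at this point, give up rather than
+				// retry.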
+				if err := inProcessLocked(func() error {
+					_, err := r.reloadIfChanged(false)
+					return err
+				}); err != nil {
+					return fmt.Errorf("(even after successfully cleaning up once:) %w", err)
+				}
+			}
+
+			// NOTE that we hold neither a read nor write inProcessLock at this point. That’s fine in ordinary operation, because
+			// the on-filesystem r.lockfile should protect us against (cooperating) writers, and any use of r.inProcessLock
+			// protects us against in-process writers modifying data.
+			// In presence of non-cooperating writers, we just ensure that 1) the in-memory data is not clearly out-of-date
+			// and 2) access to the in-memory data is not racy;
+			// but we can’t protect against those out-of-process writers modifying _files_ while we are assuming they are in a consistent state.
+
+			r.inProcessLock.RLock()
+		}
+	}
+
+	unlockFn = nil
+	return nil
+}
+
+// startReading makes sure the store is fresh, and locks it for reading.
+// If this succeeds, the caller MUST call stopReading().
+func (r *imageStore) startReading() error {
+	return r.startReadingWithReload(true)
+}
+
+// stopReading releases locks obtained by startReading.
+func (r *imageStore) stopReading() {
+	r.inProcessLock.RUnlock()
+	r.lockfile.Unlock()
+}
+
+// modified returns true if the on-disk state has changed (i.e. if reloadIfChanged may need to modify the store),
+// and a lockfile.LastWrite value for that update.
+//
+// The caller must hold r.lockfile for reading _or_ writing.
+// The caller must hold r.inProcessLock for reading or writing.
+func (r *imageStore) modified() (lockfile.LastWrite, bool, error) {
+	return r.lockfile.ModifiedSince(r.lastWrite)
+}
+
+// reloadIfChanged reloads the contents of the store from disk if it is changed.
+//
+// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
+// if it is held for writing.
+//
+// The caller must hold r.inProcessLock for WRITING.
+//
+// If !lockedForWriting and this function fails, the return value indicates whether
+// reloadIfChanged() with lockedForWriting could succeed.
+func (r *imageStore) reloadIfChanged(lockedForWriting bool) (bool, error) {
+	lastWrite, modified, err := r.modified()
+	if err != nil {
+		return false, err
+	}
+	// We require callers to always hold r.inProcessLock for WRITING, even if they might not end up calling r.load()
+	// and modify no fields, to ensure they see fresh data:
+	// r.lockfile.Modified() only returns true once per change. Without an exclusive lock,
+	// one goroutine might see r.lockfile.Modified() == true and decide to load, and in the meanwhile another one could
+	// see r.lockfile.Modified() == false and proceed to use in-memory data without noticing it is stale.
+	if modified {
+		if tryLockedForWriting, err := r.load(lockedForWriting); err != nil {
+			return tryLockedForWriting, err // r.lastWrite is unchanged, so we will load the next time again.
+		}
+		r.lastWrite = lastWrite
+	}
+	return false, nil
+}
+
+// Requires startReading or startWriting.
+func (r *imageStore) Images() ([]Image, error) {
+	images := make([]Image, len(r.images))
+	for i := range r.images {
+		images[i] = *copyImage(r.images[i])
+	}
+	return images, nil
+}
+
+// GarbageCollect looks for datadirs in the store directory that are not referenced
+// by the json file and removes them. These can happen in the case of unclean
+// shutdowns.
+// Requires startReading or startWriting.
+func (r *imageStore) GarbageCollect() error { + entries, err := os.ReadDir(r.dir) + if err != nil { + // Unexpected, don't try any GC + return err + } + + for _, entry := range entries { + id := entry.Name() + // Does it look like a datadir directory? + if !entry.IsDir() || stringid.ValidateID(id) != nil { + continue + } + + // Should the id be there? + if r.byid[id] != nil { + continue + } + + // Otherwise remove datadir + logrus.Debugf("removing %q", filepath.Join(r.dir, id)) + moreErr := os.RemoveAll(filepath.Join(r.dir, id)) + // Propagate first error + if moreErr != nil && err == nil { + err = moreErr + } + } + + return err +} + +func (r *imageStore) imagespath() string { + return filepath.Join(r.dir, "images.json") +} + +func (r *imageStore) datadir(id string) string { + return filepath.Join(r.dir, id) +} + +func (r *imageStore) datapath(id, key string) string { + return filepath.Join(r.datadir(id), makeBigDataBaseName(key)) +} + +// bigDataNameIsManifest determines if a big data item with the specified name +// is considered to be representative of the image, in that its digest can be +// said to also be the image's digest. Currently, if its name is, or begins +// with, "manifest", we say that it is. +func bigDataNameIsManifest(name string) bool { + return strings.HasPrefix(name, ImageDigestManifestBigDataNamePrefix) +} + +// recomputeDigests takes a fixed digest and a name-to-digest map and builds a +// list of the unique values that would identify the image. +// The caller must hold r.inProcessLock for writing. +func (i *Image) recomputeDigests() error { + validDigests := make([]digest.Digest, 0, len(i.BigDataDigests)+1) + digests := make(map[digest.Digest]struct{}) + if i.Digest != "" { + if err := i.Digest.Validate(); err != nil { + return fmt.Errorf("validating image digest %q: %w", string(i.Digest), err) + } + digests[i.Digest] = struct{}{} + validDigests = append(validDigests, i.Digest) + } + for name, digest := range i.BigDataDigests { + if !bigDataNameIsManifest(name) { + continue + } + if err := digest.Validate(); err != nil { + return fmt.Errorf("validating digest %q for big data item %q: %w", string(digest), name, err) + } + // Deduplicate the digest values. + if _, known := digests[digest]; !known { + digests[digest] = struct{}{} + validDigests = append(validDigests, digest) + } + } + if i.Digest == "" && len(validDigests) > 0 { + i.Digest = validDigests[0] + } + i.Digests = validDigests + return nil +} + +// load reloads the contents of the store from disk. +// +// Most callers should call reloadIfChanged() instead, to avoid overhead and to correctly +// manage r.lastWrite. +// +// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true +// if it is held for writing. +// The caller must hold r.inProcessLock for WRITING. +// +// If !lockedForWriting and this function fails, the return value indicates whether +// retrying with lockedForWriting could succeed. 
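+//
+// In practice, the only inconsistency that load repairs by saving is a set of
+// duplicate image names (see errorToResolveBySaving in the body below).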
+func (r *imageStore) load(lockedForWriting bool) (bool, error) { + rpath := r.imagespath() + data, err := os.ReadFile(rpath) + if err != nil && !os.IsNotExist(err) { + return false, err + } + + images := []*Image{} + if len(data) != 0 { + if err := json.Unmarshal(data, &images); err != nil { + return false, fmt.Errorf("loading %q: %w", rpath, err) + } + } + idlist := make([]string, 0, len(images)) + ids := make(map[string]*Image) + names := make(map[string]*Image) + digests := make(map[digest.Digest][]*Image) + var errorToResolveBySaving error // == nil + for n, image := range images { + ids[image.ID] = images[n] + idlist = append(idlist, image.ID) + for _, name := range image.Names { + if conflict, ok := names[name]; ok { + r.removeName(conflict, name) + errorToResolveBySaving = ErrDuplicateImageNames + } + } + // Compute the digest list. + if err := image.recomputeDigests(); err != nil { + return false, fmt.Errorf("computing digests for image with ID %q (%v): %w", image.ID, image.Names, err) + } + for _, name := range image.Names { + names[name] = image + } + for _, digest := range image.Digests { + list := digests[digest] + digests[digest] = append(list, image) + } + image.ReadOnly = !r.lockfile.IsReadWrite() + } + + if errorToResolveBySaving != nil { + if !r.lockfile.IsReadWrite() { + return false, errorToResolveBySaving + } + if !lockedForWriting { + return true, errorToResolveBySaving + } + } + r.images = images + r.idindex = truncindex.NewTruncIndex(idlist) // Invalid values in idlist are ignored: they are not a reason to refuse processing the whole store. + r.byid = ids + r.byname = names + r.bydigest = digests + if errorToResolveBySaving != nil { + return false, r.Save() + } + return false, nil +} + +// Save saves the contents of the store to disk. +// The caller must hold r.lockfile locked for writing. +// The caller must hold r.inProcessLock for reading (but usually holds it for writing in order to make the desired changes). +func (r *imageStore) Save() error { + if !r.lockfile.IsReadWrite() { + return fmt.Errorf("not allowed to modify the image store at %q: %w", r.imagespath(), ErrStoreIsReadOnly) + } + r.lockfile.AssertLockedForWriting() + rpath := r.imagespath() + if err := os.MkdirAll(filepath.Dir(rpath), 0o700); err != nil { + return err + } + jdata, err := json.Marshal(&r.images) + if err != nil { + return err + } + // This must be done before we write the file, because the process could be terminated + // after the file is written but before the lock file is updated. 
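+	// (A crash between RecordWrite() and the write below is harmless: readers
+	// will see a LastWrite change, reload, and find the old, still-valid data.)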
+ lw, err := r.lockfile.RecordWrite() + if err != nil { + return err + } + r.lastWrite = lw + if err := ioutils.AtomicWriteFile(rpath, jdata, 0o600); err != nil { + return err + } + return nil +} + +func newImageStore(dir string) (rwImageStore, error) { + if err := os.MkdirAll(dir, 0o700); err != nil { + return nil, err + } + lockfile, err := lockfile.GetLockFile(filepath.Join(dir, "images.lock")) + if err != nil { + return nil, err + } + istore := imageStore{ + lockfile: lockfile, + dir: dir, + + images: []*Image{}, + byid: make(map[string]*Image), + byname: make(map[string]*Image), + bydigest: make(map[digest.Digest][]*Image), + } + if err := istore.startWritingWithReload(false); err != nil { + return nil, err + } + defer istore.stopWriting() + istore.lastWrite, err = istore.lockfile.GetLastWrite() + if err != nil { + return nil, err + } + if _, err := istore.load(true); err != nil { + return nil, err + } + return &istore, nil +} + +func newROImageStore(dir string) (roImageStore, error) { + lockfile, err := lockfile.GetROLockFile(filepath.Join(dir, "images.lock")) + if err != nil { + return nil, err + } + istore := imageStore{ + lockfile: lockfile, + dir: dir, + + images: []*Image{}, + byid: make(map[string]*Image), + byname: make(map[string]*Image), + bydigest: make(map[digest.Digest][]*Image), + } + if err := istore.startReadingWithReload(false); err != nil { + return nil, err + } + defer istore.stopReading() + istore.lastWrite, err = istore.lockfile.GetLastWrite() + if err != nil { + return nil, err + } + if _, err := istore.load(false); err != nil { + return nil, err + } + return &istore, nil +} + +// Requires startReading or startWriting. +func (r *imageStore) lookup(id string) (*Image, bool) { + if image, ok := r.byid[id]; ok { + return image, ok + } else if image, ok := r.byname[id]; ok { + return image, ok + } else if longid, err := r.idindex.Get(id); err == nil { + image, ok := r.byid[longid] + return image, ok + } + return nil, false +} + +// Requires startWriting. +func (r *imageStore) ClearFlag(id string, flag string) error { + if !r.lockfile.IsReadWrite() { + return fmt.Errorf("not allowed to clear flags on images at %q: %w", r.imagespath(), ErrStoreIsReadOnly) + } + image, ok := r.lookup(id) + if !ok { + return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) + } + delete(image.Flags, flag) + return r.Save() +} + +// Requires startWriting. +func (r *imageStore) SetFlag(id string, flag string, value interface{}) error { + if !r.lockfile.IsReadWrite() { + return fmt.Errorf("not allowed to set flags on images at %q: %w", r.imagespath(), ErrStoreIsReadOnly) + } + image, ok := r.lookup(id) + if !ok { + return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) + } + if image.Flags == nil { + image.Flags = make(map[string]interface{}) + } + image.Flags[flag] = value + return r.Save() +} + +// Requires startWriting. 
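+//
+// An illustrative call sequence from inside this package (istore, names and
+// layer are placeholder variables, not part of the original source):
+//
+//	if err := istore.startWriting(); err != nil {
+//		return err
+//	}
+//	defer istore.stopWriting()
+//	img, err := istore.create("", names, layer, ImageOptions{})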
+func (r *imageStore) create(id string, names []string, layer string, options ImageOptions) (image *Image, err error) { + if !r.lockfile.IsReadWrite() { + return nil, fmt.Errorf("not allowed to create new images at %q: %w", r.imagespath(), ErrStoreIsReadOnly) + } + if id == "" { + id = stringid.GenerateRandomID() + _, idInUse := r.byid[id] + for idInUse { + id = stringid.GenerateRandomID() + _, idInUse = r.byid[id] + } + } + if _, idInUse := r.byid[id]; idInUse { + return nil, fmt.Errorf("an image with ID %q already exists: %w", id, ErrDuplicateID) + } + names = dedupeStrings(names) + for _, name := range names { + if image, nameInUse := r.byname[name]; nameInUse { + return nil, fmt.Errorf("image name %q is already associated with image %q: %w", name, image.ID, ErrDuplicateName) + } + } + image = &Image{ + ID: id, + Digest: options.Digest, + Digests: dedupeDigests(options.Digests), + Names: names, + NamesHistory: copyStringSlice(options.NamesHistory), + TopLayer: layer, + Metadata: options.Metadata, + BigDataNames: []string{}, + BigDataSizes: make(map[string]int64), + BigDataDigests: make(map[string]digest.Digest), + Created: options.CreationDate, + Flags: copyStringInterfaceMap(options.Flags), + } + if image.Created.IsZero() { + image.Created = time.Now().UTC() + } + err = image.recomputeDigests() + if err != nil { + return nil, fmt.Errorf("validating digests for new image: %w", err) + } + r.images = append(r.images, image) + // This can only fail on duplicate IDs, which shouldn’t happen — and in + // that case the index is already in the desired state anyway. + // Implementing recovery from an unlikely and unimportant failure here + // would be too risky. + _ = r.idindex.Add(id) + r.byid[id] = image + for _, name := range names { + r.byname[name] = image + } + for _, digest := range image.Digests { + list := r.bydigest[digest] + r.bydigest[digest] = append(list, image) + } + defer func() { + if err != nil { + // now that the in-memory structures know about the new + // record, we can use regular Delete() to clean up if + // anything breaks from here on out + if e := r.Delete(id); e != nil { + logrus.Debugf("while cleaning up partially-created image %q we failed to create: %v", id, e) + } + } + }() + err = r.Save() + if err != nil { + return nil, err + } + for _, item := range options.BigData { + if item.Digest == "" { + item.Digest = digest.Canonical.FromBytes(item.Data) + } + if err = r.setBigData(image, item.Key, item.Data, item.Digest); err != nil { + return nil, err + } + } + image = copyImage(image) + return image, err +} + +// Requires startWriting. +func (r *imageStore) addMappedTopLayer(id, layer string) error { + if image, ok := r.lookup(id); ok { + image.MappedTopLayers = append(image.MappedTopLayers, layer) + return r.Save() + } + return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) +} + +// Requires startWriting. +func (r *imageStore) removeMappedTopLayer(id, layer string) error { + if image, ok := r.lookup(id); ok { + initialLen := len(image.MappedTopLayers) + image.MappedTopLayers = stringutils.RemoveFromSlice(image.MappedTopLayers, layer) + // No layer was removed. No need to save. + if initialLen == len(image.MappedTopLayers) { + return nil + } + return r.Save() + } + return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) +} + +// Requires startReading or startWriting. 
+func (r *imageStore) Metadata(id string) (string, error) { + if image, ok := r.lookup(id); ok { + return image.Metadata, nil + } + return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) +} + +// Requires startWriting. +func (r *imageStore) SetMetadata(id, metadata string) error { + if !r.lockfile.IsReadWrite() { + return fmt.Errorf("not allowed to modify image metadata at %q: %w", r.imagespath(), ErrStoreIsReadOnly) + } + if image, ok := r.lookup(id); ok { + image.Metadata = metadata + return r.Save() + } + return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) +} + +// The caller must hold r.inProcessLock for writing. +func (r *imageStore) removeName(image *Image, name string) { + image.Names = stringSliceWithoutValue(image.Names, name) +} + +// The caller must hold r.inProcessLock for writing. +func (i *Image) addNameToHistory(name string) { + i.NamesHistory = dedupeStrings(append([]string{name}, i.NamesHistory...)) +} + +// Requires startWriting. +func (r *imageStore) updateNames(id string, names []string, op updateNameOperation) error { + if !r.lockfile.IsReadWrite() { + return fmt.Errorf("not allowed to change image name assignments at %q: %w", r.imagespath(), ErrStoreIsReadOnly) + } + image, ok := r.lookup(id) + if !ok { + return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) + } + oldNames := image.Names + names, err := applyNameOperation(oldNames, names, op) + if err != nil { + return err + } + for _, name := range oldNames { + delete(r.byname, name) + } + for _, name := range names { + if otherImage, ok := r.byname[name]; ok { + r.removeName(otherImage, name) + } + r.byname[name] = image + image.addNameToHistory(name) + } + image.Names = names + return r.Save() +} + +// Requires startWriting. +func (r *imageStore) Delete(id string) error { + if !r.lockfile.IsReadWrite() { + return fmt.Errorf("not allowed to delete images at %q: %w", r.imagespath(), ErrStoreIsReadOnly) + } + image, ok := r.lookup(id) + if !ok { + return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) + } + id = image.ID + toDeleteIndex := -1 + for i, candidate := range r.images { + if candidate.ID == id { + toDeleteIndex = i + } + } + delete(r.byid, id) + // This can only fail if the ID is already missing, which shouldn’t happen — and in that case the index is already in the desired state anyway. + // The store’s Delete method is used on various paths to recover from failures, so this should be robust against partially missing data. + _ = r.idindex.Delete(id) + for _, name := range image.Names { + delete(r.byname, name) + } + for _, digest := range image.Digests { + prunedList := imageSliceWithoutValue(r.bydigest[digest], image) + if len(prunedList) == 0 { + delete(r.bydigest, digest) + } else { + r.bydigest[digest] = prunedList + } + } + if toDeleteIndex != -1 { + // delete the image at toDeleteIndex + if toDeleteIndex == len(r.images)-1 { + r.images = r.images[:len(r.images)-1] + } else { + r.images = append(r.images[:toDeleteIndex], r.images[toDeleteIndex+1:]...) + } + } + if err := r.Save(); err != nil { + return err + } + if err := os.RemoveAll(r.datadir(id)); err != nil { + return err + } + return nil +} + +// Requires startReading or startWriting. +func (r *imageStore) Get(id string) (*Image, error) { + if image, ok := r.lookup(id); ok { + return copyImage(image), nil + } + return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) +} + +// Requires startReading or startWriting. 
+func (r *imageStore) Exists(id string) bool { + _, ok := r.lookup(id) + return ok +} + +// Requires startReading or startWriting. +func (r *imageStore) ByDigest(d digest.Digest) ([]*Image, error) { + if images, ok := r.bydigest[d]; ok { + return copyImageSlice(images), nil + } + return nil, fmt.Errorf("locating image with digest %q: %w", d, ErrImageUnknown) +} + +// Requires startReading or startWriting. +func (r *imageStore) BigData(id, key string) ([]byte, error) { + if key == "" { + return nil, fmt.Errorf("can't retrieve image big data value for empty name: %w", ErrInvalidBigDataName) + } + image, ok := r.lookup(id) + if !ok { + return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) + } + return os.ReadFile(r.datapath(image.ID, key)) +} + +// Requires startReading or startWriting. +func (r *imageStore) BigDataSize(id, key string) (int64, error) { + if key == "" { + return -1, fmt.Errorf("can't retrieve size of image big data with empty name: %w", ErrInvalidBigDataName) + } + image, ok := r.lookup(id) + if !ok { + return -1, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) + } + if size, ok := image.BigDataSizes[key]; ok { // This is valid, and returns ok == false, for BigDataSizes == nil. + return size, nil + } + if data, err := r.BigData(id, key); err == nil && data != nil { + return int64(len(data)), nil + } + return -1, ErrSizeUnknown +} + +// Requires startReading or startWriting. +func (r *imageStore) BigDataDigest(id, key string) (digest.Digest, error) { + if key == "" { + return "", fmt.Errorf("can't retrieve digest of image big data value with empty name: %w", ErrInvalidBigDataName) + } + image, ok := r.lookup(id) + if !ok { + return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) + } + if d, ok := image.BigDataDigests[key]; ok { // This is valid, and returns ok == false, for BigDataDigests == nil. + return d, nil + } + return "", ErrDigestUnknown +} + +// Requires startReading or startWriting. +func (r *imageStore) BigDataNames(id string) ([]string, error) { + image, ok := r.lookup(id) + if !ok { + return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) + } + return copyStringSlice(image.BigDataNames), nil +} + +func imageSliceWithoutValue(slice []*Image, value *Image) []*Image { + modified := make([]*Image, 0, len(slice)) + for _, v := range slice { + if v == value { + continue + } + modified = append(modified, v) + } + return modified +} + +// Requires startWriting. +func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error { + if !r.lockfile.IsReadWrite() { + return fmt.Errorf("not allowed to save data items associated with images at %q: %w", r.imagespath(), ErrStoreIsReadOnly) + } + image, ok := r.lookup(id) + if !ok { + return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) + } + var err error + var newDigest digest.Digest + if bigDataNameIsManifest(key) { + if digestManifest == nil { + return fmt.Errorf("digesting manifest: no manifest digest callback provided: %w", ErrDigestUnknown) + } + if newDigest, err = digestManifest(data); err != nil { + return fmt.Errorf("digesting manifest: %w", err) + } + } else { + newDigest = digest.Canonical.FromBytes(data) + } + return r.setBigData(image, key, data, newDigest) +} + +// Requires startWriting. 
+func (r *imageStore) setBigData(image *Image, key string, data []byte, newDigest digest.Digest) error {
+	if key == "" {
+		return fmt.Errorf("can't set empty name for image big data item: %w", ErrInvalidBigDataName)
+	}
+	err := os.MkdirAll(r.datadir(image.ID), 0o700)
+	if err != nil {
+		return err
+	}
+	err = ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0o600)
+	if err == nil {
+		save := false
+		if image.BigDataSizes == nil {
+			image.BigDataSizes = make(map[string]int64)
+		}
+		oldSize, sizeOk := image.BigDataSizes[key]
+		image.BigDataSizes[key] = int64(len(data))
+		if image.BigDataDigests == nil {
+			image.BigDataDigests = make(map[string]digest.Digest)
+		}
+		oldDigest, digestOk := image.BigDataDigests[key]
+		image.BigDataDigests[key] = newDigest
+		if !sizeOk || oldSize != image.BigDataSizes[key] || !digestOk || oldDigest != newDigest {
+			save = true
+		}
+		addName := true
+		for _, name := range image.BigDataNames {
+			if name == key {
+				addName = false
+				break
+			}
+		}
+		if addName {
+			image.BigDataNames = append(image.BigDataNames, key)
+			save = true
+		}
+		for _, oldDigest := range image.Digests {
+			// remove the image from the list of images in the digest-based index
+			if list, ok := r.bydigest[oldDigest]; ok {
+				prunedList := imageSliceWithoutValue(list, image)
+				if len(prunedList) == 0 {
+					delete(r.bydigest, oldDigest)
+				} else {
+					r.bydigest[oldDigest] = prunedList
+				}
+			}
+		}
+		if err = image.recomputeDigests(); err != nil {
+			return fmt.Errorf("recomputing image digest information for %s: %w", image.ID, err)
+		}
+		for _, newDigest := range image.Digests {
+			// add the image to the list of images in the digest-based index which
+			// corresponds to the new digest for this item, unless it's already there
+			list := r.bydigest[newDigest]
+			if len(list) == len(imageSliceWithoutValue(list, image)) {
+				// the list isn't shortened by trying to prune this image from it,
+				// so it's not in there yet
+				r.bydigest[newDigest] = append(list, image)
+			}
+		}
+		if save {
+			err = r.Save()
+		}
+	}
+	return err
+}
+
+// Requires startWriting.
+func (r *imageStore) Wipe() error {
+	if !r.lockfile.IsReadWrite() {
+		return fmt.Errorf("not allowed to delete images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
+	}
+	ids := make([]string, 0, len(r.byid))
+	for id := range r.byid {
+		ids = append(ids, id)
+	}
+	for _, id := range ids {
+		if err := r.Delete(id); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/containers/storage/jsoniter.go b/vendor/github.com/containers/storage/jsoniter.go
new file mode 100644
index 0000000000..7dd6388d7f
--- /dev/null
+++ b/vendor/github.com/containers/storage/jsoniter.go
@@ -0,0 +1,5 @@
+package storage
+
+import jsoniter "github.com/json-iterator/go"
+
+var json = jsoniter.ConfigCompatibleWithStandardLibrary
diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go
new file mode 100644
index 0000000000..d105e73f6c
--- /dev/null
+++ b/vendor/github.com/containers/storage/layers.go
@@ -0,0 +1,2508 @@
+package storage
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path"
+	"path/filepath"
+	"reflect"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	drivers "github.com/containers/storage/drivers"
+	"github.com/containers/storage/pkg/archive"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/ioutils"
+	"github.com/containers/storage/pkg/lockfile"
+	"github.com/containers/storage/pkg/mount"
+	"github.com/containers/storage/pkg/stringid"
+	"github.com/containers/storage/pkg/system"
+	"github.com/containers/storage/pkg/tarlog"
+	"github.com/containers/storage/pkg/truncindex"
+	multierror "github.com/hashicorp/go-multierror"
+	"github.com/klauspost/pgzip"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/opencontainers/selinux/go-selinux"
+	"github.com/sirupsen/logrus"
+	"github.com/vbatts/tar-split/archive/tar"
+	"github.com/vbatts/tar-split/tar/asm"
+	"github.com/vbatts/tar-split/tar/storage"
+)
+
+const (
+	tarSplitSuffix = ".tar-split.gz"
+	incompleteFlag = "incomplete"
+	// maxLayerStoreCleanupIterations is the number of times we try to clean up inconsistent layer store state
+	// in readers (which, for implementation reasons, gives other writers the opportunity to create more inconsistent state)
+	// until we just give up.
+	maxLayerStoreCleanupIterations = 3
+)
+
+type layerLocations uint8
+
+// The backing store is split into two json files: the volatile one is
+// written without fsync(), so it isn't as robust against unclean
+// shutdown.
+const (
+	stableLayerLocation layerLocations = 1 << iota
+	volatileLayerLocation
+
+	numLayerLocationIndex = iota
+)
+
+func layerLocationFromIndex(index int) layerLocations {
+	return 1 << index
+}
+
+// A Layer is a record of a copy-on-write layer that's stored by the lower
+// level graph driver.
+type Layer struct {
+	// ID is either one which was specified at create-time, or a random
+	// value which was generated by the library.
+	ID string `json:"id"`
+
+	// Names is an optional set of user-defined convenience values. The
+	// layer can be referred to by its ID or any of its names. Names are
+	// unique among layers.
+	Names []string `json:"names,omitempty"`
+
+	// Parent is the ID of a layer from which this layer inherits data.
+ Parent string `json:"parent,omitempty"` + + // Metadata is data we keep for the convenience of the caller. It is not + // expected to be large, since it is kept in memory. + Metadata string `json:"metadata,omitempty"` + + // MountLabel is an SELinux label which should be used when attempting to mount + // the layer. + MountLabel string `json:"mountlabel,omitempty"` + + // MountPoint is the path where the layer is mounted, or where it was most + // recently mounted. + // + // WARNING: This field is a snapshot in time: (except for users inside c/storage that + // hold the mount lock) the true value can change between subsequent + // calls to c/storage API. + // + // Users that need to handle concurrent mount/unmount attempts should not access this + // field at all, and should only use the path returned by .Mount() (and that’s only + // assuming no other user will concurrently decide to unmount that mount point). + MountPoint string `json:"-"` + + // MountCount is used as a reference count for the container's layer being + // mounted at the mount point. + // + // WARNING: This field is a snapshot in time; (except for users inside c/storage that + // hold the mount lock) the true value can change between subsequent + // calls to c/storage API. + // + // In situations where concurrent mount/unmount attempts can happen, this field + // should not be used for any decisions, maybe apart from heuristic user warnings. + MountCount int `json:"-"` + + // Created is the datestamp for when this layer was created. Older + // versions of the library did not track this information, so callers + // will likely want to use the IsZero() method to verify that a value + // is set before using it. + Created time.Time `json:"created,omitempty"` + + // CompressedDigest is the digest of the blob that was last passed to + // ApplyDiff() or create(), as it was presented to us. + CompressedDigest digest.Digest `json:"compressed-diff-digest,omitempty"` + + // CompressedSize is the length of the blob that was last passed to + // ApplyDiff() or create(), as it was presented to us. If + // CompressedDigest is not set, this should be treated as if it were an + // uninitialized value. + CompressedSize int64 `json:"compressed-size,omitempty"` + + // UncompressedDigest is the digest of the blob that was last passed to + // ApplyDiff() or create(), after we decompressed it. Often referred to + // as a DiffID. + UncompressedDigest digest.Digest `json:"diff-digest,omitempty"` + + // UncompressedSize is the length of the blob that was last passed to + // ApplyDiff() or create(), after we decompressed it. If + // UncompressedDigest is not set, this should be treated as if it were + // an uninitialized value. + UncompressedSize int64 `json:"diff-size,omitempty"` + + // CompressionType is the type of compression which we detected on the blob + // that was last passed to ApplyDiff() or create(). + CompressionType archive.Compression `json:"compression,omitempty"` + + // UIDs and GIDs are lists of UIDs and GIDs used in the layer. This + // field is only populated (i.e., will only contain one or more + // entries) if the layer was created using ApplyDiff() or create(). + UIDs []uint32 `json:"uidset,omitempty"` + GIDs []uint32 `json:"gidset,omitempty"` + + // Flags is arbitrary data about the layer. + Flags map[string]interface{} `json:"flags,omitempty"` + + // UIDMap and GIDMap are used for setting up a layer's contents + // for use inside of a user namespace where UID mapping is being used. 
+	UIDMap []idtools.IDMap `json:"uidmap,omitempty"`
+	GIDMap []idtools.IDMap `json:"gidmap,omitempty"`
+
+	// ReadOnly is true if this layer resides in a read-only layer store.
+	ReadOnly bool `json:"-"`
+
+	// volatileStore is true if the layer is from the volatile json file
+	volatileStore bool `json:"-"`
+
+	// BigDataNames is a list of names of data items that we keep for the
+	// convenience of the caller. They can be large, and are only in
+	// memory when being read from or written to disk.
+	BigDataNames []string `json:"big-data-names,omitempty"`
+}
+
+type layerMountPoint struct {
+	ID         string `json:"id"`
+	MountPoint string `json:"path"`
+	MountCount int    `json:"count"`
+}
+
+// DiffOptions override the default behavior of Diff() methods.
+type DiffOptions struct {
+	// Compression, if set, overrides the default compressor when generating a diff.
+	Compression *archive.Compression
+}
+
+// roLayerStore wraps a graph driver, adding the ability to refer to layers by
+// name, and keeping track of parent-child relationships, along with a list of
+// all known layers.
+type roLayerStore interface {
+	roMetadataStore
+	roLayerBigDataStore
+
+	// startReading makes sure the store is fresh, and locks it for reading.
+	// If this succeeds, the caller MUST call stopReading().
+	startReading() error
+
+	// stopReading releases locks obtained by startReading.
+	stopReading()
+
+	// Exists checks if a layer with the specified name or ID is known.
+	Exists(id string) bool
+
+	// Get retrieves information about a layer given an ID or name.
+	Get(id string) (*Layer, error)
+
+	// Status returns a slice of key-value pairs, suitable for human consumption,
+	// relaying whatever status information the underlying driver can share.
+	Status() ([][2]string, error)
+
+	// Changes returns a slice of Change structures, which contain a pathname
+	// (Path) and a description of what sort of change (Kind) was made by the
+	// layer (either ChangeModify, ChangeAdd, or ChangeDelete), relative to a
+	// specified layer. By default, the layer's parent is used as a reference.
+	Changes(from, to string) ([]archive.Change, error)
+
+	// Diff produces a tarstream which can be applied to a layer with the contents
+	// of the first layer to produce a layer with the contents of the second layer.
+	// By default, the parent of the second layer is used as the first
+	// layer, so it need not be specified. Options can be used to override
+	// default behavior, but are also not required.
+	Diff(from, to string, options *DiffOptions) (io.ReadCloser, error)
+
+	// DiffSize produces an estimate of the length of the tarstream which would be
+	// produced by Diff.
+	DiffSize(from, to string) (int64, error)
+
+	// Size produces a cached value for the uncompressed size of the layer,
+	// if one is known, or -1 if it is not known. If the layer cannot be
+	// found, it returns an error.
+	Size(name string) (int64, error)
+
+	// LayersByCompressedDigest returns a slice of the layers with the
+	// specified compressed digest value recorded for them.
+	LayersByCompressedDigest(d digest.Digest) ([]Layer, error)
+
+	// LayersByUncompressedDigest returns a slice of the layers with the
+	// specified uncompressed digest value recorded for them.
+	LayersByUncompressedDigest(d digest.Digest) ([]Layer, error)
+
+	// Layers returns a slice of the known layers.
+ Layers() ([]Layer, error) +} + +// rwLayerStore wraps a graph driver, adding the ability to refer to layers by +// name, and keeping track of parent-child relationships, along with a list of +// all known layers. +type rwLayerStore interface { + roLayerStore + rwMetadataStore + flaggableStore + rwLayerBigDataStore + + // startWriting makes sure the store is fresh, and locks it for writing. + // If this succeeds, the caller MUST call stopWriting(). + startWriting() error + + // stopWriting releases locks obtained by startWriting. + stopWriting() + + // create creates a new layer, optionally giving it a specified ID rather than + // a randomly-generated one, either inheriting data from another specified + // layer or the empty base layer. The new layer can optionally be given names + // and have an SELinux label specified for use when mounting it. Some + // underlying drivers can accept a "size" option. At this time, most + // underlying drivers do not themselves distinguish between writeable + // and read-only layers. Returns the new layer structure and the size of the + // diff which was applied to its parent to initialize its contents. + create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader) (*Layer, int64, error) + + // updateNames modifies names associated with a layer based on (op, names). + updateNames(id string, names []string, op updateNameOperation) error + + // Delete deletes a layer with the specified name or ID. + Delete(id string) error + + // Wipe deletes all layers. + Wipe() error + + // Mount mounts a layer for use. If the specified layer is the parent of other + // layers, it should not be written to. An SELinux label to be applied to the + // mount can be specified to override the one configured for the layer. + // The mappings used by the container can be specified. + Mount(id string, options drivers.MountOpts) (string, error) + + // unmount unmounts a layer when it is no longer in use. + // If conditional is set, it will fail with ErrLayerNotMounted if the layer is not mounted (without conditional, the caller is + // making a promise that the layer is actually mounted). + // If force is set, it will physically try to unmount it even if it is mounted multiple times, or even if (!conditional and) + // there are no records of it being mounted in the first place. + // It returns whether the layer was still mounted at the time this function returned. + // WARNING: The return value may already be obsolete by the time it is available + // to the caller, so it can be used for heuristic sanity checks at best. It should almost always be ignored. + unmount(id string, force bool, conditional bool) (bool, error) + + // Mounted returns number of times the layer has been mounted. + Mounted(id string) (int, error) + + // ParentOwners returns the UIDs and GIDs of parents of the layer's mountpoint + // for which the layer's UID and GID maps don't contain corresponding entries. + ParentOwners(id string) (uids, gids []int, err error) + + // ApplyDiff reads a tarstream which was created by a previous call to Diff and + // applies its changes to a specified layer. + ApplyDiff(to string, diff io.Reader) (int64, error) + + // ApplyDiffWithDiffer applies the changes through the differ callback function. + // If to is the empty string, then a staging directory is created by the driver. 
+	ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
+
+	// CleanupStagingDirectory cleans up the staging directory. It can be used to clean up the staging directory on errors.
+	CleanupStagingDirectory(stagingDirectory string) error
+
+	// ApplyDiffFromStagingDirectory uses stagingDirectory to create the diff.
+	ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error
+
+	// DifferTarget gets the location where files are stored for the layer.
+	DifferTarget(id string) (string, error)
+
+	// PutAdditionalLayer creates a layer using the diff contained in the additional layer
+	// store.
+	// This API is experimental and can be changed without bumping the major version number.
+	PutAdditionalLayer(id string, parentLayer *Layer, names []string, aLayer drivers.AdditionalLayer) (layer *Layer, err error)
+
+	// Clean up unreferenced layers
+	GarbageCollect() error
+}
+
+type layerStore struct {
+	// The following fields are only set when constructing layerStore, and must never be modified afterwards.
+	// They are safe to access without any other locking.
+	lockfile       *lockfile.LockFile // lockfile.IsReadWrite can be used to distinguish between read-write and read-only layer stores.
+	mountsLockfile *lockfile.LockFile // Can _only_ be obtained with inProcessLock held.
+	rundir         string
+	jsonPath       [numLayerLocationIndex]string
+	layerdir       string
+
+	inProcessLock sync.RWMutex // Can _only_ be obtained with lockfile held.
+	// The following fields can only be read/written with read/write ownership of inProcessLock, respectively.
+	// Almost all users should use startReading() or startWriting().
+	lastWrite           lockfile.LastWrite
+	mountsLastWrite     lockfile.LastWrite // Only valid if lockfile.IsReadWrite()
+	layers              []*Layer
+	idindex             *truncindex.TruncIndex
+	byid                map[string]*Layer
+	byname              map[string]*Layer
+	bymount             map[string]*Layer
+	bycompressedsum     map[digest.Digest][]string
+	byuncompressedsum   map[digest.Digest][]string
+	layerspathsModified [numLayerLocationIndex]time.Time
+
+	// FIXME: This field is only set when constructing layerStore, but locking rules of the driver
+	// interface itself are not documented here.
+	driver drivers.Driver
+}
+
+// The caller must hold r.inProcessLock for reading.
+func layerLocation(l *Layer) layerLocations {
+	if l.volatileStore {
+		return volatileLayerLocation
+	}
+	return stableLayerLocation
+}
+
+func copyLayer(l *Layer) *Layer {
+	return &Layer{
+		ID:                 l.ID,
+		Names:              copyStringSlice(l.Names),
+		Parent:             l.Parent,
+		Metadata:           l.Metadata,
+		MountLabel:         l.MountLabel,
+		MountPoint:         l.MountPoint,
+		MountCount:         l.MountCount,
+		Created:            l.Created,
+		CompressedDigest:   l.CompressedDigest,
+		CompressedSize:     l.CompressedSize,
+		UncompressedDigest: l.UncompressedDigest,
+		UncompressedSize:   l.UncompressedSize,
+		CompressionType:    l.CompressionType,
+		ReadOnly:           l.ReadOnly,
+		volatileStore:      l.volatileStore,
+		BigDataNames:       copyStringSlice(l.BigDataNames),
+		Flags:              copyStringInterfaceMap(l.Flags),
+		UIDMap:             copyIDMap(l.UIDMap),
+		GIDMap:             copyIDMap(l.GIDMap),
+		UIDs:               copyUint32Slice(l.UIDs),
+		GIDs:               copyUint32Slice(l.GIDs),
+	}
+}
+
+// startWritingWithReload makes sure the store is fresh if canReload, and locks it for writing.
+// If this succeeds, the caller MUST call stopWriting().
+//
+// This is an internal implementation detail of layerStore construction, every other caller
+// should use startWriting() instead.
+func (r *layerStore) startWritingWithReload(canReload bool) error { + r.lockfile.Lock() + r.inProcessLock.Lock() + succeeded := false + defer func() { + if !succeeded { + r.inProcessLock.Unlock() + r.lockfile.Unlock() + } + }() + + if canReload { + if _, err := r.reloadIfChanged(true); err != nil { + return err + } + } + + succeeded = true + return nil +} + +// startWriting makes sure the store is fresh, and locks it for writing. +// If this succeeds, the caller MUST call stopWriting(). +func (r *layerStore) startWriting() error { + return r.startWritingWithReload(true) +} + +// stopWriting releases locks obtained by startWriting. +func (r *layerStore) stopWriting() { + r.inProcessLock.Unlock() + r.lockfile.Unlock() +} + +// startReadingWithReload makes sure the store is fresh if canReload, and locks it for reading. +// If this succeeds, the caller MUST call stopReading(). +// +// This is an internal implementation detail of layerStore construction, every other caller +// should use startReading() instead. +func (r *layerStore) startReadingWithReload(canReload bool) error { + // inProcessLocked calls the nested function with r.inProcessLock held for writing. + inProcessLocked := func(fn func() error) error { + r.inProcessLock.Lock() + defer r.inProcessLock.Unlock() + return fn() + } + + r.lockfile.RLock() + unlockFn := r.lockfile.Unlock // A function to call to clean up, or nil + defer func() { + if unlockFn != nil { + unlockFn() + } + }() + r.inProcessLock.RLock() + unlockFn = r.stopReading + + if canReload { + // If we are lucky, we can just hold the read locks, check that we are fresh, and continue. + modified, err := r.modified() + if err != nil { + return err + } + if modified { + // We are unlucky, and need to reload. + // NOTE: Multiple goroutines can get to this place approximately simultaneously. + r.inProcessLock.RUnlock() + unlockFn = r.lockfile.Unlock + + cleanupsDone := 0 + for { + // First try reloading with r.lockfile held for reading. + // r.inProcessLock will serialize all goroutines that got here; + // each will re-check on-disk state vs. r.lastWrite, and the first one will actually reload the data. + var tryLockedForWriting bool + err := inProcessLocked(func() error { + var err error + tryLockedForWriting, err = r.reloadIfChanged(false) + return err + }) + if err == nil { + break + } + if !tryLockedForWriting { + return err + } + if cleanupsDone >= maxLayerStoreCleanupIterations { + return fmt.Errorf("(even after %d cleanup attempts:) %w", cleanupsDone, err) + } + // Not good enough, we need r.lockfile held for writing. So, let’s do that. + unlockFn() + unlockFn = nil + + r.lockfile.Lock() + unlockFn = r.lockfile.Unlock + if err := inProcessLocked(func() error { + _, err := r.reloadIfChanged(true) + return err + }); err != nil { + return err + } + unlockFn() + unlockFn = nil + + r.lockfile.RLock() + unlockFn = r.lockfile.Unlock + // We need to check for a reload again because the on-disk state could have been modified + // after we released the lock. + cleanupsDone++ + } + + // NOTE that we hold neither a read nor write inProcessLock at this point. That’s fine in ordinary operation, because + // the on-filesystem r.lockfile should protect us against (cooperating) writers, and any use of r.inProcessLock + // protects us against in-process writers modifying data. 
+			// In the presence of non-cooperating writers, we just ensure that 1) the in-memory data is not clearly out-of-date
+			// and 2) access to the in-memory data is not racy;
+			// but we can’t protect against those out-of-process writers modifying _files_ while we are assuming they are in a consistent state.
+
+			r.inProcessLock.RLock()
+		}
+	}
+
+	unlockFn = nil
+	return nil
+}
+
+// startReading makes sure the store is fresh, and locks it for reading.
+// If this succeeds, the caller MUST call stopReading().
+func (r *layerStore) startReading() error {
+	return r.startReadingWithReload(true)
+}
+
+// stopReading releases locks obtained by startReading.
+func (r *layerStore) stopReading() {
+	r.inProcessLock.RUnlock()
+	r.lockfile.Unlock()
+}
+
+// modified returns true if the on-disk state (of layers or mounts) has changed (i.e. if reloadIfChanged may need to modify the store).
+//
+// Note that unlike containerStore.modified and imageStore.modified, this function is not directly used in layerStore.reloadIfChanged();
+// it exists only to help the reader ensure it has fresh enough state.
+//
+// The caller must hold r.lockfile for reading _or_ writing.
+// The caller must hold r.inProcessLock for reading or writing.
+func (r *layerStore) modified() (bool, error) {
+	_, m, err := r.layersModified()
+	if err != nil {
+		return false, err
+	}
+	if m {
+		return true, nil
+	}
+	if r.lockfile.IsReadWrite() {
+		// This means we get, release, and re-obtain r.mountsLockfile if we actually need to do any kind of reload.
+		// That’s a bit expensive, but hopefully most callers will be read-only and see no changes.
+		// We can’t eliminate these mountsLockfile accesses given the current assumption that Layer objects have _some_ not-very-obsolete
+		// mount data. Maybe we can segregate the mount-dependent and mount-independent operations better...
+		r.mountsLockfile.RLock()
+		defer r.mountsLockfile.Unlock()
+		_, m, err := r.mountsModified()
+		if err != nil {
+			return false, err
+		}
+		if m {
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
+// layersModified() checks if the most recent writer to r.jsonPath[] was a party other than the
+// last recorded writer. If so, it returns a lockfile.LastWrite value to record on a successful
+// reload.
+// It should only be called with the lock held.
+// The caller must hold r.inProcessLock for reading.
+func (r *layerStore) layersModified() (lockfile.LastWrite, bool, error) {
+	lastWrite, modified, err := r.lockfile.ModifiedSince(r.lastWrite)
+	if err != nil {
+		return lockfile.LastWrite{}, modified, err
+	}
+	if modified {
+		return lastWrite, true, nil
+	}
+
+	// If the layers.json or volatile-layers.json file has been
+	// modified manually, then we have to reload the storage in
+	// any case.
+	for locationIndex := 0; locationIndex < numLayerLocationIndex; locationIndex++ {
+		info, err := os.Stat(r.jsonPath[locationIndex])
+		if err != nil && !os.IsNotExist(err) {
+			return lockfile.LastWrite{}, false, fmt.Errorf("stat layers file: %w", err)
+		}
+		if info != nil && info.ModTime() != r.layerspathsModified[locationIndex] {
+			// In this case the LastWrite value is equal to r.lastWrite; writing it back doesn’t hurt.
+			return lastWrite, true, nil
+		}
+	}
+
+	return lockfile.LastWrite{}, false, nil
+}
+
+// reloadIfChanged reloads the contents of the store from disk if it is changed.
+//
+// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
+// if it is held for writing.
+//
+// The caller must hold r.inProcessLock for WRITING.
+//
+// If !lockedForWriting and this function fails, the return value indicates whether
+// reloadIfChanged() with lockedForWriting could succeed.
+func (r *layerStore) reloadIfChanged(lockedForWriting bool) (bool, error) {
+	lastWrite, layersModified, err := r.layersModified()
+	if err != nil {
+		return false, err
+	}
+	if layersModified {
+		// r.load also reloads mounts data; so, on this path, we don’t need to call reloadMountsIfChanged.
+		if tryLockedForWriting, err := r.load(lockedForWriting); err != nil {
+			return tryLockedForWriting, err // r.lastWrite is unchanged, so we will load the next time again.
+		}
+		r.lastWrite = lastWrite
+		return false, nil
+	}
+	if r.lockfile.IsReadWrite() {
+		r.mountsLockfile.RLock()
+		defer r.mountsLockfile.Unlock()
+		if err := r.reloadMountsIfChanged(); err != nil {
+			return false, err
+		}
+	}
+	return false, nil
+}
+
+// mountsModified returns true if the on-disk mount state has changed (i.e. if reloadMountsIfChanged may need to modify the store),
+// and a lockfile.LastWrite value for that update.
+//
+// The caller must hold r.mountsLockfile for reading _or_ writing.
+// The caller must hold r.inProcessLock for reading or writing.
+func (r *layerStore) mountsModified() (lockfile.LastWrite, bool, error) {
+	return r.mountsLockfile.ModifiedSince(r.mountsLastWrite)
+}
+
+// reloadMountsIfChanged reloads the contents of mountspath() from disk if it has changed.
+//
+// The caller must hold r.mountsLockfile for reading or writing.
+func (r *layerStore) reloadMountsIfChanged() error {
+	lastWrite, modified, err := r.mountsModified()
+	if err != nil {
+		return err
+	}
+	if modified {
+		if err = r.loadMounts(); err != nil {
+			return err
+		}
+		r.mountsLastWrite = lastWrite
+	}
+	return nil
+}
+
+// Requires startReading or startWriting.
+func (r *layerStore) Layers() ([]Layer, error) {
+	layers := make([]Layer, len(r.layers))
+	for i := range r.layers {
+		layers[i] = *copyLayer(r.layers[i])
+	}
+	return layers, nil
+}
+
+// Requires startWriting.
+func (r *layerStore) GarbageCollect() error {
+	layers, err := r.driver.ListLayers()
+	if err != nil {
+		if errors.Is(err, drivers.ErrNotSupported) {
+			return nil
+		}
+		return err
+	}
+
+	for _, id := range layers {
+		// Is the id still referenced?
+		if r.byid[id] != nil {
+			continue
+		}
+
+		// Remove the layer and any related data of the unreferenced id.
+		logrus.Debugf("removing driver layer %q", id)
+		if err := r.driver.Remove(id); err != nil {
+			return err
+		}
+
+		logrus.Debugf("removing %q", r.tspath(id))
+		os.Remove(r.tspath(id))
+		logrus.Debugf("removing %q", r.datadir(id))
+		os.RemoveAll(r.datadir(id))
+	}
+	return nil
+}
+
+func (r *layerStore) mountspath() string {
+	return filepath.Join(r.rundir, "mountpoints.json")
+}
+
+// load reloads the contents of the store from disk.
+//
+// Most callers should call reloadIfChanged() instead, to avoid overhead and to correctly
+// manage r.lastWrite.
+//
+// As a side effect, this sets r.mountsLastWrite.
+//
+// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
+// if it is held for writing.
+// The caller must hold r.inProcessLock for WRITING.
+//
+// If !lockedForWriting and this function fails, the return value indicates whether
+// retrying with lockedForWriting could succeed.
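+//
+// A sketch (illustrative only) of how a caller that holds r.lockfile only for
+// reading is expected to consume that contract:
+//
+//	tryWrite, err := r.load(false)
+//	if err != nil && !tryWrite {
+//		return err // retrying with r.lockfile held for writing cannot help
+//	}
+//	// otherwise, upgrade r.lockfile to a write lock and call r.load(true)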
+func (r *layerStore) load(lockedForWriting bool) (bool, error) {
+	var modifiedLocations layerLocations
+
+	layers := []*Layer{}
+	ids := make(map[string]*Layer)
+
+	for locationIndex := 0; locationIndex < numLayerLocationIndex; locationIndex++ {
+		location := layerLocationFromIndex(locationIndex)
+		rpath := r.jsonPath[locationIndex]
+		info, err := os.Stat(rpath)
+		if err != nil {
+			if !os.IsNotExist(err) {
+				return false, err
+			}
+		} else {
+			r.layerspathsModified[locationIndex] = info.ModTime()
+		}
+		data, err := os.ReadFile(rpath)
+		if err != nil && !os.IsNotExist(err) {
+			return false, err
+		}
+
+		locationLayers := []*Layer{}
+		if len(data) != 0 {
+			if err := json.Unmarshal(data, &locationLayers); err != nil {
+				return false, fmt.Errorf("loading %q: %w", rpath, err)
+			}
+		}
+
+		for _, layer := range locationLayers {
+			// There should be no duplicated IDs between the json files, but let’s check to be sure.
+			if ids[layer.ID] != nil {
+				continue // skip invalid duplicated layer
+			}
+			// Remember where the layer came from.
+			if location == volatileLayerLocation {
+				layer.volatileStore = true
+			}
+			layers = append(layers, layer)
+			ids[layer.ID] = layer
+		}
+	}
+
+	idlist := make([]string, 0, len(layers))
+	names := make(map[string]*Layer)
+	compressedsums := make(map[digest.Digest][]string)
+	uncompressedsums := make(map[digest.Digest][]string)
+	var errorToResolveBySaving error // == nil; if there are multiple errors, this is one of them.
+	if r.lockfile.IsReadWrite() {
+		selinux.ClearLabels()
+	}
+	for n, layer := range layers {
+		idlist = append(idlist, layer.ID)
+		for _, name := range layer.Names {
+			if conflict, ok := names[name]; ok {
+				r.removeName(conflict, name)
+				errorToResolveBySaving = ErrDuplicateLayerNames
+				modifiedLocations |= layerLocation(conflict)
+			}
+			names[name] = layers[n]
+		}
+		if layer.CompressedDigest != "" {
+			compressedsums[layer.CompressedDigest] = append(compressedsums[layer.CompressedDigest], layer.ID)
+		}
+		if layer.UncompressedDigest != "" {
+			uncompressedsums[layer.UncompressedDigest] = append(uncompressedsums[layer.UncompressedDigest], layer.ID)
+		}
+		if layer.MountLabel != "" {
+			selinux.ReserveLabel(layer.MountLabel)
+		}
+		layer.ReadOnly = !r.lockfile.IsReadWrite()
+		// The r.lockfile.IsReadWrite() condition maintains past practice:
+		// Incomplete layers in a read-only store are not treated as a reason to refuse to use other layers from that store
+		// (OTOH creating child layers on top would probably lead to problems?).
+		// We do remove incomplete layers in read-write stores so that we don’t build on top of them.
+		if layerHasIncompleteFlag(layer) && r.lockfile.IsReadWrite() {
+			errorToResolveBySaving = errors.New("an incomplete layer exists and can't be cleaned up")
+		}
+	}
+
+	if errorToResolveBySaving != nil {
+		if !r.lockfile.IsReadWrite() {
+			return false, errorToResolveBySaving
+		}
+		if !lockedForWriting {
+			return true, errorToResolveBySaving
+		}
+	}
+	r.layers = layers
+	r.idindex = truncindex.NewTruncIndex(idlist) // Invalid values in idlist are ignored: they are not a reason to refuse processing the whole store.
+	r.byid = ids
+	r.byname = names
+	r.bycompressedsum = compressedsums
+	r.byuncompressedsum = uncompressedsums
+
+	// Load and merge information about which layers are mounted, and where.
+	if r.lockfile.IsReadWrite() {
+		r.mountsLockfile.RLock()
+		defer r.mountsLockfile.Unlock()
+		// We need to reload mounts unconditionally, because by creating r.layers from scratch, we have discarded the previous
+		// information, if any. 
So, obtain a fresh mountsLastWrite value so that we don’t unnecessarily reload the data
+		// afterwards.
+		mountsLastWrite, err := r.mountsLockfile.GetLastWrite()
+		if err != nil {
+			return false, err
+		}
+		if err := r.loadMounts(); err != nil {
+			return false, err
+		}
+		r.mountsLastWrite = mountsLastWrite
+		// NOTE: We will release mountsLockfile when this function returns, so unlike most of the layer data, the
+		// r.layers[].MountPoint, r.layers[].MountCount, and r.bymount values might no longer reflect the
+		// true on-filesystem state by the time this function returns.
+		// Code that needs the state to be accurate must lock r.mountsLockfile again,
+		// and possibly loadMounts() again.
+	}
+
+	if errorToResolveBySaving != nil {
+		if !r.lockfile.IsReadWrite() {
+			return false, fmt.Errorf("internal error: layerStore.load has errorToResolveBySaving but !r.lockfile.IsReadWrite")
+		}
+		// Last step: try to remove anything that a previous
+		// user of this storage area marked for deletion but didn't manage to
+		// actually delete.
+		var incompleteDeletionErrors error // = nil
+		for _, layer := range r.layers {
+			if layer.Flags == nil {
+				layer.Flags = make(map[string]interface{})
+			}
+			if layerHasIncompleteFlag(layer) {
+				logrus.Warnf("Found incomplete layer %#v, deleting it", layer.ID)
+				err := r.deleteInternal(layer.ID)
+				if err != nil {
+					// Don't return the error immediately, because deleteInternal does not saveLayers();
+					// even if deleting one incomplete layer fails, call saveLayers() so that any other
+					// incomplete layers that were successfully deleted have their metadata removed correctly.
+					incompleteDeletionErrors = multierror.Append(incompleteDeletionErrors,
+						fmt.Errorf("deleting layer %#v: %w", layer.ID, err))
+				}
+				modifiedLocations |= layerLocation(layer)
+			}
+		}
+		if err := r.saveLayers(modifiedLocations); err != nil {
+			return false, err
+		}
+		if incompleteDeletionErrors != nil {
+			return false, incompleteDeletionErrors
+		}
+	}
+	return false, nil
+}
+
+// The caller must hold r.mountsLockfile for reading or writing.
+// The caller must hold r.inProcessLock for WRITING.
+func (r *layerStore) loadMounts() error {
+	mounts := make(map[string]*Layer)
+	mpath := r.mountspath()
+	data, err := os.ReadFile(mpath)
+	if err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	layerMounts := []layerMountPoint{}
+	if len(data) != 0 {
+		if err := json.Unmarshal(data, &layerMounts); err != nil {
+			return err
+		}
+	}
+	// Clear all of our mount information. If another process
+	// unmounted something, it (along with its zero count) won't
+	// have been encoded into the version of mountpoints.json that
+	// we're loading, so our count could fall out of sync with it
+	// if we didn't; and if we subsequently changed something else,
+	// we'd pass that stale count along to any other process that
+	// reloads the data after we save it.
+	for _, layer := range r.layers {
+		layer.MountPoint = ""
+		layer.MountCount = 0
+	}
+	// All of the non-zero count values will have been encoded, so
+	// we reset the still-mounted ones based on the contents.
+	for _, mount := range layerMounts {
+		if mount.MountPoint != "" {
+			if layer, ok := r.lookup(mount.ID); ok {
+				mounts[mount.MountPoint] = layer
+				layer.MountPoint = mount.MountPoint
+				layer.MountCount = mount.MountCount
+			}
+		}
+	}
+	r.bymount = mounts
+	return nil
+}
+
+// save saves the contents of the store to disk.
+// The caller must hold r.lockfile locked for writing.
+// The caller must hold r.inProcessLock for WRITING.
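+//
+// saveLocations is a bit mask over the stable and volatile layer locations;
+// for illustration (not a call made in this file), saving both at once would
+// be:
+//
+//	err := r.save(stableLayerLocation | volatileLayerLocation)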
+func (r *layerStore) save(saveLocations layerLocations) error { + r.mountsLockfile.Lock() + defer r.mountsLockfile.Unlock() + if err := r.saveLayers(saveLocations); err != nil { + return err + } + return r.saveMounts() +} + +// saveFor saves the contents of the store relevant for modifiedLayer to disk. +// The caller must hold r.lockfile locked for writing. +// The caller must hold r.inProcessLock for WRITING. +func (r *layerStore) saveFor(modifiedLayer *Layer) error { + return r.save(layerLocation(modifiedLayer)) +} + +// The caller must hold r.lockfile locked for writing. +// The caller must hold r.inProcessLock for WRITING. +func (r *layerStore) saveLayers(saveLocations layerLocations) error { + if !r.lockfile.IsReadWrite() { + return fmt.Errorf("not allowed to modify the layer store at %q: %w", r.layerdir, ErrStoreIsReadOnly) + } + r.lockfile.AssertLockedForWriting() + + // This must be done before we write the file, because the process could be terminated + // after the file is written but before the lock file is updated. + lw, err := r.lockfile.RecordWrite() + if err != nil { + return err + } + r.lastWrite = lw + + for locationIndex := 0; locationIndex < numLayerLocationIndex; locationIndex++ { + location := layerLocationFromIndex(locationIndex) + if location&saveLocations == 0 { + continue + } + rpath := r.jsonPath[locationIndex] + if err := os.MkdirAll(filepath.Dir(rpath), 0o700); err != nil { + return err + } + subsetLayers := make([]*Layer, 0, len(r.layers)) + for _, layer := range r.layers { + if layerLocation(layer) == location { + subsetLayers = append(subsetLayers, layer) + } + } + + jldata, err := json.Marshal(&subsetLayers) + if err != nil { + return err + } + opts := ioutils.AtomicFileWriterOptions{} + if location == volatileLayerLocation { + opts.NoSync = true + } + if err := ioutils.AtomicWriteFileWithOpts(rpath, jldata, 0o600, &opts); err != nil { + return err + } + r.layerspathsModified[locationIndex] = opts.ModTime + } + return nil +} + +// The caller must hold r.mountsLockfile for writing. +// The caller must hold r.inProcessLock for WRITING. +func (r *layerStore) saveMounts() error { + if !r.lockfile.IsReadWrite() { + return fmt.Errorf("not allowed to modify the layer store at %q: %w", r.layerdir, ErrStoreIsReadOnly) + } + r.mountsLockfile.AssertLockedForWriting() + mpath := r.mountspath() + if err := os.MkdirAll(filepath.Dir(mpath), 0o700); err != nil { + return err + } + mounts := make([]layerMountPoint, 0, len(r.layers)) + for _, layer := range r.layers { + if layer.MountPoint != "" && layer.MountCount > 0 { + mounts = append(mounts, layerMountPoint{ + ID: layer.ID, + MountPoint: layer.MountPoint, + MountCount: layer.MountCount, + }) + } + } + jmdata, err := json.Marshal(&mounts) + if err != nil { + return err + } + + // This must be done before we write the file, because the process could be terminated + // after the file is written but before the lock file is updated. 
+	lw, err := r.mountsLockfile.RecordWrite()
+	if err != nil {
+		return err
+	}
+	r.mountsLastWrite = lw
+
+	if err = ioutils.AtomicWriteFile(mpath, jmdata, 0o600); err != nil {
+		return err
+	}
+	return r.loadMounts()
+}
+
+func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Driver, transient bool) (rwLayerStore, error) {
+	if err := os.MkdirAll(rundir, 0o700); err != nil {
+		return nil, err
+	}
+	if err := os.MkdirAll(layerdir, 0o700); err != nil {
+		return nil, err
+	}
+	// Note: While the containers.lock file is in rundir for transient stores,
+	// we don't want to do this here, because the non-transient layers in
+	// layers.json might be used externally as a read-only layer (using e.g.
+	// additionalimagestores), and that would look for the lockfile in the
+	// same directory.
+	lockFile, err := lockfile.GetLockFile(filepath.Join(layerdir, "layers.lock"))
+	if err != nil {
+		return nil, err
+	}
+	mountsLockfile, err := lockfile.GetLockFile(filepath.Join(rundir, "mountpoints.lock"))
+	if err != nil {
+		return nil, err
+	}
+	volatileDir := layerdir
+	if transient {
+		volatileDir = rundir
+	}
+	rlstore := layerStore{
+		lockfile: lockFile,
+		mountsLockfile: mountsLockfile,
+		rundir: rundir,
+		jsonPath: [numLayerLocationIndex]string{
+			filepath.Join(layerdir, "layers.json"),
+			filepath.Join(volatileDir, "volatile-layers.json"),
+		},
+		layerdir: layerdir,
+
+		byid: make(map[string]*Layer),
+		byname: make(map[string]*Layer),
+		bymount: make(map[string]*Layer),
+
+		driver: driver,
+	}
+	if err := rlstore.startWritingWithReload(false); err != nil {
+		return nil, err
+	}
+	defer rlstore.stopWriting()
+	lw, err := rlstore.lockfile.GetLastWrite()
+	if err != nil {
+		return nil, err
+	}
+	rlstore.lastWrite = lw
+	// rlstore.mountsLastWrite is initialized inside rlstore.load().
+	if _, err := rlstore.load(true); err != nil {
+		return nil, err
+	}
+	return &rlstore, nil
+}
+
+func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (roLayerStore, error) {
+	lockfile, err := lockfile.GetROLockFile(filepath.Join(layerdir, "layers.lock"))
+	if err != nil {
+		return nil, err
+	}
+	rlstore := layerStore{
+		lockfile: lockfile,
+		mountsLockfile: nil,
+		rundir: rundir,
+		jsonPath: [numLayerLocationIndex]string{
+			filepath.Join(layerdir, "layers.json"),
+			filepath.Join(layerdir, "volatile-layers.json"),
+		},
+		layerdir: layerdir,
+
+		byid: make(map[string]*Layer),
+		byname: make(map[string]*Layer),
+		bymount: make(map[string]*Layer),
+
+		driver: driver,
+	}
+	if err := rlstore.startReadingWithReload(false); err != nil {
+		return nil, err
+	}
+	defer rlstore.stopReading()
+	lw, err := rlstore.lockfile.GetLastWrite()
+	if err != nil {
+		return nil, err
+	}
+	rlstore.lastWrite = lw
+	if _, err := rlstore.load(false); err != nil {
+		return nil, err
+	}
+	return &rlstore, nil
+}
+
+// Requires startReading or startWriting.
+func (r *layerStore) lookup(id string) (*Layer, bool) {
+	if layer, ok := r.byid[id]; ok {
+		return layer, ok
+	} else if layer, ok := r.byname[id]; ok {
+		return layer, ok
+	} else if longid, err := r.idindex.Get(id); err == nil {
+		layer, ok := r.byid[longid]
+		return layer, ok
+	}
+	return nil, false
+}
+
+// Requires startReading or startWriting.
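+// For example (illustrative only; name is any layer name or ID known to the
+// store):
+//
+//	size, err := r.Size(name) // size == -1 when no size was ever recorded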
+func (r *layerStore) Size(name string) (int64, error) {
+	layer, ok := r.lookup(name)
+	if !ok {
+		return -1, ErrLayerUnknown
+	}
+	// We use the presence of a non-empty digest as an indicator that the size value was intentionally set, and that
+	// a zero value is not just present because it was never set to anything else (which can happen if the layer was
+	// created by a version of this library that didn't keep track of digest and size information).
+	if layer.UncompressedDigest != "" {
+		return layer.UncompressedSize, nil
+	}
+	return -1, nil
+}
+
+// Requires startWriting.
+func (r *layerStore) ClearFlag(id string, flag string) error {
+	if !r.lockfile.IsReadWrite() {
+		return fmt.Errorf("not allowed to clear flags on layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
+	}
+	layer, ok := r.lookup(id)
+	if !ok {
+		return ErrLayerUnknown
+	}
+	delete(layer.Flags, flag)
+	return r.saveFor(layer)
+}
+
+// Requires startWriting.
+func (r *layerStore) SetFlag(id string, flag string, value interface{}) error {
+	if !r.lockfile.IsReadWrite() {
+		return fmt.Errorf("not allowed to set flags on layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
+	}
+	layer, ok := r.lookup(id)
+	if !ok {
+		return ErrLayerUnknown
+	}
+	if layer.Flags == nil {
+		layer.Flags = make(map[string]interface{})
+	}
+	layer.Flags[flag] = value
+	return r.saveFor(layer)
+}
+
+func (r *layerStore) Status() ([][2]string, error) {
+	return r.driver.Status(), nil
+}
+
+// Requires startWriting.
+func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []string, aLayer drivers.AdditionalLayer) (layer *Layer, err error) {
+	if duplicateLayer, idInUse := r.byid[id]; idInUse {
+		return duplicateLayer, ErrDuplicateID
+	}
+	for _, name := range names {
+		if _, nameInUse := r.byname[name]; nameInUse {
+			return nil, ErrDuplicateName
+		}
+	}
+
+	parent := ""
+	if parentLayer != nil {
+		parent = parentLayer.ID
+	}
+
+	info, err := aLayer.Info()
+	if err != nil {
+		return nil, err
+	}
+	defer info.Close()
+	layer = &Layer{}
+	if err := json.NewDecoder(info).Decode(layer); err != nil {
+		return nil, err
+	}
+	layer.ID = id
+	layer.Parent = parent
+	layer.Created = time.Now().UTC()
+
+	if err := aLayer.CreateAs(id, parent); err != nil {
+		return nil, err
+	}
+
+	// TODO: check if necessary fields are filled
+	r.layers = append(r.layers, layer)
+	// This can only fail on duplicate IDs, which shouldn’t happen — and in
+	// that case the index is already in the desired state anyway.
+	// Implementing recovery from an unlikely and unimportant failure here
+	// would be too risky.
+	_ = r.idindex.Add(id)
+	r.byid[id] = layer
+	for _, name := range names { // names obtained from the additional layer store won't be used
+		r.byname[name] = layer
+	}
+	if layer.CompressedDigest != "" {
+		r.bycompressedsum[layer.CompressedDigest] = append(r.bycompressedsum[layer.CompressedDigest], layer.ID)
+	}
+	if layer.UncompressedDigest != "" {
+		r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID)
+	}
+	if err := r.saveFor(layer); err != nil {
+		if e := r.Delete(layer.ID); e != nil {
+			logrus.Errorf("While recovering from a failure to save layers, error deleting layer %#v: %v", id, e)
+		}
+		return nil, err
+	}
+	return copyLayer(layer), nil
+}
+
+// Requires startWriting.
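+// In outline, the implementation below (1) saves the layer record with the
+// incomplete flag set, (2) asks the graph driver to create the on-disk layer,
+// (3) applies the diff if one was supplied, and (4) only then clears the
+// incomplete flag and saves again, so that an interrupted creation is cleaned
+// up by a later load().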
+func (r *layerStore) create(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader) (layer *Layer, size int64, err error) {
+	if moreOptions == nil {
+		moreOptions = &LayerOptions{}
+	}
+	if !r.lockfile.IsReadWrite() {
+		return nil, -1, fmt.Errorf("not allowed to create new layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
+	}
+	if err := os.MkdirAll(r.rundir, 0o700); err != nil {
+		return nil, -1, err
+	}
+	if err := os.MkdirAll(r.layerdir, 0o700); err != nil {
+		return nil, -1, err
+	}
+	if id == "" {
+		id = stringid.GenerateRandomID()
+		_, idInUse := r.byid[id]
+		for idInUse {
+			id = stringid.GenerateRandomID()
+			_, idInUse = r.byid[id]
+		}
+	}
+	if duplicateLayer, idInUse := r.byid[id]; idInUse {
+		return duplicateLayer, -1, ErrDuplicateID
+	}
+	names = dedupeStrings(names)
+	for _, name := range names {
+		if _, nameInUse := r.byname[name]; nameInUse {
+			return nil, -1, ErrDuplicateName
+		}
+	}
+	parent := ""
+	if parentLayer != nil {
+		parent = parentLayer.ID
+	}
+	var (
+		templateIDMappings *idtools.IDMappings
+		templateMetadata string
+		templateCompressedDigest digest.Digest
+		templateCompressedSize int64
+		templateUncompressedDigest digest.Digest
+		templateUncompressedSize int64
+		templateCompressionType archive.Compression
+		templateUIDs, templateGIDs []uint32
+		templateTSdata []byte
+	)
+	if moreOptions.TemplateLayer != "" {
+		templateLayer, ok := r.lookup(moreOptions.TemplateLayer)
+		if !ok {
+			return nil, -1, ErrLayerUnknown
+		}
+		templateMetadata = templateLayer.Metadata
+		templateIDMappings = idtools.NewIDMappingsFromMaps(templateLayer.UIDMap, templateLayer.GIDMap)
+		templateCompressedDigest, templateCompressedSize = templateLayer.CompressedDigest, templateLayer.CompressedSize
+		templateUncompressedDigest, templateUncompressedSize = templateLayer.UncompressedDigest, templateLayer.UncompressedSize
+		templateCompressionType = templateLayer.CompressionType
+		templateUIDs, templateGIDs = append([]uint32{}, templateLayer.UIDs...), append([]uint32{}, templateLayer.GIDs...)
+		templateTSdata, err = os.ReadFile(r.tspath(templateLayer.ID))
+		if err != nil && !errors.Is(err, os.ErrNotExist) {
+			return nil, -1, err
+		}
+	} else {
+		templateIDMappings = &idtools.IDMappings{}
+	}
+	if mountLabel != "" {
+		selinux.ReserveLabel(mountLabel)
+	}
+
+	// Before actually creating the layer, make a persistent record of it
+	// with the incomplete flag set, so that future processes have a chance
+	// to clean up after it.
+	layer = &Layer{
+		ID: id,
+		Parent: parent,
+		Names: names,
+		MountLabel: mountLabel,
+		Metadata: templateMetadata,
+		Created: time.Now().UTC(),
+		CompressedDigest: templateCompressedDigest,
+		CompressedSize: templateCompressedSize,
+		UncompressedDigest: templateUncompressedDigest,
+		UncompressedSize: templateUncompressedSize,
+		CompressionType: templateCompressionType,
+		UIDs: templateUIDs,
+		GIDs: templateGIDs,
+		Flags: copyStringInterfaceMap(moreOptions.Flags),
+		UIDMap: copyIDMap(moreOptions.UIDMap),
+		GIDMap: copyIDMap(moreOptions.GIDMap),
+		BigDataNames: []string{},
+		volatileStore: moreOptions.Volatile,
+	}
+	layer.Flags[incompleteFlag] = true
+
+	r.layers = append(r.layers, layer)
+	// This can only fail on duplicate IDs, which shouldn’t happen — and in
+	// that case the index is already in the desired state anyway.
+	// Implementing recovery from an unlikely and unimportant failure here
+	// would be too risky.
+	_ = r.idindex.Add(id)
+	r.byid[id] = layer
+	for _, name := range names {
+		r.byname[name] = layer
+	}
+
+	cleanupFailureContext := ""
+	defer func() {
+		if err != nil {
+			// now that the in-memory structures know about the new
+			// record, we can use regular Delete() to clean up if
+			// anything breaks from here on out
+			if cleanupFailureContext == "" {
+				cleanupFailureContext = "unknown: cleanupFailureContext not set at the failure site"
+			}
+			if e := r.Delete(id); e != nil {
+				logrus.Errorf("While recovering from a failure (%s), error deleting layer %#v: %v", cleanupFailureContext, id, e)
+			}
+		}
+	}()
+
+	if err = r.saveFor(layer); err != nil {
+		cleanupFailureContext = "saving incomplete layer metadata"
+		return nil, -1, err
+	}
+
+	for _, item := range moreOptions.BigData {
+		if err = r.setBigData(layer, item.Key, item.Data); err != nil {
+			cleanupFailureContext = fmt.Sprintf("saving big data item %q", item.Key)
+			return nil, -1, err
+		}
+	}
+
+	idMappings := idtools.NewIDMappingsFromMaps(moreOptions.UIDMap, moreOptions.GIDMap)
+	opts := drivers.CreateOpts{
+		MountLabel: mountLabel,
+		StorageOpt: options,
+		IDMappings: idMappings,
+	}
+
+	var parentMappings, oldMappings *idtools.IDMappings
+	if parentLayer != nil {
+		parentMappings = idtools.NewIDMappingsFromMaps(parentLayer.UIDMap, parentLayer.GIDMap)
+	} else {
+		parentMappings = &idtools.IDMappings{}
+	}
+	if moreOptions.TemplateLayer != "" {
+		if err = r.driver.CreateFromTemplate(id, moreOptions.TemplateLayer, templateIDMappings, parent, parentMappings, &opts, writeable); err != nil {
+			cleanupFailureContext = fmt.Sprintf("creating a layer from template layer %q", moreOptions.TemplateLayer)
+			return nil, -1, fmt.Errorf("creating copy of template layer %q with ID %q: %w", moreOptions.TemplateLayer, id, err)
+		}
+		oldMappings = templateIDMappings
+	} else {
+		if writeable {
+			if err = r.driver.CreateReadWrite(id, parent, &opts); err != nil {
+				cleanupFailureContext = "creating a read-write layer"
+				return nil, -1, fmt.Errorf("creating read-write layer with ID %q: %w", id, err)
+			}
+		} else {
+			if err = r.driver.Create(id, parent, &opts); err != nil {
+				cleanupFailureContext = "creating a read-only layer"
+				return nil, -1, fmt.Errorf("creating read-only layer with ID %q: %w", id, err)
+			}
+		}
+		if parentLayer != nil {
+			oldMappings = parentMappings
+		}
+	}
+
+	if oldMappings != nil &&
+		(!reflect.DeepEqual(oldMappings.UIDs(), idMappings.UIDs()) || !reflect.DeepEqual(oldMappings.GIDs(), idMappings.GIDs())) {
+		if err = r.driver.UpdateLayerIDMap(id, oldMappings, idMappings, mountLabel); err != nil {
+			cleanupFailureContext = "in UpdateLayerIDMap"
+			return nil, -1, err
+		}
+	}
+
+	if len(templateTSdata) > 0 {
+		if err = os.MkdirAll(filepath.Dir(r.tspath(id)), 0o700); err != nil {
+			cleanupFailureContext = "creating tar-split parent directory for a copy from template"
+			return nil, -1, err
+		}
+		if err = ioutils.AtomicWriteFile(r.tspath(id), templateTSdata, 0o600); err != nil {
+			cleanupFailureContext = "creating a tar-split copy from template"
+			return nil, -1, err
+		}
+	}
+
+	size = -1
+	if diff != nil {
+		if size, err = r.applyDiffWithOptions(layer.ID, moreOptions, diff); err != nil {
+			cleanupFailureContext = "applying layer diff"
+			return nil, -1, err
+		}
+	} else {
+		// applyDiffWithOptions() would have updated r.bycompressedsum
+		// and r.byuncompressedsum for us, but if we used a template
+		// layer, we didn't call it, so add the new layer as a candidate
+		// for searches for layers by checksum
+		if layer.CompressedDigest != "" 
{
+			r.bycompressedsum[layer.CompressedDigest] = append(r.bycompressedsum[layer.CompressedDigest], layer.ID)
+		}
+		if layer.UncompressedDigest != "" {
+			r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID)
+		}
+	}
+
+	delete(layer.Flags, incompleteFlag)
+	if err = r.saveFor(layer); err != nil {
+		cleanupFailureContext = "saving finished layer metadata"
+		return nil, -1, err
+	}
+
+	layer = copyLayer(layer)
+	return layer, size, err
+}
+
+// Requires startReading or startWriting.
+func (r *layerStore) Mounted(id string) (int, error) {
+	if !r.lockfile.IsReadWrite() {
+		return 0, fmt.Errorf("no mount information for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly)
+	}
+	layer, ok := r.lookup(id)
+	if !ok {
+		return 0, ErrLayerUnknown
+	}
+	// NOTE: The caller of this function is not holding (currently cannot hold) r.mountsLockfile,
+	// so the data is necessarily obsolete by the time this function returns. So, we don’t even
+	// try to reload it in this function, we just rely on r.load() that happened during
+	// r.startReading() or r.startWriting().
+	return layer.MountCount, nil
+}
+
+// Requires startWriting.
+func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error) {
+	// LOCKING BUG: This is reachable via store.Diff → layerStore.Diff → layerStore.newFileGetter
+	// (with btrfs and zfs graph drivers) holding layerStore only locked for reading, while it modifies
+	// - r.layers[].MountCount (directly and via loadMounts / saveMounts)
+	// - r.layers[].MountPoint (directly and via loadMounts / saveMounts)
+	// - r.bymount (via loadMounts / saveMounts)
+
+	// check whether the options include the "ro" option
+	hasReadOnlyOpt := func(opts []string) bool {
+		for _, item := range opts {
+			if item == "ro" {
+				return true
+			}
+		}
+		return false
+	}
+
+	// You are not allowed to mount layers from read-only stores if they
+	// are not mounted read-only.
+	if !r.lockfile.IsReadWrite() && !hasReadOnlyOpt(options.Options) {
+		return "", fmt.Errorf("not allowed to update mount locations for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly)
+	}
+	r.mountsLockfile.Lock()
+	defer r.mountsLockfile.Unlock()
+	if err := r.reloadMountsIfChanged(); err != nil {
+		return "", err
+	}
+	layer, ok := r.lookup(id)
+	if !ok {
+		return "", ErrLayerUnknown
+	}
+	if layer.MountCount > 0 {
+		mounted, err := mount.Mounted(layer.MountPoint)
+		if err != nil {
+			return "", err
+		}
+		// If the layer is not mounted then we have a condition
+		// where the kernel unmounted the mount point. This means
+		// that the mount count never got decremented.
+		if mounted {
+			layer.MountCount++
+			return layer.MountPoint, r.saveMounts()
+		}
+	}
+	if options.MountLabel == "" {
+		options.MountLabel = layer.MountLabel
+	}
+
+	if (options.UidMaps != nil || options.GidMaps != nil) && !r.driver.SupportsShifting() {
+		if !reflect.DeepEqual(options.UidMaps, layer.UIDMap) || !reflect.DeepEqual(options.GidMaps, layer.GIDMap) {
+			return "", fmt.Errorf("cannot mount layer %v: shifting not enabled", layer.ID)
+		}
+	}
+	mountpoint, err := r.driver.Get(id, options)
+	if mountpoint != "" && err == nil {
+		if layer.MountPoint != "" {
+			delete(r.bymount, layer.MountPoint)
+		}
+		layer.MountPoint = filepath.Clean(mountpoint)
+		layer.MountCount++
+		r.bymount[layer.MountPoint] = layer
+		err = r.saveMounts()
+	}
+	return mountpoint, err
+}
+
+// Requires startWriting.
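+// force resets the mount count to 1 so that the final reference is dropped;
+// conditional reports ErrLayerNotMounted for an already-unmounted layer
+// instead of calling into the driver.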
+func (r *layerStore) unmount(id string, force bool, conditional bool) (bool, error) { + // LOCKING BUG: This is reachable via store.Diff → layerStore.Diff → layerStore.newFileGetter → simpleGetCloser.Close() + // (with btrfs and zfs graph drivers) holding layerStore only locked for reading, while it modifies + // - r.layers[].MountCount (directly and via loadMounts / saveMounts) + // - r.layers[].MountPoint (directly and via loadMounts / saveMounts) + // - r.bymount (via loadMounts / saveMounts) + + if !r.lockfile.IsReadWrite() { + return false, fmt.Errorf("not allowed to update mount locations for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly) + } + r.mountsLockfile.Lock() + defer r.mountsLockfile.Unlock() + if err := r.reloadMountsIfChanged(); err != nil { + return false, err + } + layer, ok := r.lookup(id) + if !ok { + layerByMount, ok := r.bymount[filepath.Clean(id)] + if !ok { + return false, ErrLayerUnknown + } + layer = layerByMount + } + if conditional && layer.MountCount == 0 { + return false, ErrLayerNotMounted + } + if force { + layer.MountCount = 1 + } + if layer.MountCount > 1 { + layer.MountCount-- + return true, r.saveMounts() + } + err := r.driver.Put(id) + if err == nil || os.IsNotExist(err) { + if layer.MountPoint != "" { + delete(r.bymount, layer.MountPoint) + } + layer.MountCount-- + layer.MountPoint = "" + return false, r.saveMounts() + } + return true, err +} + +// Requires startReading or startWriting. +func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) { + if !r.lockfile.IsReadWrite() { + return nil, nil, fmt.Errorf("no mount information for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly) + } + r.mountsLockfile.RLock() + defer r.mountsLockfile.Unlock() + // We are not checking r.mountsLockfile.Modified() and calling r.loadMounts here because the store + // is only locked for reading = we are not allowed to modify layer data. + // Holding r.mountsLockfile protects us against concurrent mount/unmount operations. + layer, ok := r.lookup(id) + if !ok { + return nil, nil, ErrLayerUnknown + } + if len(layer.UIDMap) == 0 && len(layer.GIDMap) == 0 { + // We're not using any mappings, so there aren't any unmapped IDs on parent directories. + return nil, nil, nil + } + if layer.MountPoint == "" { + // We don't know which directories to examine. + return nil, nil, ErrLayerNotMounted + } + // Holding r.mountsLockfile protects us against concurrent mount/unmount operations, but we didn’t + // hold it continuously since the time we loaded the mount data; so it’s possible the layer + // was unmounted in the meantime, or mounted elsewhere. Treat that as if we were run after the unmount, + // = a missing mount, not a filesystem error. 
+ if _, err := system.Lstat(layer.MountPoint); errors.Is(err, os.ErrNotExist) { + return nil, nil, ErrLayerNotMounted + } + rootuid, rootgid, err := idtools.GetRootUIDGID(layer.UIDMap, layer.GIDMap) + if err != nil { + return nil, nil, fmt.Errorf("reading root ID values for layer %q: %w", layer.ID, err) + } + m := idtools.NewIDMappingsFromMaps(layer.UIDMap, layer.GIDMap) + fsuids := make(map[int]struct{}) + fsgids := make(map[int]struct{}) + for dir := filepath.Dir(layer.MountPoint); dir != "" && dir != string(os.PathSeparator); dir = filepath.Dir(dir) { + st, err := system.Stat(dir) + if err != nil { + return nil, nil, fmt.Errorf("read directory ownership: %w", err) + } + lst, err := system.Lstat(dir) + if err != nil { + return nil, nil, err + } + fsuid := int(st.UID()) + fsgid := int(st.GID()) + if _, _, err := m.ToContainer(idtools.IDPair{UID: fsuid, GID: rootgid}); err != nil { + fsuids[fsuid] = struct{}{} + } + if _, _, err := m.ToContainer(idtools.IDPair{UID: rootuid, GID: fsgid}); err != nil { + fsgids[fsgid] = struct{}{} + } + fsuid = int(lst.UID()) + fsgid = int(lst.GID()) + if _, _, err := m.ToContainer(idtools.IDPair{UID: fsuid, GID: rootgid}); err != nil { + fsuids[fsuid] = struct{}{} + } + if _, _, err := m.ToContainer(idtools.IDPair{UID: rootuid, GID: fsgid}); err != nil { + fsgids[fsgid] = struct{}{} + } + } + for uid := range fsuids { + uids = append(uids, uid) + } + for gid := range fsgids { + gids = append(gids, gid) + } + if len(uids) > 1 { + sort.Ints(uids) + } + if len(gids) > 1 { + sort.Ints(gids) + } + return uids, gids, nil +} + +// Requires startWriting. +func (r *layerStore) removeName(layer *Layer, name string) { + layer.Names = stringSliceWithoutValue(layer.Names, name) +} + +// Requires startWriting. +func (r *layerStore) updateNames(id string, names []string, op updateNameOperation) error { + if !r.lockfile.IsReadWrite() { + return fmt.Errorf("not allowed to change layer name assignments at %q: %w", r.layerdir, ErrStoreIsReadOnly) + } + layer, ok := r.lookup(id) + if !ok { + return ErrLayerUnknown + } + oldNames := layer.Names + names, err := applyNameOperation(oldNames, names, op) + if err != nil { + return err + } + for _, name := range oldNames { + delete(r.byname, name) + } + for _, name := range names { + if otherLayer, ok := r.byname[name]; ok { + r.removeName(otherLayer, name) + } + r.byname[name] = layer + } + layer.Names = names + return r.saveFor(layer) +} + +func (r *layerStore) datadir(id string) string { + return filepath.Join(r.layerdir, id) +} + +func (r *layerStore) datapath(id, key string) string { + return filepath.Join(r.datadir(id), makeBigDataBaseName(key)) +} + +// Requires startReading or startWriting. +func (r *layerStore) BigData(id, key string) (io.ReadCloser, error) { + if key == "" { + return nil, fmt.Errorf("can't retrieve layer big data value for empty name: %w", ErrInvalidBigDataName) + } + layer, ok := r.lookup(id) + if !ok { + return nil, fmt.Errorf("locating layer with ID %q: %w", id, ErrLayerUnknown) + } + return os.Open(r.datapath(layer.ID, key)) +} + +// Requires startWriting. 
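+// For example (illustrative only; the key name is hypothetical):
+//
+//	err := r.SetBigData(id, "manifest", bytes.NewReader(data))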
+func (r *layerStore) SetBigData(id, key string, data io.Reader) error {
+	if !r.lockfile.IsReadWrite() {
+		return fmt.Errorf("not allowed to save data items associated with layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
+	}
+	layer, ok := r.lookup(id)
+	if !ok {
+		return fmt.Errorf("locating layer with ID %q to write bigdata: %w", id, ErrLayerUnknown)
+	}
+	return r.setBigData(layer, key, data)
+}
+
+func (r *layerStore) setBigData(layer *Layer, key string, data io.Reader) error {
+	if key == "" {
+		return fmt.Errorf("can't set empty name for layer big data item: %w", ErrInvalidBigDataName)
+	}
+	err := os.MkdirAll(r.datadir(layer.ID), 0o700)
+	if err != nil {
+		return err
+	}
+
+	// NewAtomicFileWriter doesn't overwrite/truncate the existing inode.
+	// BigData() relies on this behaviour when opening the file for read
+	// so that it is either accessing the old data or the new one.
+	writer, err := ioutils.NewAtomicFileWriter(r.datapath(layer.ID, key), 0o600)
+	if err != nil {
+		return fmt.Errorf("opening bigdata file: %w", err)
+	}
+
+	if _, err := io.Copy(writer, data); err != nil {
+		writer.Close()
+		return fmt.Errorf("copying bigdata for the layer: %w", err)
+	}
+	if err := writer.Close(); err != nil {
+		return fmt.Errorf("closing bigdata file for the layer: %w", err)
+	}
+
+	addName := true
+	for _, name := range layer.BigDataNames {
+		if name == key {
+			addName = false
+			break
+		}
+	}
+	if addName {
+		layer.BigDataNames = append(layer.BigDataNames, key)
+		return r.saveFor(layer)
+	}
+	return nil
+}
+
+// Requires startReading or startWriting.
+func (r *layerStore) BigDataNames(id string) ([]string, error) {
+	layer, ok := r.lookup(id)
+	if !ok {
+		return nil, fmt.Errorf("locating layer with ID %q to retrieve bigdata names: %w", id, ErrLayerUnknown)
+	}
+	return copyStringSlice(layer.BigDataNames), nil
+}
+
+// Requires startReading or startWriting.
+func (r *layerStore) Metadata(id string) (string, error) {
+	if layer, ok := r.lookup(id); ok {
+		return layer.Metadata, nil
+	}
+	return "", ErrLayerUnknown
+}
+
+// Requires startWriting.
+func (r *layerStore) SetMetadata(id, metadata string) error {
+	if !r.lockfile.IsReadWrite() {
+		return fmt.Errorf("not allowed to modify layer metadata at %q: %w", r.layerdir, ErrStoreIsReadOnly)
+	}
+	if layer, ok := r.lookup(id); ok {
+		layer.Metadata = metadata
+		return r.saveFor(layer)
+	}
+	return ErrLayerUnknown
+}
+
+func (r *layerStore) tspath(id string) string {
+	return filepath.Join(r.layerdir, id+tarSplitSuffix)
+}
+
+// layerHasIncompleteFlag returns true if layer.Flags contains an incompleteFlag set to true.
+// The caller must hold r.inProcessLock for reading.
+func layerHasIncompleteFlag(layer *Layer) bool {
+	if layer.Flags == nil {
+		return false
+	}
+	if flagValue, ok := layer.Flags[incompleteFlag]; ok {
+		if b, ok := flagValue.(bool); ok && b {
+			return true
+		}
+	}
+	return false
+}
+
+// Requires startWriting.
+func (r *layerStore) deleteInternal(id string) error {
+	if !r.lockfile.IsReadWrite() {
+		return fmt.Errorf("not allowed to delete layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
+	}
+	layer, ok := r.lookup(id)
+	if !ok {
+		return ErrLayerUnknown
+	}
+	// Ensure that if we are interrupted, the layer will be cleaned up.
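+	// (A read-write load() deletes layers that still carry incompleteFlag,
+	// so an interrupted deletion is retried the next time the store loads.)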
+ if !layerHasIncompleteFlag(layer) { + if layer.Flags == nil { + layer.Flags = make(map[string]interface{}) + } + layer.Flags[incompleteFlag] = true + if err := r.saveFor(layer); err != nil { + return err + } + } + // We never unset incompleteFlag; below, we remove the entire object from r.layers. + id = layer.ID + if err := r.driver.Remove(id); err != nil && !errors.Is(err, os.ErrNotExist) { + return err + } + os.Remove(r.tspath(id)) + os.RemoveAll(r.datadir(id)) + delete(r.byid, id) + for _, name := range layer.Names { + delete(r.byname, name) + } + // This can only fail if the ID is already missing, which shouldn’t + // happen — and in that case the index is already in the desired state + // anyway. The store’s Delete method is used on various paths to + // recover from failures, so this should be robust against partially + // missing data. + _ = r.idindex.Delete(id) + mountLabel := layer.MountLabel + if layer.MountPoint != "" { + delete(r.bymount, layer.MountPoint) + } + r.deleteInDigestMap(id) + toDeleteIndex := -1 + for i, candidate := range r.layers { + if candidate.ID == id { + toDeleteIndex = i + break + } + } + if toDeleteIndex != -1 { + // delete the layer at toDeleteIndex + if toDeleteIndex == len(r.layers)-1 { + r.layers = r.layers[:len(r.layers)-1] + } else { + r.layers = append(r.layers[:toDeleteIndex], r.layers[toDeleteIndex+1:]...) + } + } + if mountLabel != "" { + var found bool + for _, candidate := range r.layers { + if candidate.MountLabel == mountLabel { + found = true + break + } + } + if !found { + selinux.ReleaseLabel(mountLabel) + } + } + return nil +} + +// Requires startWriting. +func (r *layerStore) deleteInDigestMap(id string) { + for digest, layers := range r.bycompressedsum { + for i, layerID := range layers { + if layerID == id { + layers = append(layers[:i], layers[i+1:]...) + r.bycompressedsum[digest] = layers + break + } + } + } + for digest, layers := range r.byuncompressedsum { + for i, layerID := range layers { + if layerID == id { + layers = append(layers[:i], layers[i+1:]...) + r.byuncompressedsum[digest] = layers + break + } + } + } +} + +// Requires startWriting. +func (r *layerStore) Delete(id string) error { + layer, ok := r.lookup(id) + if !ok { + return ErrLayerUnknown + } + id = layer.ID + // The layer may already have been explicitly unmounted, but if not, we + // should try to clean that up before we start deleting anything at the + // driver level. + for { + _, err := r.unmount(id, false, true) + if err == ErrLayerNotMounted { + break + } + if err != nil { + return err + } + } + if err := r.deleteInternal(id); err != nil { + return err + } + return r.saveFor(layer) +} + +// Requires startReading or startWriting. +func (r *layerStore) Exists(id string) bool { + _, ok := r.lookup(id) + return ok +} + +// Requires startReading or startWriting. +func (r *layerStore) Get(id string) (*Layer, error) { + if layer, ok := r.lookup(id); ok { + return copyLayer(layer), nil + } + return nil, ErrLayerUnknown +} + +// Requires startWriting. 
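+// Layers are deleted newest-first (by Created), presumably so that child
+// layers are removed before the layers they were built on.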
+func (r *layerStore) Wipe() error {
+	if !r.lockfile.IsReadWrite() {
+		return fmt.Errorf("not allowed to delete layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
+	}
+	ids := make([]string, 0, len(r.byid))
+	for id := range r.byid {
+		ids = append(ids, id)
+	}
+	sort.Slice(ids, func(i, j int) bool {
+		return r.byid[ids[i]].Created.After(r.byid[ids[j]].Created)
+	})
+	for _, id := range ids {
+		if err := r.Delete(id); err != nil {
+			return err
+		}
+	}
+	ids, err := r.driver.ListLayers()
+	if err != nil {
+		if !errors.Is(err, drivers.ErrNotSupported) {
+			return err
+		}
+		ids = nil
+	}
+	for _, id := range ids {
+		if err := r.driver.Remove(id); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Requires startReading or startWriting.
+func (r *layerStore) findParentAndLayer(from, to string) (fromID string, toID string, fromLayer, toLayer *Layer, err error) {
+	var ok bool
+	toLayer, ok = r.lookup(to)
+	if !ok {
+		return "", "", nil, nil, ErrLayerUnknown
+	}
+	to = toLayer.ID
+	if from == "" {
+		from = toLayer.Parent
+	}
+	if from != "" {
+		fromLayer, ok = r.lookup(from)
+		if ok {
+			from = fromLayer.ID
+		} else {
+			fromLayer, ok = r.lookup(toLayer.Parent)
+			if ok {
+				from = fromLayer.ID
+			}
+		}
+	}
+	return from, to, fromLayer, toLayer, nil
+}
+
+// The caller must hold r.inProcessLock for reading.
+func (r *layerStore) layerMappings(layer *Layer) *idtools.IDMappings {
+	if layer == nil {
+		return &idtools.IDMappings{}
+	}
+	return idtools.NewIDMappingsFromMaps(layer.UIDMap, layer.GIDMap)
+}
+
+// Requires startReading or startWriting.
+func (r *layerStore) Changes(from, to string) ([]archive.Change, error) {
+	from, to, fromLayer, toLayer, err := r.findParentAndLayer(from, to)
+	if err != nil {
+		return nil, ErrLayerUnknown
+	}
+	return r.driver.Changes(to, r.layerMappings(toLayer), from, r.layerMappings(fromLayer), toLayer.MountLabel)
+}
+
+type simpleGetCloser struct {
+	r *layerStore
+	path string
+	id string
+}
+
+func (s *simpleGetCloser) Get(path string) (io.ReadCloser, error) {
+	return os.Open(filepath.Join(s.path, path))
+}
+
+// LOCKING BUG: See the comments in layerStore.Diff
+func (s *simpleGetCloser) Close() error {
+	_, err := s.r.unmount(s.id, false, false)
+	return err
+}
+
+// LOCKING BUG: See the comments in layerStore.Diff
+func (r *layerStore) newFileGetter(id string) (drivers.FileGetCloser, error) {
+	if getter, ok := r.driver.(drivers.DiffGetterDriver); ok {
+		return getter.DiffGetter(id)
+	}
+	path, err := r.Mount(id, drivers.MountOpts{})
+	if err != nil {
+		return nil, err
+	}
+	return &simpleGetCloser{
+		r: r,
+		path: path,
+		id: id,
+	}, nil
+}
+
+// writeCompressedData copies data from source to compressor, which is on top of pwriter.
+func writeCompressedData(compressor io.WriteCloser, source io.ReadCloser) error {
+	defer compressor.Close()
+	defer source.Close()
+	_, err := io.Copy(compressor, source)
+	return err
+}
+
+// writeCompressedDataGoroutine copies data from source to compressor, which is on top of pwriter.
+// All errors must be reported by updating pwriter.
+func writeCompressedDataGoroutine(pwriter *io.PipeWriter, compressor io.WriteCloser, source io.ReadCloser) {
+	err := errors.New("internal error: unexpected panic in writeCompressedDataGoroutine")
+	defer func() { // Note that this is not the same as {defer pwriter.CloseWithError(err)}; we need err to be evaluated lazily.
+		_ = pwriter.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
+	}()
+	err = writeCompressedData(compressor, source)
+}
+
+// Requires startReading or startWriting.
+func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) {
+	var metadata storage.Unpacker
+
+	from, to, fromLayer, toLayer, err := r.findParentAndLayer(from, to)
+	if err != nil {
+		return nil, ErrLayerUnknown
+	}
+	// Default to applying the type of compression that we noted was used
+	// for the layer diff when it was applied.
+	compression := toLayer.CompressionType
+	// If a particular compression type (or no compression) was selected,
+	// use that instead.
+	if options != nil && options.Compression != nil {
+		compression = *options.Compression
+	}
+	maybeCompressReadCloser := func(rc io.ReadCloser) (io.ReadCloser, error) {
+		// Depending on whether or not compression is desired, return either the
+		// passed-in ReadCloser, or a new one that provides its readers with a
+		// compressed version of the data that the original would have provided
+		// to its readers.
+		if compression == archive.Uncompressed {
+			return rc, nil
+		}
+		preader, pwriter := io.Pipe()
+		compressor, err := archive.CompressStream(pwriter, compression)
+		if err != nil {
+			rc.Close()
+			pwriter.Close()
+			preader.Close()
+			return nil, err
+		}
+		go writeCompressedDataGoroutine(pwriter, compressor, rc)
+		return preader, nil
+	}
+
+	if from != toLayer.Parent {
+		diff, err := r.driver.Diff(to, r.layerMappings(toLayer), from, r.layerMappings(fromLayer), toLayer.MountLabel)
+		if err != nil {
+			return nil, err
+		}
+		return maybeCompressReadCloser(diff)
+	}
+
+	if ad, ok := r.driver.(drivers.AdditionalLayerStoreDriver); ok {
+		if aLayer, err := ad.LookupAdditionalLayerByID(to); err == nil {
+			// This is an additional layer. We leverage the blob API to acquire the reproduced raw blob.
+			info, err := aLayer.Info()
+			if err != nil {
+				aLayer.Release()
+				return nil, err
+			}
+			defer info.Close()
+			layer := &Layer{}
+			if err := json.NewDecoder(info).Decode(layer); err != nil {
+				aLayer.Release()
+				return nil, err
+			}
+			blob, err := aLayer.Blob()
+			if err != nil {
+				aLayer.Release()
+				return nil, err
+			}
+			// If the layer's compression type is different from the expected one, decompress and convert it.
+			if compression != layer.CompressionType {
+				diff, err := archive.DecompressStream(blob)
+				if err != nil {
+					if err2 := blob.Close(); err2 != nil {
+						err = fmt.Errorf("failed to close blob file: %v: %w", err2, err)
+					}
+					aLayer.Release()
+					return nil, err
+				}
+				rc, err := maybeCompressReadCloser(diff)
+				if err != nil {
+					if err2 := closeAll(blob.Close, diff.Close); err2 != nil {
+						err = fmt.Errorf("failed to cleanup: %v: %w", err2, err)
+					}
+					aLayer.Release()
+					return nil, err
+				}
+				return ioutils.NewReadCloserWrapper(rc, func() error {
+					defer aLayer.Release()
+					return closeAll(blob.Close, rc.Close)
+				}), nil
+			}
+			return ioutils.NewReadCloserWrapper(blob, func() error { defer aLayer.Release(); return blob.Close() }), nil
+		}
+	}
+
+	tsfile, err := os.Open(r.tspath(to))
+	if err != nil {
+		if !os.IsNotExist(err) {
+			return nil, err
+		}
+		diff, err := r.driver.Diff(to, r.layerMappings(toLayer), from, r.layerMappings(fromLayer), toLayer.MountLabel)
+		if err != nil {
+			return nil, err
+		}
+		return maybeCompressReadCloser(diff)
+	}
+
+	decompressor, err := pgzip.NewReader(tsfile)
+	if err != nil {
+		if e := tsfile.Close(); e != nil {
+			logrus.Debug(e)
+		}
+		return nil, err
+	}
+
+	metadata = storage.NewJSONUnpacker(decompressor)
+
+	// LOCKING BUG: With btrfs and zfs graph drivers, this uses r.Mount() and r.unmount() holding layerStore only locked for reading
+	// but they modify in-memory state.
+	fgetter, err := r.newFileGetter(to)
+	if err != nil {
+		errs := multierror.Append(nil, fmt.Errorf("creating file-getter: %w", err))
+		if err := decompressor.Close(); err != nil {
+			errs = multierror.Append(errs, fmt.Errorf("closing decompressor: %w", err))
+		}
+		if err := tsfile.Close(); err != nil {
+			errs = multierror.Append(errs, fmt.Errorf("closing tarstream headers: %w", err))
+		}
+		return nil, errs.ErrorOrNil()
+	}
+
+	tarstream := asm.NewOutputTarStream(fgetter, metadata)
+	rc := ioutils.NewReadCloserWrapper(tarstream, func() error {
+		var errs *multierror.Error
+		if err := decompressor.Close(); err != nil {
+			errs = multierror.Append(errs, fmt.Errorf("closing decompressor: %w", err))
+		}
+		if err := tsfile.Close(); err != nil {
+			errs = multierror.Append(errs, fmt.Errorf("closing tarstream headers: %w", err))
+		}
+		if err := tarstream.Close(); err != nil {
+			errs = multierror.Append(errs, fmt.Errorf("closing reconstructed tarstream: %w", err))
+		}
+		if err := fgetter.Close(); err != nil {
+			errs = multierror.Append(errs, fmt.Errorf("closing file-getter: %w", err))
+		}
+		if errs != nil {
+			return errs.ErrorOrNil()
+		}
+		return nil
+	})
+	return maybeCompressReadCloser(rc)
+}
+
+// Requires startReading or startWriting.
+func (r *layerStore) DiffSize(from, to string) (size int64, err error) {
+	var fromLayer, toLayer *Layer
+	from, to, fromLayer, toLayer, err = r.findParentAndLayer(from, to)
+	if err != nil {
+		return -1, ErrLayerUnknown
+	}
+	return r.driver.DiffSize(to, r.layerMappings(toLayer), from, r.layerMappings(fromLayer), toLayer.MountLabel)
+}
+
+// Requires startWriting.
+func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error) {
+	return r.applyDiffWithOptions(to, nil, diff)
+}
+
+// Requires startWriting.
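+// In outline, the implementation below tees the incoming diff through
+// digesters and byte counters (compressed and uncompressed), records a
+// tar-split copy of the stream for later reconstruction by Diff(), hands the
+// payload to the graph driver, and finally updates the layer's digests,
+// sizes, compression type, and UID/GID lists before saving.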
+func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions, diff io.Reader) (size int64, err error) {
+	if !r.lockfile.IsReadWrite() {
+		return -1, fmt.Errorf("not allowed to modify layer contents at %q: %w", r.layerdir, ErrStoreIsReadOnly)
+	}
+
+	layer, ok := r.lookup(to)
+	if !ok {
+		return -1, ErrLayerUnknown
+	}
+
+	header := make([]byte, 10240)
+	n, err := diff.Read(header)
+	if err != nil && err != io.EOF {
+		return -1, err
+	}
+	compression := archive.DetectCompression(header[:n])
+	defragmented := io.MultiReader(bytes.NewReader(header[:n]), diff)
+
+	// Decide if we need to compute digests
+	var compressedDigest, uncompressedDigest digest.Digest // = ""
+	var compressedDigester, uncompressedDigester digest.Digester // = nil
+	if layerOptions != nil && layerOptions.OriginalDigest != "" &&
+		layerOptions.OriginalDigest.Algorithm() == digest.Canonical {
+		compressedDigest = layerOptions.OriginalDigest
+	} else {
+		compressedDigester = digest.Canonical.Digester()
+	}
+	if layerOptions != nil && layerOptions.UncompressedDigest != "" &&
+		layerOptions.UncompressedDigest.Algorithm() == digest.Canonical {
+		uncompressedDigest = layerOptions.UncompressedDigest
+	} else {
+		uncompressedDigester = digest.Canonical.Digester()
+	}
+
+	var compressedWriter io.Writer
+	if compressedDigester != nil {
+		compressedWriter = compressedDigester.Hash()
+	} else {
+		compressedWriter = io.Discard
+	}
+	compressedCounter := ioutils.NewWriteCounter(compressedWriter)
+	defragmented = io.TeeReader(defragmented, compressedCounter)
+
+	tsdata := bytes.Buffer{}
+	uidLog := make(map[uint32]struct{})
+	gidLog := make(map[uint32]struct{})
+	var uncompressedCounter *ioutils.WriteCounter
+
+	size, err = func() (int64, error) { // A scope for defer
+		compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed)
+		if err != nil {
+			return -1, err
+		}
+		defer compressor.Close() // This must happen before tsdata is consumed.
+		if err := compressor.SetConcurrency(1024*1024, 1); err != nil { // 1024*1024 is the hard-coded default; we're not changing that
+			logrus.Infof("setting compression concurrency threads to 1: %v; ignoring", err)
+		}
+		metadata := storage.NewJSONPacker(compressor)
+		uncompressed, err := archive.DecompressStream(defragmented)
+		if err != nil {
+			return -1, err
+		}
+		defer uncompressed.Close()
+		idLogger, err := tarlog.NewLogger(func(h *tar.Header) {
+			if !strings.HasPrefix(path.Base(h.Name), archive.WhiteoutPrefix) {
+				uidLog[uint32(h.Uid)] = struct{}{}
+				gidLog[uint32(h.Gid)] = struct{}{}
+			}
+		})
+		if err != nil {
+			return -1, err
+		}
+		defer idLogger.Close() // This must happen before uidLog and gidLog are consumed.
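+		// What follows fans the uncompressed stream out in a single pass: the
+		// tee below feeds the byte counter, the UID/GID logger, and (when
+		// needed) the uncompressed digester, while asm.NewInputTarStream
+		// records the tar-split metadata as the graph driver consumes the
+		// payload.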
+ uncompressedCounter = ioutils.NewWriteCounter(idLogger) + uncompressedWriter := (io.Writer)(uncompressedCounter) + if uncompressedDigester != nil { + uncompressedWriter = io.MultiWriter(uncompressedWriter, uncompressedDigester.Hash()) + } + payload, err := asm.NewInputTarStream(io.TeeReader(uncompressed, uncompressedWriter), metadata, storage.NewDiscardFilePutter()) + if err != nil { + return -1, err + } + options := drivers.ApplyDiffOpts{ + Diff: payload, + Mappings: r.layerMappings(layer), + MountLabel: layer.MountLabel, + } + size, err := r.driver.ApplyDiff(layer.ID, layer.Parent, options) + if err != nil { + return -1, err + } + return size, err + }() + if err != nil { + return -1, err + } + + if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0o700); err != nil { + return -1, err + } + if err := ioutils.AtomicWriteFile(r.tspath(layer.ID), tsdata.Bytes(), 0o600); err != nil { + return -1, err + } + if compressedDigester != nil { + compressedDigest = compressedDigester.Digest() + } + if uncompressedDigester != nil { + uncompressedDigest = uncompressedDigester.Digest() + } + + updateDigestMap := func(m *map[digest.Digest][]string, oldvalue, newvalue digest.Digest, id string) { + var newList []string + if oldvalue != "" { + for _, value := range (*m)[oldvalue] { + if value != id { + newList = append(newList, value) + } + } + if len(newList) > 0 { + (*m)[oldvalue] = newList + } else { + delete(*m, oldvalue) + } + } + if newvalue != "" { + (*m)[newvalue] = append((*m)[newvalue], id) + } + } + updateDigestMap(&r.bycompressedsum, layer.CompressedDigest, compressedDigest, layer.ID) + layer.CompressedDigest = compressedDigest + layer.CompressedSize = compressedCounter.Count + updateDigestMap(&r.byuncompressedsum, layer.UncompressedDigest, uncompressedDigest, layer.ID) + layer.UncompressedDigest = uncompressedDigest + layer.UncompressedSize = uncompressedCounter.Count + layer.CompressionType = compression + layer.UIDs = make([]uint32, 0, len(uidLog)) + for uid := range uidLog { + layer.UIDs = append(layer.UIDs, uid) + } + sort.Slice(layer.UIDs, func(i, j int) bool { + return layer.UIDs[i] < layer.UIDs[j] + }) + layer.GIDs = make([]uint32, 0, len(gidLog)) + for gid := range gidLog { + layer.GIDs = append(layer.GIDs, gid) + } + sort.Slice(layer.GIDs, func(i, j int) bool { + return layer.GIDs[i] < layer.GIDs[j] + }) + + err = r.saveFor(layer) + + return size, err +} + +// Requires (startReading or?) startWriting. +func (r *layerStore) DifferTarget(id string) (string, error) { + ddriver, ok := r.driver.(drivers.DriverWithDiffer) + if !ok { + return "", ErrNotSupported + } + layer, ok := r.lookup(id) + if !ok { + return "", ErrLayerUnknown + } + return ddriver.DifferTarget(layer.ID) +} + +// Requires startWriting. 
+func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error { + ddriver, ok := r.driver.(drivers.DriverWithDiffer) + if !ok { + return ErrNotSupported + } + layer, ok := r.lookup(id) + if !ok { + return ErrLayerUnknown + } + if options == nil { + options = &drivers.ApplyDiffOpts{ + Mappings: r.layerMappings(layer), + MountLabel: layer.MountLabel, + } + } + err := ddriver.ApplyDiffFromStagingDirectory(layer.ID, layer.Parent, stagingDirectory, diffOutput, options) + if err != nil { + return err + } + layer.UIDs = diffOutput.UIDs + layer.GIDs = diffOutput.GIDs + layer.UncompressedDigest = diffOutput.UncompressedDigest + layer.UncompressedSize = diffOutput.Size + layer.Metadata = diffOutput.Metadata + if len(diffOutput.TarSplit) != 0 { + tsdata := bytes.Buffer{} + compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed) + if err != nil { + compressor = pgzip.NewWriter(&tsdata) + } + if err := compressor.SetConcurrency(1024*1024, 1); err != nil { // 1024*1024 is the hard-coded default; we're not changing that + logrus.Infof("setting compression concurrency threads to 1: %v; ignoring", err) + } + if _, err := compressor.Write(diffOutput.TarSplit); err != nil { + compressor.Close() + return err + } + compressor.Close() + if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0o700); err != nil { + return err + } + if err := ioutils.AtomicWriteFile(r.tspath(layer.ID), tsdata.Bytes(), 0o600); err != nil { + return err + } + } + for k, v := range diffOutput.BigData { + if err := r.SetBigData(id, k, bytes.NewReader(v)); err != nil { + if err2 := r.Delete(id); err2 != nil { + logrus.Errorf("While recovering from a failure to set big data, error deleting layer %#v: %v", id, err2) + } + return err + } + } + if err = r.saveFor(layer); err != nil { + return err + } + return err +} + +// Requires startWriting. +func (r *layerStore) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) { + ddriver, ok := r.driver.(drivers.DriverWithDiffer) + if !ok { + return nil, ErrNotSupported + } + + if to == "" { + output, err := ddriver.ApplyDiffWithDiffer("", "", options, differ) + return &output, err + } + + layer, ok := r.lookup(to) + if !ok { + return nil, ErrLayerUnknown + } + if options == nil { + options = &drivers.ApplyDiffOpts{ + Mappings: r.layerMappings(layer), + MountLabel: layer.MountLabel, + } + } + output, err := ddriver.ApplyDiffWithDiffer(layer.ID, layer.Parent, options, differ) + if err != nil { + return nil, err + } + layer.UIDs = output.UIDs + layer.GIDs = output.GIDs + err = r.saveFor(layer) + return &output, err +} + +func (r *layerStore) CleanupStagingDirectory(stagingDirectory string) error { + ddriver, ok := r.driver.(drivers.DriverWithDiffer) + if !ok { + return ErrNotSupported + } + return ddriver.CleanupStagingDirectory(stagingDirectory) +} + +// Requires startReading or startWriting. +func (r *layerStore) layersByDigestMap(m map[digest.Digest][]string, d digest.Digest) ([]Layer, error) { + var layers []Layer + for _, layerID := range m[d] { + layer, ok := r.lookup(layerID) + if !ok { + return nil, ErrLayerUnknown + } + layers = append(layers, *copyLayer(layer)) + } + return layers, nil +} + +// Requires startReading or startWriting. 
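The two lookups that follow read the store's digest indexes back out. For clarity, a standalone sketch of the bookkeeping that updateDigestMap above performs on those indexes (hypothetical helper name, plain strings in place of digest.Digest; not code from this patch):

package main

import "fmt"

// moveDigestIndex mirrors updateDigestMap: a digest-to-layer-IDs index drops
// the layer from its old digest entry (deleting the entry when it empties)
// and appends it under the new digest. LayersByCompressedDigest and
// LayersByUncompressedDigest below simply read these indexes.
func moveDigestIndex(m map[string][]string, oldDigest, newDigest, layerID string) {
	if oldDigest != "" {
		var kept []string
		for _, id := range m[oldDigest] {
			if id != layerID {
				kept = append(kept, id)
			}
		}
		if len(kept) > 0 {
			m[oldDigest] = kept
		} else {
			delete(m, oldDigest)
		}
	}
	if newDigest != "" {
		m[newDigest] = append(m[newDigest], layerID)
	}
}

func main() {
	index := map[string][]string{"sha256:old": {"layer1", "layer2"}}
	moveDigestIndex(index, "sha256:old", "sha256:new", "layer1")
	fmt.Println(index) // map[sha256:new:[layer1] sha256:old:[layer2]]
}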
+func (r *layerStore) LayersByCompressedDigest(d digest.Digest) ([]Layer, error) {
+	return r.layersByDigestMap(r.bycompressedsum, d)
+}
+
+// Requires startReading or startWriting.
+func (r *layerStore) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) {
+	return r.layersByDigestMap(r.byuncompressedsum, d)
+}
+
+func closeAll(closes ...func() error) (rErr error) {
+	for _, f := range closes {
+		if err := f(); err != nil {
+			if rErr == nil {
+				rErr = fmt.Errorf("close error: %w", err)
+				continue
+			}
+			rErr = fmt.Errorf("%v: %w", err, rErr)
+		}
+	}
+	return
+}
diff --git a/vendor/github.com/containers/storage/lockfile_compat.go b/vendor/github.com/containers/storage/lockfile_compat.go
new file mode 100644
index 0000000000..640203881a
--- /dev/null
+++ b/vendor/github.com/containers/storage/lockfile_compat.go
@@ -0,0 +1,18 @@
+package storage
+
+import (
+	"github.com/containers/storage/pkg/lockfile"
+)
+
+// Deprecated: Use lockfile.*LockFile.
+type Locker = lockfile.Locker //lint:ignore SA1019 // lockfile.Locker is deprecated
+
+// Deprecated: Use lockfile.GetLockFile.
+func GetLockfile(path string) (lockfile.Locker, error) {
+	return lockfile.GetLockfile(path)
+}
+
+// Deprecated: Use lockfile.GetROLockFile.
+func GetROLockfile(path string) (lockfile.Locker, error) {
+	return lockfile.GetROLockfile(path)
+}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
new file mode 100644
index 0000000000..f221a22835
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
@@ -0,0 +1,176 @@
+package chrootarchive
+
+import (
+	stdtar "archive/tar"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"sync"
+
+	"github.com/containers/storage/pkg/archive"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/unshare"
+)
+
+// NewArchiver returns a new Archiver which uses chrootarchive.Untar
+func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver {
+	archiver := archive.NewArchiver(idMappings)
+	archiver.Untar = Untar
+	return archiver
+}
+
+// NewArchiverWithChown returns a new Archiver which uses chrootarchive.Untar and the provided ID mapping configuration on both ends
+func NewArchiverWithChown(tarIDMappings *idtools.IDMappings, chownOpts *idtools.IDPair, untarIDMappings *idtools.IDMappings) *archive.Archiver {
+	archiver := archive.NewArchiverWithChown(tarIDMappings, chownOpts, untarIDMappings)
+	archiver.Untar = Untar
+	return archiver
+}
+
+// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive may be compressed with one of the following algorithms:
+//
+//	identity (uncompressed), gzip, bzip2, xz.
+func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
+	return untarHandler(tarArchive, dest, options, true, dest)
+}
+
+// UntarWithRoot is the same as `Untar`, but allows you to pass in a root directory.
+// The root directory is the directory that will be chrooted to.
+// `dest` must be a path within `root`; if it is not, an error will be returned.
+//
+// `root` should be set to a directory which is not controlled by any potentially
+// malicious process.
+//
+// This should be used to prevent a potential attacker from manipulating `dest`
+// such that it would provide access to files outside of `dest` through things
+// like symlinks.
+// Normally `ResolveSymlinksInScope` would handle this; however,
+// sanitizing symlinks in this manner is inherently racy:
+// ref: CVE-2018-15664
+func UntarWithRoot(tarArchive io.Reader, dest string, options *archive.TarOptions, root string) error {
+	return untarHandler(tarArchive, dest, options, true, root)
+}
+
+// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive must be an uncompressed stream.
+func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
+	return untarHandler(tarArchive, dest, options, false, dest)
+}
+
+// Handler for teasing out the automatic decompression
+func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool, root string) error {
+	if tarArchive == nil {
+		return fmt.Errorf("empty archive")
+	}
+	if options == nil {
+		options = &archive.TarOptions{}
+		options.InUserNS = unshare.IsRootless()
+	}
+	if options.ExcludePatterns == nil {
+		options.ExcludePatterns = []string{}
+	}
+
+	idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
+	rootIDs := idMappings.RootPair()
+
+	dest = filepath.Clean(dest)
+	if _, err := os.Stat(dest); os.IsNotExist(err) {
+		if err := idtools.MkdirAllAndChownNew(dest, 0o755, rootIDs); err != nil {
+			return err
+		}
+	}
+
+	r := tarArchive
+	if decompress {
+		decompressedArchive, err := archive.DecompressStream(tarArchive)
+		if err != nil {
+			return err
+		}
+		defer decompressedArchive.Close()
+		r = decompressedArchive
+	}
+
+	return invokeUnpack(r, dest, options, root)
+}
+
+// Tar tars the requested path while chrooted to the specified root.
+func Tar(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
+	if options == nil {
+		options = &archive.TarOptions{}
+	}
+	return invokePack(srcPath, options, root)
+}
+
+// CopyFileWithTarAndChown returns a function which copies a single file from outside
+// of any container into our working container, mapping permissions using the
+// container's ID maps, possibly overridden using the passed-in chownOpts
+func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error {
+	untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap)
+	archiver := NewArchiverWithChown(nil, chownOpts, untarMappings)
+	if hasher != nil {
+		originalUntar := archiver.Untar
+		archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
+			contentReader, contentWriter, err := os.Pipe()
+			if err != nil {
+				return fmt.Errorf("creating pipe extract data to %q: %w", dest, err)
+			}
+			defer contentReader.Close()
+			defer contentWriter.Close()
+			var hashError error
+			var hashWorker sync.WaitGroup
+			hashWorker.Add(1)
+			go func() {
+				t := stdtar.NewReader(contentReader)
+				_, err := t.Next()
+				if err != nil {
+					hashError = err
+				}
+				if _, err = io.Copy(hasher, t); err != nil && err != io.EOF {
+					hashError = err
+				}
+				hashWorker.Done()
+			}()
+			if err = originalUntar(io.TeeReader(tarArchive, contentWriter), dest, options); err != nil {
+				err = fmt.Errorf("extracting data to %q while copying: %w", dest, err)
+			}
+			hashWorker.Wait()
+			if err == nil && hashError != nil {
+				err = fmt.Errorf("calculating digest of data for %q while copying: %w", dest, hashError)
+			}
+			return err
+		}
+	}
+	return archiver.CopyFileWithTar
+}
+
+// CopyWithTarAndChown returns a function which copies a directory
+// tree from outside of any container into our working container, mapping
+// permissions using the container's ID maps, possibly overridden using the
+// passed-in chownOpts
+func CopyWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error {
+	untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap)
+	archiver := NewArchiverWithChown(nil, chownOpts, untarMappings)
+	if hasher != nil {
+		originalUntar := archiver.Untar
+		archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
+			return originalUntar(io.TeeReader(tarArchive, hasher), dest, options)
+		}
+	}
+	return archiver.CopyWithTar
+}
+
+// UntarPathAndChown returns a function which extracts an archive in a specified
+// location into our working container, mapping permissions using the
+// container's ID maps, possibly overridden using the passed-in chownOpts
+func UntarPathAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error {
+	untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap)
+	archiver := NewArchiverWithChown(nil, chownOpts, untarMappings)
+	if hasher != nil {
+		originalUntar := archiver.Untar
+		archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
+			return originalUntar(io.TeeReader(tarArchive, hasher), dest, options)
+		}
+	}
+	return archiver.UntarPath
+}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go
new file mode 100644
index 0000000000..f7a16e9f98
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go
@@ -0,0 +1,18 @@
+package chrootarchive
+
+import (
+	"io"
+
+	"github.com/containers/storage/pkg/archive"
+)
+
+func invokeUnpack(decompressedArchive io.Reader,
+	dest string,
+	options *archive.TarOptions, root string,
+) error {
+	return archive.Unpack(decompressedArchive, dest, options)
+}
+
+func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
+	return archive.TarWithOptions(srcPath, options)
+}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go
new file mode 100644
index 0000000000..259f8c99a1
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go
@@ -0,0 +1,209 @@
+//go:build !windows && !darwin
+// +build !windows,!darwin
+
+package chrootarchive
+
+import (
+	"bytes"
+	"errors"
+	"flag"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+
+	"github.com/containers/storage/pkg/archive"
+	"github.com/containers/storage/pkg/reexec"
+)
+
+// untar is the entry-point for storage-untar on re-exec. This is not used on
+// Windows as it does not support chroot, hence no point sandboxing through
+// chroot and reexec.
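untar() below only ever runs in a re-exec'd copy of the current binary. A minimal standalone sketch of that reexec pattern (the entry-point name "demo-worker" is hypothetical; not code from this patch):

package main

import (
	"fmt"
	"os"

	"github.com/containers/storage/pkg/reexec"
)

func init() {
	// Register a named entry point that runs instead of main() when the
	// binary is re-executed under this name.
	reexec.Register("demo-worker", func() {
		// In the real code this is where chroot(root) and archive.Unpack run.
		fmt.Fprintln(os.Stderr, "running as re-exec'd worker")
		os.Exit(0)
	})
}

func main() {
	if reexec.Init() { // true when argv[0] matched a registered name
		return
	}
	// Parent side: re-exec ourselves as the registered worker.
	cmd := reexec.Command("demo-worker")
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		fmt.Fprintln(os.Stderr, "worker failed:", err)
	}
}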
+func untar() {
+	runtime.LockOSThread()
+	flag.Parse()
+
+	var options archive.TarOptions
+
+	// read the options from the pipe "ExtraFiles"
+	if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil {
+		fatal(err)
+	}
+
+	dst := flag.Arg(0)
+	var root string
+	if len(flag.Args()) > 1 {
+		root = flag.Arg(1)
+	}
+
+	if root == "" {
+		root = dst
+	}
+
+	if err := chroot(root); err != nil {
+		fatal(err)
+	}
+
+	if err := archive.Unpack(os.Stdin, dst, &options); err != nil {
+		fatal(err)
+	}
+	// fully consume stdin in case it is zero padded
+	if _, err := flush(os.Stdin); err != nil {
+		fatal(err)
+	}
+
+	os.Exit(0)
+}
+
+func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions, root string) error {
+	if root == "" {
+		return errors.New("must specify a root to chroot to")
+	}
+
+	// We can't pass a potentially large exclude list directly via cmd line
+	// because we easily overrun the kernel's max argument/environment size
+	// when the full image list is passed (e.g. when this is used by
+	// `docker load`). We will marshal the options via a pipe to the
+	// child
+	r, w, err := os.Pipe()
+	if err != nil {
+		return fmt.Errorf("untar pipe failure: %w", err)
+	}
+
+	if root != "" {
+		relDest, err := filepath.Rel(root, dest)
+		if err != nil {
+			return err
+		}
+		if relDest == "." {
+			relDest = "/"
+		}
+		if relDest[0] != '/' {
+			relDest = "/" + relDest
+		}
+		dest = relDest
+	}
+
+	cmd := reexec.Command("storage-untar", dest, root)
+	cmd.Stdin = decompressedArchive
+
+	cmd.ExtraFiles = append(cmd.ExtraFiles, r)
+	output := bytes.NewBuffer(nil)
+	cmd.Stdout = output
+	cmd.Stderr = output
+
+	if err := cmd.Start(); err != nil {
+		w.Close()
+		return fmt.Errorf("untar error on re-exec cmd: %w", err)
+	}
+
+	// write the options to the pipe for the untar exec to read
+	if err := json.NewEncoder(w).Encode(options); err != nil {
+		w.Close()
+		return fmt.Errorf("untar json encode to pipe failed: %w", err)
+	}
+	w.Close()
+
+	if err := cmd.Wait(); err != nil {
+		// when `xz -d -c -q | storage-untar ...` failed on storage-untar side,
+		// we need to exhaust `xz`'s output, otherwise the `xz` side will be
+		// pending on write pipe forever
+		io.Copy(io.Discard, decompressedArchive)
+
+		return fmt.Errorf("processing tar file(%s): %w", output, err)
+	}
+	return nil
+}
+
+func tar() {
+	runtime.LockOSThread()
+	flag.Parse()
+
+	src := flag.Arg(0)
+	var root string
+	if len(flag.Args()) > 1 {
+		root = flag.Arg(1)
+	}
+
+	if root == "" {
+		root = src
+	}
+
+	if err := realChroot(root); err != nil {
+		fatal(err)
+	}
+
+	var options archive.TarOptions
+	if err := json.NewDecoder(os.Stdin).Decode(&options); err != nil {
+		fatal(err)
+	}
+
+	rdr, err := archive.TarWithOptions(src, &options)
+	if err != nil {
+		fatal(err)
+	}
+	defer rdr.Close()
+
+	if _, err := io.Copy(os.Stdout, rdr); err != nil {
+		fatal(err)
+	}
+
+	os.Exit(0)
+}
+
+func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
+	if root == "" {
+		return nil, errors.New("root path must not be empty")
+	}
+
+	relSrc, err := filepath.Rel(root, srcPath)
+	if err != nil {
+		return nil, err
+	}
+	if relSrc == "."
{ + relSrc = "/" + } + if relSrc[0] != '/' { + relSrc = "/" + relSrc + } + + // make sure we didn't trim a trailing slash with the call to `Rel` + if strings.HasSuffix(srcPath, "/") && !strings.HasSuffix(relSrc, "/") { + relSrc += "/" + } + + cmd := reexec.Command("storage-tar", relSrc, root) + + errBuff := bytes.NewBuffer(nil) + cmd.Stderr = errBuff + + tarR, tarW := io.Pipe() + cmd.Stdout = tarW + + stdin, err := cmd.StdinPipe() + if err != nil { + return nil, fmt.Errorf("getting options pipe for tar process: %w", err) + } + + if err := cmd.Start(); err != nil { + return nil, fmt.Errorf("tar error on re-exec cmd: %w", err) + } + + go func() { + err := cmd.Wait() + if err != nil { + err = fmt.Errorf("processing tar file(%s): %w", errBuff, err) + } + tarW.CloseWithError(err) + }() + + if err := json.NewEncoder(stdin).Encode(options); err != nil { + stdin.Close() + return nil, fmt.Errorf("tar json encode to pipe failed: %w", err) + } + stdin.Close() + + return tarR, nil +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go new file mode 100644 index 0000000000..7455022040 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go @@ -0,0 +1,30 @@ +package chrootarchive + +import ( + "io" + + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/longpath" +) + +// chroot is not supported by Windows +func chroot(path string) error { + return nil +} + +func invokeUnpack(decompressedArchive io.Reader, + dest string, + options *archive.TarOptions, root string, +) error { + // Windows is different to Linux here because Windows does not support + // chroot. Hence there is no point sandboxing a chrooted process to + // do the unpack. We call inline instead within the daemon process. + return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options) +} + +func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) { + // Windows is different to Linux here because Windows does not support + // chroot. Hence there is no point sandboxing a chrooted process to + // do the pack. We call inline instead within the daemon process. + return archive.TarWithOptions(srcPath, options) +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go new file mode 100644 index 0000000000..09ef6d5de4 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go @@ -0,0 +1,120 @@ +package chrootarchive + +import ( + "fmt" + "net" + "os" + "os/user" + "path/filepath" + + "github.com/containers/storage/pkg/mount" + "github.com/syndtr/gocapability/capability" + "golang.org/x/sys/unix" +) + +// chroot on linux uses pivot_root instead of chroot +// pivot_root takes a new root and an old root. +// Old root must be a sub-dir of new root, it is where the current rootfs will reside after the call to pivot_root. +// New root is where the new rootfs is set to. +// Old root is removed after the call to pivot_root so it is no longer available under the new root. 
+// This is similar to how libcontainer sets up a container's rootfs +func chroot(path string) (err error) { + caps, err := capability.NewPid(0) + if err != nil { + return err + } + + // initialize nss libraries in Glibc so that the dynamic libraries are loaded in the host + // environment not in the chroot from untrusted files. + _, _ = user.Lookup("storage") + _, _ = net.LookupHost("localhost") + + // if the process doesn't have CAP_SYS_ADMIN, but does have CAP_SYS_CHROOT, we need to use the actual chroot + if !caps.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) && caps.Get(capability.EFFECTIVE, capability.CAP_SYS_CHROOT) { + return realChroot(path) + } + + if err := unix.Unshare(unix.CLONE_NEWNS); err != nil { + return fmt.Errorf("creating mount namespace before pivot: %w", err) + } + + // make everything in new ns private + if err := mount.MakeRPrivate("/"); err != nil { + return err + } + + if mounted, _ := mount.Mounted(path); !mounted { + if err := mount.Mount(path, path, "bind", "rbind,rw"); err != nil { + return realChroot(path) + } + } + + // setup oldRoot for pivot_root + pivotDir, err := os.MkdirTemp(path, ".pivot_root") + if err != nil { + return fmt.Errorf("setting up pivot dir: %w", err) + } + + var mounted bool + defer func() { + if mounted { + // make sure pivotDir is not mounted before we try to remove it + if errCleanup := unix.Unmount(pivotDir, unix.MNT_DETACH); errCleanup != nil { + if err == nil { + err = errCleanup + } + return + } + } + + errCleanup := os.Remove(pivotDir) + // pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful + // because we already cleaned it up on failed pivot_root + if errCleanup != nil && !os.IsNotExist(errCleanup) { + errCleanup = fmt.Errorf("cleaning up after pivot: %w", errCleanup) + if err == nil { + err = errCleanup + } + } + }() + + if err := unix.PivotRoot(path, pivotDir); err != nil { + // If pivot fails, fall back to the normal chroot after cleaning up temp dir + if err := os.Remove(pivotDir); err != nil { + return fmt.Errorf("cleaning up after failed pivot: %w", err) + } + return realChroot(path) + } + mounted = true + + // This is the new path for where the old root (prior to the pivot) has been moved to + // This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction + pivotDir = filepath.Join("/", filepath.Base(pivotDir)) + + if err := unix.Chdir("/"); err != nil { + return fmt.Errorf("changing to new root: %w", err) + } + + // Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host + if err := unix.Mount("", pivotDir, "", unix.MS_PRIVATE|unix.MS_REC, ""); err != nil { + return fmt.Errorf("making old root private after pivot: %w", err) + } + + // Now unmount the old root so it's no longer visible from the new root + if err := unix.Unmount(pivotDir, unix.MNT_DETACH); err != nil { + return fmt.Errorf("while unmounting old root after pivot: %w", err) + } + mounted = false + + return nil +} + +func realChroot(path string) error { + if err := unix.Chroot(path); err != nil { + return fmt.Errorf("after fallback to chroot: %w", err) + } + if err := unix.Chdir("/"); err != nil { + return fmt.Errorf("changing to new root after chroot: %w", err) + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go new file mode 100644 index 0000000000..b03e97460b --- /dev/null +++ 
b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go
@@ -0,0 +1,17 @@
+//go:build !windows && !linux && !darwin
+// +build !windows,!linux,!darwin
+
+package chrootarchive
+
+import "golang.org/x/sys/unix"
+
+func realChroot(path string) error {
+	if err := unix.Chroot(path); err != nil {
+		return err
+	}
+	return unix.Chdir("/")
+}
+
+func chroot(path string) error {
+	return realChroot(path)
+}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff.go
new file mode 100644
index 0000000000..68b8f74f77
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff.go
@@ -0,0 +1,23 @@
+package chrootarchive
+
+import (
+	"io"
+
+	"github.com/containers/storage/pkg/archive"
+)
+
+// ApplyLayer parses a diff in the standard layer format from `layer`,
+// and applies it to the directory `dest`. The stream `layer` can be
+// compressed or uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyLayer(dest string, layer io.Reader) (size int64, err error) {
+	return applyLayerHandler(dest, layer, &archive.TarOptions{}, true)
+}
+
+// ApplyUncompressedLayer parses a diff in the standard layer format from
+// `layer`, and applies it to the directory `dest`. The stream `layer`
+// can only be uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyUncompressedLayer(dest string, layer io.Reader, options *archive.TarOptions) (int64, error) {
+	return applyLayerHandler(dest, layer, options, false)
+}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_darwin.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_darwin.go
new file mode 100644
index 0000000000..7b4ea9e51c
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_darwin.go
@@ -0,0 +1,40 @@
+package chrootarchive
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+
+	"github.com/containers/storage/pkg/archive"
+)
+
+// applyLayerHandler parses a diff in the standard layer format from `layer`, and
+// applies it to the directory `dest`. Returns the size in bytes of the
+// contents of the layer.
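Before the per-OS handlers, a sketch of how a caller might use ApplyLayer from diff.go above (paths are hypothetical, and reexec.Init is needed so the storage-applyLayer entry point can run in the re-exec'd child; not code from this patch):

package main

import (
	"fmt"
	"os"

	"github.com/containers/storage/pkg/chrootarchive"
	"github.com/containers/storage/pkg/reexec"
)

func main() {
	if reexec.Init() { // dispatch to the registered re-exec entry points
		return
	}

	// The stream may be compressed; ApplyLayer decompresses it itself.
	layer, err := os.Open("/tmp/layer.tar.gz")
	if err != nil {
		panic(err)
	}
	defer layer.Close()

	size, err := chrootarchive.ApplyLayer("/var/lib/demo/rootfs", layer)
	if err != nil {
		panic(err)
	}
	fmt.Println("unpacked bytes:", size)
}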
+func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
+	dest = filepath.Clean(dest)
+
+	if decompress {
+		decompressed, err := archive.DecompressStream(layer)
+		if err != nil {
+			return 0, err
+		}
+		defer decompressed.Close()
+
+		layer = decompressed
+	}
+
+	tmpDir, err := os.MkdirTemp(os.Getenv("temp"), "temp-storage-extract")
+	if err != nil {
+		return 0, fmt.Errorf("ApplyLayer failed to create temp-storage-extract under %s: %w", dest, err)
+	}
+
+	s, err := archive.UnpackLayer(dest, layer, options)
+	os.RemoveAll(tmpDir)
+	if err != nil {
+		return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %w", layer, dest, err)
+	}
+
+	return s, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go
new file mode 100644
index 0000000000..71ed094d1e
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go
@@ -0,0 +1,128 @@
+//go:build !windows && !darwin
+// +build !windows,!darwin
+
+package chrootarchive
+
+import (
+	"bytes"
+	"flag"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"runtime"
+
+	"github.com/containers/storage/pkg/archive"
+	"github.com/containers/storage/pkg/reexec"
+	"github.com/containers/storage/pkg/system"
+	"github.com/containers/storage/pkg/unshare"
+)
+
+type applyLayerResponse struct {
+	LayerSize int64 `json:"layerSize"`
+}
+
+// applyLayer is the entry-point for storage-applylayer on re-exec. This is not
+// used on Windows as it does not support chroot, hence no point sandboxing
+// through chroot and reexec.
+func applyLayer() {
+	var (
+		tmpDir  string
+		err     error
+		options *archive.TarOptions
+	)
+	runtime.LockOSThread()
+	flag.Parse()
+
+	inUserns := unshare.IsRootless()
+	if err := chroot(flag.Arg(0)); err != nil {
+		fatal(err)
+	}
+
+	// We need to be able to set any perms
+	oldmask, err := system.Umask(0)
+	defer system.Umask(oldmask)
+	if err != nil {
+		fatal(err)
+	}
+
+	if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil {
+		fatal(err)
+	}
+
+	if inUserns {
+		options.InUserNS = true
+	}
+
+	if tmpDir, err = os.MkdirTemp("/", "temp-storage-extract"); err != nil {
+		fatal(err)
+	}
+
+	os.Setenv("TMPDIR", tmpDir)
+	size, err := archive.UnpackLayer("/", os.Stdin, options)
+	os.RemoveAll(tmpDir)
+	if err != nil {
+		fatal(err)
+	}
+
+	encoder := json.NewEncoder(os.Stdout)
+	if err := encoder.Encode(applyLayerResponse{size}); err != nil {
+		fatal(fmt.Errorf("unable to encode layerSize JSON: %w", err))
+	}
+
+	if _, err := flush(os.Stdin); err != nil {
+		fatal(err)
+	}
+
+	os.Exit(0)
+}
+
+// applyLayerHandler parses a diff in the standard layer format from `layer`, and
+// applies it to the directory `dest`. Returns the size in bytes of the
+// contents of the layer.
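The handler below talks to the re-exec'd child through a one-line JSON envelope on the child's stdout. A standalone round-trip of that envelope (stdlib encoding/json here, where the vendored code uses a jsoniter config compatible with it; not code from this patch):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

type applyLayerResponse struct {
	LayerSize int64 `json:"layerSize"`
}

func main() {
	var out bytes.Buffer

	// Child side: report the unpacked size on stdout.
	if err := json.NewEncoder(&out).Encode(applyLayerResponse{LayerSize: 4096}); err != nil {
		panic(err)
	}

	// Parent side: decode the response captured from the child's stdout.
	var resp applyLayerResponse
	if err := json.NewDecoder(&out).Decode(&resp); err != nil {
		panic(err)
	}
	fmt.Println(resp.LayerSize) // 4096
}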
+func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { + dest = filepath.Clean(dest) + if decompress { + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + layer = decompressed + } + if options == nil { + options = &archive.TarOptions{} + if unshare.IsRootless() { + options.InUserNS = true + } + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + data, err := json.Marshal(options) + if err != nil { + return 0, fmt.Errorf("ApplyLayer json encode: %w", err) + } + + cmd := reexec.Command("storage-applyLayer", dest) + cmd.Stdin = layer + cmd.Env = append(os.Environ(), fmt.Sprintf("OPT=%s", data)) + + outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer) + cmd.Stdout, cmd.Stderr = outBuf, errBuf + + if err = cmd.Run(); err != nil { + return 0, fmt.Errorf("ApplyLayer stdout: %s stderr: %s %w", outBuf, errBuf, err) + } + + // Stdout should be a valid JSON struct representing an applyLayerResponse. + response := applyLayerResponse{} + decoder := json.NewDecoder(outBuf) + if err = decoder.Decode(&response); err != nil { + return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %w", err) + } + + return response.LayerSize, nil +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go new file mode 100644 index 0000000000..8bfff5d65b --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go @@ -0,0 +1,44 @@ +package chrootarchive + +import ( + "fmt" + "io" + "os" + "path/filepath" + + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/longpath" +) + +// applyLayerHandler parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. Returns the size in bytes of the +// contents of the layer. +func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { + dest = filepath.Clean(dest) + + // Ensure it is a Windows-style volume path + dest = longpath.AddPrefix(dest) + + if decompress { + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + layer = decompressed + } + + tmpDir, err := os.MkdirTemp(os.Getenv("temp"), "temp-storage-extract") + if err != nil { + return 0, fmt.Errorf("ApplyLayer failed to create temp-storage-extract under %s. 
%s", dest, err) + } + + s, err := archive.UnpackLayer(dest, layer, nil) + os.RemoveAll(tmpDir) + if err != nil { + return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %s", layer, dest, err) + } + + return s, nil +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/init_darwin.go b/vendor/github.com/containers/storage/pkg/chrootarchive/init_darwin.go new file mode 100644 index 0000000000..fa17c9bf83 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/init_darwin.go @@ -0,0 +1,4 @@ +package chrootarchive + +func init() { +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go new file mode 100644 index 0000000000..274a946e2f --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go @@ -0,0 +1,29 @@ +//go:build !windows && !darwin +// +build !windows,!darwin + +package chrootarchive + +import ( + "fmt" + "io" + "os" + + "github.com/containers/storage/pkg/reexec" +) + +func init() { + reexec.Register("storage-applyLayer", applyLayer) + reexec.Register("storage-untar", untar) + reexec.Register("storage-tar", tar) +} + +func fatal(err error) { + fmt.Fprint(os.Stderr, err) + os.Exit(1) +} + +// flush consumes all the bytes from the reader discarding +// any errors +func flush(r io.Reader) (bytes int64, err error) { + return io.Copy(io.Discard, r) +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/init_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/init_windows.go new file mode 100644 index 0000000000..fa17c9bf83 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/init_windows.go @@ -0,0 +1,4 @@ +package chrootarchive + +func init() { +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/jsoniter.go b/vendor/github.com/containers/storage/pkg/chrootarchive/jsoniter.go new file mode 100644 index 0000000000..40eec4dc0a --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/jsoniter.go @@ -0,0 +1,8 @@ +//go:build !windows && !darwin +// +build !windows,!darwin + +package chrootarchive + +import jsoniter "github.com/json-iterator/go" + +var json = jsoniter.ConfigCompatibleWithStandardLibrary diff --git a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go new file mode 100644 index 0000000000..5d4befc234 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go @@ -0,0 +1,664 @@ +package chunked + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "os" + "sort" + "strconv" + "strings" + "sync" + "time" + "unsafe" + + storage "github.com/containers/storage" + graphdriver "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/chunked/internal" + "github.com/containers/storage/pkg/ioutils" + jsoniter "github.com/json-iterator/go" + digest "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" +) + +const ( + cacheKey = "chunked-manifest-cache" + cacheVersion = 1 + + digestSha256Empty = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" +) + +type metadata struct { + tagLen int + digestLen int + tags []byte + vdata []byte +} + +type layer struct { + id string + 
metadata *metadata + target string +} + +type layersCache struct { + layers []layer + refs int + store storage.Store + mutex sync.RWMutex + created time.Time +} + +var ( + cacheMutex sync.Mutex + cache *layersCache +) + +func (c *layersCache) release() { + cacheMutex.Lock() + defer cacheMutex.Unlock() + + c.refs-- + if c.refs == 0 { + cache = nil + } +} + +func getLayersCacheRef(store storage.Store) *layersCache { + cacheMutex.Lock() + defer cacheMutex.Unlock() + if cache != nil && cache.store == store && time.Since(cache.created).Minutes() < 10 { + cache.refs++ + return cache + } + cache := &layersCache{ + store: store, + refs: 1, + created: time.Now(), + } + return cache +} + +func getLayersCache(store storage.Store) (*layersCache, error) { + c := getLayersCacheRef(store) + + if err := c.load(); err != nil { + c.release() + return nil, err + } + return c, nil +} + +func (c *layersCache) load() error { + c.mutex.Lock() + defer c.mutex.Unlock() + + allLayers, err := c.store.Layers() + if err != nil { + return err + } + existingLayers := make(map[string]string) + for _, r := range c.layers { + existingLayers[r.id] = r.target + } + + currentLayers := make(map[string]string) + for _, r := range allLayers { + currentLayers[r.ID] = r.ID + if _, found := existingLayers[r.ID]; found { + continue + } + + bigData, err := c.store.LayerBigData(r.ID, cacheKey) + // if the cache already exists, read and use it + if err == nil { + defer bigData.Close() + metadata, err := readMetadataFromCache(bigData) + if err == nil { + c.addLayer(r.ID, metadata) + continue + } + logrus.Warningf("Error reading cache file for layer %q: %v", r.ID, err) + } else if !errors.Is(err, os.ErrNotExist) { + return err + } + + var lcd chunkedLayerData + + clFile, err := c.store.LayerBigData(r.ID, chunkedLayerDataKey) + if err != nil && !errors.Is(err, os.ErrNotExist) { + return err + } + if clFile != nil { + cl, err := io.ReadAll(clFile) + if err != nil { + return fmt.Errorf("open manifest file for layer %q: %w", r.ID, err) + } + json := jsoniter.ConfigCompatibleWithStandardLibrary + if err := json.Unmarshal(cl, &lcd); err != nil { + return err + } + } + + // otherwise create it from the layer TOC. + manifestReader, err := c.store.LayerBigData(r.ID, bigDataKey) + if err != nil { + continue + } + defer manifestReader.Close() + + manifest, err := io.ReadAll(manifestReader) + if err != nil { + return fmt.Errorf("open manifest file for layer %q: %w", r.ID, err) + } + + metadata, err := writeCache(manifest, lcd.Format, r.ID, c.store) + if err == nil { + c.addLayer(r.ID, metadata) + } + } + + var newLayers []layer + for _, l := range c.layers { + if _, found := currentLayers[l.id]; found { + newLayers = append(newLayers, l) + } + } + c.layers = newLayers + + return nil +} + +// calculateHardLinkFingerprint calculates a hash that can be used to verify if a file +// is usable for deduplication with hardlinks. +// To calculate the digest, it uses the file payload digest, UID, GID, mode and xattrs. 
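Stepping back to getLayersCacheRef and release above: together they implement a refcounted singleton with a ten-minute reuse window. A standalone sketch of that pattern (hypothetical names; not code from this patch):

package main

import (
	"fmt"
	"sync"
	"time"
)

// cacheHandle mimics the refcounted, time-bounded singleton used above: a
// cached value is reused for up to ten minutes while callers hold references,
// and the last release drops it.
type cacheHandle struct {
	refs    int
	created time.Time
}

var (
	mu     sync.Mutex
	shared *cacheHandle
)

func acquire() *cacheHandle {
	mu.Lock()
	defer mu.Unlock()
	if shared != nil && time.Since(shared.created) < 10*time.Minute {
		shared.refs++
		return shared
	}
	shared = &cacheHandle{refs: 1, created: time.Now()}
	return shared
}

func release(c *cacheHandle) {
	mu.Lock()
	defer mu.Unlock()
	c.refs--
	if c.refs == 0 {
		shared = nil
	}
}

func main() {
	a := acquire()
	b := acquire()
	fmt.Println(a == b, b.refs) // true 2
	release(b)
	release(a)
	fmt.Println(shared == nil) // true
}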
+func calculateHardLinkFingerprint(f *internal.FileMetadata) (string, error) {
+	digester := digest.Canonical.Digester()
+
+	modeString := fmt.Sprintf("%d:%d:%o", f.UID, f.GID, f.Mode)
+	hash := digester.Hash()
+
+	if _, err := hash.Write([]byte(f.Digest)); err != nil {
+		return "", err
+	}
+
+	if _, err := hash.Write([]byte(modeString)); err != nil {
+		return "", err
+	}
+
+	if len(f.Xattrs) > 0 {
+		keys := make([]string, 0, len(f.Xattrs))
+		for k := range f.Xattrs {
+			keys = append(keys, k)
+		}
+		sort.Strings(keys)
+
+		for _, k := range keys {
+			if _, err := hash.Write([]byte(k)); err != nil {
+				return "", err
+			}
+			if _, err := hash.Write([]byte(f.Xattrs[k])); err != nil {
+				return "", err
+			}
+		}
+	}
+	return string(digester.Digest()), nil
+}
+
+// generateFileLocation generates a file location in the form $OFFSET@$PATH
+func generateFileLocation(path string, offset uint64) []byte {
+	return []byte(fmt.Sprintf("%d@%s", offset, path))
+}
+
+// generateTag generates a tag in the form $DIGEST$OFFSET@LEN.
+// the [OFFSET; LEN] points to the variable length data where the file locations
+// are stored. $DIGEST has length digestLen stored in the metadata file header.
+func generateTag(digest string, offset, len uint64) string {
+	return fmt.Sprintf("%s%.20d@%.20d", digest, offset, len)
+}
+
+type setBigData interface {
+	// SetLayerBigData stores a (possibly large) chunk of named data
+	SetLayerBigData(id, key string, data io.Reader) error
+}
+
+// writeCache writes a cache for the layer ID.
+// It generates a sorted list of digests with their offsets into the variable
+// length data where the path locations are stored.
+// The same cache is used to lookup files, chunks and candidates for deduplication with hard links.
+// There are 3 kinds of digests stored:
+// - digest(file.payload)
+// - digest(digest(file.payload) + file.UID + file.GID + file.mode + file.xattrs)
+// - digest(i) for each i in chunks(file payload)
+func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id string, dest setBigData) (*metadata, error) {
+	var vdata bytes.Buffer
+	tagLen := 0
+	digestLen := 0
+	var tagsBuffer bytes.Buffer
+
+	toc, err := prepareMetadata(manifest, format)
+	if err != nil {
+		return nil, err
+	}
+
+	var tags []string
+	for _, k := range toc {
+		if k.Digest != "" {
+			location := generateFileLocation(k.Name, 0)
+
+			off := uint64(vdata.Len())
+			l := uint64(len(location))
+
+			d := generateTag(k.Digest, off, l)
+			if tagLen == 0 {
+				tagLen = len(d)
+			}
+			if tagLen != len(d) {
+				return nil, errors.New("digest with different length found")
+			}
+			tags = append(tags, d)
+
+			fp, err := calculateHardLinkFingerprint(k)
+			if err != nil {
+				return nil, err
+			}
+			d = generateTag(fp, off, l)
+			if tagLen != len(d) {
+				return nil, errors.New("digest with different length found")
+			}
+			tags = append(tags, d)
+
+			if _, err := vdata.Write(location); err != nil {
+				return nil, err
+			}
+
+			digestLen = len(k.Digest)
+		}
+		if k.ChunkDigest != "" {
+			location := generateFileLocation(k.Name, uint64(k.ChunkOffset))
+			off := uint64(vdata.Len())
+			l := uint64(len(location))
+			d := generateTag(k.ChunkDigest, off, l)
+			if tagLen == 0 {
+				tagLen = len(d)
+			}
+			if tagLen != len(d) {
+				return nil, errors.New("digest with different length found")
+			}
+			tags = append(tags, d)
+
+			if _, err := vdata.Write(location); err != nil {
+				return nil, err
+			}
+			digestLen = len(k.ChunkDigest)
+		}
+	}
+
+	sort.Strings(tags)
+
+	for _, t := range tags {
+		if _, err := tagsBuffer.Write([]byte(t)); err != nil {
+			return nil, err
+		}
+	}
+
+	pipeReader, pipeWriter :=
io.Pipe() + errChan := make(chan error, 1) + go func() { + defer pipeWriter.Close() + defer close(errChan) + + // version + if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(cacheVersion)); err != nil { + errChan <- err + return + } + + // len of a tag + if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(tagLen)); err != nil { + errChan <- err + return + } + + // len of a digest + if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(digestLen)); err != nil { + errChan <- err + return + } + + // tags length + if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(tagsBuffer.Len())); err != nil { + errChan <- err + return + } + + // vdata length + if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(vdata.Len())); err != nil { + errChan <- err + return + } + + // tags + if _, err := pipeWriter.Write(tagsBuffer.Bytes()); err != nil { + errChan <- err + return + } + + // variable length data + if _, err := pipeWriter.Write(vdata.Bytes()); err != nil { + errChan <- err + return + } + + errChan <- nil + }() + defer pipeReader.Close() + + counter := ioutils.NewWriteCounter(io.Discard) + + r := io.TeeReader(pipeReader, counter) + + if err := dest.SetLayerBigData(id, cacheKey, r); err != nil { + return nil, err + } + + if err := <-errChan; err != nil { + return nil, err + } + + logrus.Debugf("Written lookaside cache for layer %q with length %v", id, counter.Count) + + return &metadata{ + digestLen: digestLen, + tagLen: tagLen, + tags: tagsBuffer.Bytes(), + vdata: vdata.Bytes(), + }, nil +} + +func readMetadataFromCache(bigData io.Reader) (*metadata, error) { + var version, tagLen, digestLen, tagsLen, vdataLen uint64 + if err := binary.Read(bigData, binary.LittleEndian, &version); err != nil { + return nil, err + } + if version != cacheVersion { + return nil, nil //nolint: nilnil + } + if err := binary.Read(bigData, binary.LittleEndian, &tagLen); err != nil { + return nil, err + } + if err := binary.Read(bigData, binary.LittleEndian, &digestLen); err != nil { + return nil, err + } + if err := binary.Read(bigData, binary.LittleEndian, &tagsLen); err != nil { + return nil, err + } + if err := binary.Read(bigData, binary.LittleEndian, &vdataLen); err != nil { + return nil, err + } + + tags := make([]byte, tagsLen) + if _, err := bigData.Read(tags); err != nil { + return nil, err + } + + vdata := make([]byte, vdataLen) + if _, err := bigData.Read(vdata); err != nil { + return nil, err + } + + return &metadata{ + tagLen: int(tagLen), + digestLen: int(digestLen), + tags: tags, + vdata: vdata, + }, nil +} + +func prepareMetadata(manifest []byte, format graphdriver.DifferOutputFormat) ([]*internal.FileMetadata, error) { + toc, err := unmarshalToc(manifest) + if err != nil { + // ignore errors here. They might be caused by a different manifest format. 
+ logrus.Debugf("could not unmarshal manifest: %v", err) + return nil, nil //nolint: nilnil + } + + switch format { + case graphdriver.DifferOutputFormatDir: + case graphdriver.DifferOutputFormatFlat: + toc.Entries, err = makeEntriesFlat(toc.Entries) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unknown format %q", format) + } + + var r []*internal.FileMetadata + chunkSeen := make(map[string]bool) + for i := range toc.Entries { + d := toc.Entries[i].Digest + if d != "" { + r = append(r, &toc.Entries[i]) + continue + } + + // chunks do not use hard link dedup so keeping just one candidate is enough + cd := toc.Entries[i].ChunkDigest + if cd != "" && !chunkSeen[cd] { + r = append(r, &toc.Entries[i]) + chunkSeen[cd] = true + } + } + + return r, nil +} + +func (c *layersCache) addLayer(id string, metadata *metadata) error { + target, err := c.store.DifferTarget(id) + if err != nil { + return fmt.Errorf("get checkout directory layer %q: %w", id, err) + } + + l := layer{ + id: id, + metadata: metadata, + target: target, + } + c.layers = append(c.layers, l) + return nil +} + +func byteSliceAsString(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} + +func findTag(digest string, metadata *metadata) (string, uint64, uint64) { + if len(digest) != metadata.digestLen { + return "", 0, 0 + } + + nElements := len(metadata.tags) / metadata.tagLen + + i := sort.Search(nElements, func(i int) bool { + d := byteSliceAsString(metadata.tags[i*metadata.tagLen : i*metadata.tagLen+metadata.digestLen]) + return strings.Compare(d, digest) >= 0 + }) + if i < nElements { + d := string(metadata.tags[i*metadata.tagLen : i*metadata.tagLen+len(digest)]) + if digest == d { + startOff := i*metadata.tagLen + metadata.digestLen + parts := strings.Split(string(metadata.tags[startOff:(i+1)*metadata.tagLen]), "@") + off, _ := strconv.ParseInt(parts[0], 10, 64) + len, _ := strconv.ParseInt(parts[1], 10, 64) + return digest, uint64(off), uint64(len) + } + } + return "", 0, 0 +} + +func (c *layersCache) findDigestInternal(digest string) (string, string, int64, error) { + if digest == "" { + return "", "", -1, nil + } + + c.mutex.RLock() + defer c.mutex.RUnlock() + + for _, layer := range c.layers { + digest, off, len := findTag(digest, layer.metadata) + if digest != "" { + position := string(layer.metadata.vdata[off : off+len]) + parts := strings.SplitN(position, "@", 2) + offFile, _ := strconv.ParseInt(parts[0], 10, 64) + return layer.target, parts[1], offFile, nil + } + } + + return "", "", -1, nil +} + +// findFileInOtherLayers finds the specified file in other layers. +// file is the file to look for. 
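findTag above binary-searches one flat byte slice of fixed-width, sorted records instead of a slice of strings. A standalone sketch of that layout and lookup (hypothetical names and a toy 8-byte record; not code from this patch):

package main

import (
	"fmt"
	"sort"
	"strings"
)

// findRecord mirrors the lookup findTag performs: tags are fixed-width
// records concatenated into one byte slice, sorted by their key prefix, so
// sort.Search can binary-search them without allocating per-record strings.
func findRecord(tags []byte, recLen, keyLen int, key string) (string, bool) {
	n := len(tags) / recLen
	i := sort.Search(n, func(i int) bool {
		return string(tags[i*recLen:i*recLen+keyLen]) >= key
	})
	if i < n && string(tags[i*recLen:i*recLen+keyLen]) == key {
		return string(tags[i*recLen : (i+1)*recLen]), true
	}
	return "", false
}

func main() {
	// Three 8-byte records: a 4-byte key plus a 4-byte payload, pre-sorted by key.
	tags := []byte(strings.Join([]string{"aaaa0001", "bbbb0002", "cccc0003"}, ""))
	rec, ok := findRecord(tags, 8, 4, "bbbb")
	fmt.Println(rec, ok) // bbbb0002 true
}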
+func (c *layersCache) findFileInOtherLayers(file *internal.FileMetadata, useHardLinks bool) (string, string, error) { + digest := file.Digest + if useHardLinks { + var err error + digest, err = calculateHardLinkFingerprint(file) + if err != nil { + return "", "", err + } + } + target, name, off, err := c.findDigestInternal(digest) + if off == 0 { + return target, name, err + } + return "", "", nil +} + +func (c *layersCache) findChunkInOtherLayers(chunk *internal.FileMetadata) (string, string, int64, error) { + return c.findDigestInternal(chunk.ChunkDigest) +} + +func unmarshalToc(manifest []byte) (*internal.TOC, error) { + var buf bytes.Buffer + count := 0 + var toc internal.TOC + + iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest) + for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { + if strings.ToLower(field) != "entries" { + iter.Skip() + continue + } + for iter.ReadArray() { + for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { + switch strings.ToLower(field) { + case "type", "name", "linkname", "digest", "chunkdigest", "chunktype", "modtime", "accesstime", "changetime": + count += len(iter.ReadStringAsSlice()) + case "xattrs": + for key := iter.ReadObject(); key != ""; key = iter.ReadObject() { + count += len(iter.ReadStringAsSlice()) + } + default: + iter.Skip() + } + } + } + break + } + + buf.Grow(count) + + getString := func(b []byte) string { + from := buf.Len() + buf.Write(b) + to := buf.Len() + return byteSliceAsString(buf.Bytes()[from:to]) + } + + iter = jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest) + for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { + if strings.ToLower(field) == "version" { + toc.Version = iter.ReadInt() + continue + } + if strings.ToLower(field) != "entries" { + iter.Skip() + continue + } + for iter.ReadArray() { + var m internal.FileMetadata + for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { + switch strings.ToLower(field) { + case "type": + m.Type = getString(iter.ReadStringAsSlice()) + case "name": + m.Name = getString(iter.ReadStringAsSlice()) + case "linkname": + m.Linkname = getString(iter.ReadStringAsSlice()) + case "mode": + m.Mode = iter.ReadInt64() + case "size": + m.Size = iter.ReadInt64() + case "uid": + m.UID = iter.ReadInt() + case "gid": + m.GID = iter.ReadInt() + case "modtime": + time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice())) + if err != nil { + return nil, err + } + m.ModTime = &time + case "accesstime": + time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice())) + if err != nil { + return nil, err + } + m.AccessTime = &time + case "changetime": + time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice())) + if err != nil { + return nil, err + } + m.ChangeTime = &time + case "devmajor": + m.Devmajor = iter.ReadInt64() + case "devminor": + m.Devminor = iter.ReadInt64() + case "digest": + m.Digest = getString(iter.ReadStringAsSlice()) + case "offset": + m.Offset = iter.ReadInt64() + case "endoffset": + m.EndOffset = iter.ReadInt64() + case "chunksize": + m.ChunkSize = iter.ReadInt64() + case "chunkoffset": + m.ChunkOffset = iter.ReadInt64() + case "chunkdigest": + m.ChunkDigest = getString(iter.ReadStringAsSlice()) + case "chunktype": + m.ChunkType = getString(iter.ReadStringAsSlice()) + case "xattrs": + m.Xattrs = make(map[string]string) + for key := iter.ReadObject(); key != ""; key = iter.ReadObject() { + value := iter.ReadStringAsSlice() + 
m.Xattrs[key] = getString(value) + } + default: + iter.Skip() + } + } + if m.Type == TypeReg && m.Size == 0 && m.Digest == "" { + m.Digest = digestSha256Empty + } + toc.Entries = append(toc.Entries, m) + } + break + } + toc.StringsBuf = buf + return &toc, nil +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/compression.go b/vendor/github.com/containers/storage/pkg/chunked/compression.go new file mode 100644 index 0000000000..e828d479f2 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/compression.go @@ -0,0 +1,25 @@ +package chunked + +import ( + "io" + + "github.com/containers/storage/pkg/chunked/compressor" + "github.com/containers/storage/pkg/chunked/internal" +) + +const ( + TypeReg = internal.TypeReg + TypeChunk = internal.TypeChunk + TypeLink = internal.TypeLink + TypeChar = internal.TypeChar + TypeBlock = internal.TypeBlock + TypeDir = internal.TypeDir + TypeFifo = internal.TypeFifo + TypeSymlink = internal.TypeSymlink +) + +// ZstdCompressor is a CompressorFunc for the zstd compression algorithm. +// Deprecated: Use pkg/chunked/compressor.ZstdCompressor. +func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) { + return compressor.ZstdCompressor(r, metadata, level) +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go b/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go new file mode 100644 index 0000000000..1d8141e387 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go @@ -0,0 +1,283 @@ +package chunked + +import ( + archivetar "archive/tar" + "errors" + "fmt" + "io" + "strconv" + + "github.com/containerd/stargz-snapshotter/estargz" + "github.com/containers/storage/pkg/chunked/internal" + "github.com/klauspost/compress/zstd" + "github.com/klauspost/pgzip" + digest "github.com/opencontainers/go-digest" + "github.com/vbatts/tar-split/archive/tar" +) + +var typesToTar = map[string]byte{ + TypeReg: tar.TypeReg, + TypeLink: tar.TypeLink, + TypeChar: tar.TypeChar, + TypeBlock: tar.TypeBlock, + TypeDir: tar.TypeDir, + TypeFifo: tar.TypeFifo, + TypeSymlink: tar.TypeSymlink, +} + +func typeToTarType(t string) (byte, error) { + r, found := typesToTar[t] + if !found { + return 0, fmt.Errorf("unknown type: %v", t) + } + return r, nil +} + +func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, int64, error) { + // information on the format here https://github.com/containerd/stargz-snapshotter/blob/main/docs/stargz-estargz.md + footerSize := int64(51) + if blobSize <= footerSize { + return nil, 0, errors.New("blob too small") + } + chunk := ImageSourceChunk{ + Offset: uint64(blobSize - footerSize), + Length: uint64(footerSize), + } + parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk}) + if err != nil { + return nil, 0, err + } + var reader io.ReadCloser + select { + case r := <-parts: + reader = r + case err := <-errs: + return nil, 0, err + } + defer reader.Close() + footer := make([]byte, footerSize) + if _, err := io.ReadFull(reader, footer); err != nil { + return nil, 0, err + } + + /* Read the ToC offset: + - 10 bytes gzip header + - 2 bytes XLEN (length of Extra field) = 26 (4 bytes header + 16 hex digits + len("STARGZ")) + - 2 bytes Extra: SI1 = 'S', SI2 = 'G' + - 2 bytes Extra: LEN = 22 (16 hex digits + len("STARGZ")) + - 22 
bytes Extra: subfield = fmt.Sprintf("%016xSTARGZ", offsetOfTOC) + - 5 bytes flate header: BFINAL = 1(last block), BTYPE = 0(non-compressed block), LEN = 0 + - 8 bytes gzip footer + */ + tocOffset, err := strconv.ParseInt(string(footer[16:16+22-6]), 16, 64) + if err != nil { + return nil, 0, fmt.Errorf("parse ToC offset: %w", err) + } + + size := int64(blobSize - footerSize - tocOffset) + // set a reasonable limit + if size > (1<<20)*50 { + return nil, 0, errors.New("manifest too big") + } + + chunk = ImageSourceChunk{ + Offset: uint64(tocOffset), + Length: uint64(size), + } + parts, errs, err = blobStream.GetBlobAt([]ImageSourceChunk{chunk}) + if err != nil { + return nil, 0, err + } + + var tocReader io.ReadCloser + select { + case r := <-parts: + tocReader = r + case err := <-errs: + return nil, 0, err + } + defer tocReader.Close() + + r, err := pgzip.NewReader(tocReader) + if err != nil { + return nil, 0, err + } + defer r.Close() + + aTar := archivetar.NewReader(r) + + header, err := aTar.Next() + if err != nil { + return nil, 0, err + } + // set a reasonable limit + if header.Size > (1<<20)*50 { + return nil, 0, errors.New("manifest too big") + } + + manifestUncompressed := make([]byte, header.Size) + if _, err := io.ReadFull(aTar, manifestUncompressed); err != nil { + return nil, 0, err + } + + manifestDigester := digest.Canonical.Digester() + manifestChecksum := manifestDigester.Hash() + if _, err := manifestChecksum.Write(manifestUncompressed); err != nil { + return nil, 0, err + } + + d, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation]) + if err != nil { + return nil, 0, err + } + if manifestDigester.Digest() != d { + return nil, 0, errors.New("invalid manifest checksum") + } + + return manifestUncompressed, tocOffset, nil +} + +// readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream. The blob total size must +// be specified. +// This function uses the io.github.containers.zstd-chunked. annotations when specified. 
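Before the zstd:chunked path below: the estargz footer layout documented above is easiest to verify with a worked example. This is a minimal, self-contained sketch (illustrative, not part of the vendored file) that encodes a ToC offset into the 22-byte subfield ("%016xSTARGZ") and parses it back exactly the way footer[16:16+22-6] is parsed above:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// The subfield starts at byte 16 of the 51-byte footer:
	// 10-byte gzip header + 2-byte XLEN + 2 bytes SI1/SI2 + 2 bytes LEN.
	const tocOffset int64 = 0x9a0

	subfield := fmt.Sprintf("%016xSTARGZ", tocOffset) // "00000000000009a0STARGZ"

	// footer[16:16+22-6] corresponds to the first 16 bytes of the
	// subfield, i.e. the hex-encoded ToC offset.
	parsed, err := strconv.ParseInt(subfield[:16], 16, 64)
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed == tocOffset) // true
}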
+func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, []byte, int64, error) { + footerSize := int64(internal.FooterSizeSupported) + if blobSize <= footerSize { + return nil, nil, 0, errors.New("blob too small") + } + + var footerData internal.ZstdChunkedFooterData + + if offsetMetadata := annotations[internal.ManifestInfoKey]; offsetMetadata != "" { + var err error + footerData, err = internal.ReadFooterDataFromAnnotations(annotations) + if err != nil { + return nil, nil, 0, err + } + } else { + chunk := ImageSourceChunk{ + Offset: uint64(blobSize - footerSize), + Length: uint64(footerSize), + } + parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk}) + if err != nil { + return nil, nil, 0, err + } + var reader io.ReadCloser + select { + case r := <-parts: + reader = r + case err := <-errs: + return nil, nil, 0, err + } + footer := make([]byte, footerSize) + if _, err := io.ReadFull(reader, footer); err != nil { + return nil, nil, 0, err + } + + footerData, err = internal.ReadFooterDataFromBlob(footer) + if err != nil { + return nil, nil, 0, err + } + } + + if footerData.ManifestType != internal.ManifestTypeCRFS { + return nil, nil, 0, errors.New("invalid manifest type") + } + + // set a reasonable limit + if footerData.LengthCompressed > (1<<20)*50 { + return nil, nil, 0, errors.New("manifest too big") + } + if footerData.LengthUncompressed > (1<<20)*50 { + return nil, nil, 0, errors.New("manifest too big") + } + + chunk := ImageSourceChunk{ + Offset: footerData.Offset, + Length: footerData.LengthCompressed, + } + + chunks := []ImageSourceChunk{chunk} + + if footerData.OffsetTarSplit > 0 { + chunkTarSplit := ImageSourceChunk{ + Offset: footerData.OffsetTarSplit, + Length: footerData.LengthCompressedTarSplit, + } + chunks = append(chunks, chunkTarSplit) + } + + parts, errs, err := blobStream.GetBlobAt(chunks) + if err != nil { + return nil, nil, 0, err + } + + readBlob := func(len uint64) ([]byte, error) { + var reader io.ReadCloser + select { + case r := <-parts: + reader = r + case err := <-errs: + return nil, err + } + + blob := make([]byte, len) + if _, err := io.ReadFull(reader, blob); err != nil { + reader.Close() + return nil, err + } + if err := reader.Close(); err != nil { + return nil, err + } + return blob, nil + } + + manifest, err := readBlob(footerData.LengthCompressed) + if err != nil { + return nil, nil, 0, err + } + + decodedBlob, err := decodeAndValidateBlob(manifest, footerData.LengthUncompressed, footerData.ChecksumAnnotation) + if err != nil { + return nil, nil, 0, err + } + decodedTarSplit := []byte{} + if footerData.OffsetTarSplit > 0 { + tarSplit, err := readBlob(footerData.LengthCompressedTarSplit) + if err != nil { + return nil, nil, 0, err + } + + decodedTarSplit, err = decodeAndValidateBlob(tarSplit, footerData.LengthUncompressedTarSplit, footerData.ChecksumAnnotationTarSplit) + if err != nil { + return nil, nil, 0, err + } + } + return decodedBlob, decodedTarSplit, int64(footerData.Offset), err +} + +func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedUncompressedChecksum string) ([]byte, error) { + d, err := digest.Parse(expectedUncompressedChecksum) + if err != nil { + return nil, err + } + + blobDigester := d.Algorithm().Digester() + blobChecksum := blobDigester.Hash() + if _, err := blobChecksum.Write(blob); err != nil { + return nil, err + } + if blobDigester.Digest() != d { + return nil, fmt.Errorf("invalid blob checksum, expected checksum %s, got %s", d, 
blobDigester.Digest()) + } + + decoder, err := zstd.NewReader(nil) //nolint:contextcheck + if err != nil { + return nil, err + } + defer decoder.Close() + + b := make([]byte, 0, lengthUncompressed) + return decoder.DecodeAll(blob, b) +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go b/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go new file mode 100644 index 0000000000..a08928034a --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go @@ -0,0 +1,230 @@ +package dump + +import ( + "bufio" + "fmt" + "io" + "strings" + "time" + "unicode" + + "github.com/containers/storage/pkg/chunked/internal" + "golang.org/x/sys/unix" +) + +const ( + ESCAPE_STANDARD = 0 + NOESCAPE_SPACE = 1 << iota + ESCAPE_EQUAL + ESCAPE_LONE_DASH +) + +func escaped(val string, escape int) string { + noescapeSpace := escape&NOESCAPE_SPACE != 0 + escapeEqual := escape&ESCAPE_EQUAL != 0 + escapeLoneDash := escape&ESCAPE_LONE_DASH != 0 + + length := len(val) + + if escapeLoneDash && val == "-" { + return fmt.Sprintf("\\x%.2x", val[0]) + } + + var result string + for i := 0; i < length; i++ { + c := val[i] + hexEscape := false + var special string + + switch c { + case '\\': + special = "\\\\" + case '\n': + special = "\\n" + case '\r': + special = "\\r" + case '\t': + special = "\\t" + case '=': + hexEscape = escapeEqual + default: + if noescapeSpace { + hexEscape = !unicode.IsPrint(rune(c)) + } else { + hexEscape = !unicode.IsGraphic(rune(c)) + } + } + + if special != "" { + result += special + } else if hexEscape { + result += fmt.Sprintf("\\x%.2x", c) + } else { + result += string(c) + } + } + return result +} + +func escapedOptional(val string, escape int) string { + if val == "" { + return "-" + } + return escaped(val, escape) +} + +func getStMode(mode uint32, typ string) (uint32, error) { + switch typ { + case internal.TypeReg, internal.TypeLink: + mode |= unix.S_IFREG + case internal.TypeChar: + mode |= unix.S_IFCHR + case internal.TypeBlock: + mode |= unix.S_IFBLK + case internal.TypeDir: + mode |= unix.S_IFDIR + case internal.TypeFifo: + mode |= unix.S_IFIFO + case internal.TypeSymlink: + mode |= unix.S_IFLNK + default: + return 0, fmt.Errorf("unknown type %s", typ) + } + return mode, nil +} + +func dumpNode(out io.Writer, links map[string]int, verityDigests map[string]string, entry *internal.FileMetadata) error { + path := entry.Name + if path == "" { + path = "/" + } else if path[0] != '/' { + path = "/" + path + } + + if _, err := fmt.Fprint(out, escaped(path, ESCAPE_STANDARD)); err != nil { + return err + } + + nlinks := links[entry.Name] + links[entry.Linkname] + 1 + link := "" + if entry.Type == internal.TypeLink { + link = "@" + } + + rdev := unix.Mkdev(uint32(entry.Devmajor), uint32(entry.Devminor)) + + entryTime := entry.ModTime + if entryTime == nil { + t := time.Unix(0, 0) + entryTime = &t + } + + mode, err := getStMode(uint32(entry.Mode), entry.Type) + if err != nil { + return err + } + + if _, err := fmt.Fprintf(out, " %d %s%o %d %d %d %d %d.%d ", entry.Size, + link, mode, + nlinks, entry.UID, entry.GID, rdev, + entryTime.Unix(), entryTime.Nanosecond()); err != nil { + return err + } + + var payload string + if entry.Linkname != "" { + payload = entry.Linkname + if entry.Type == internal.TypeLink && payload[0] != '/' { + payload = "/" + payload + } + } else { + if len(entry.Digest) > 10 { + d := strings.Replace(entry.Digest, "sha256:", "", 1) + payload = d[:2] + "/" + d[2:] + } + } + + if _, err := 
fmt.Fprintf(out, escapedOptional(payload, ESCAPE_LONE_DASH)); err != nil { + return err + } + + /* inline content. */ + if _, err := fmt.Fprint(out, " -"); err != nil { + return err + } + + /* store the digest. */ + if _, err := fmt.Fprint(out, " "); err != nil { + return err + } + digest := verityDigests[payload] + if _, err := fmt.Fprintf(out, escapedOptional(digest, ESCAPE_LONE_DASH)); err != nil { + return err + } + + for k, v := range entry.Xattrs { + name := escaped(k, ESCAPE_EQUAL) + value := escaped(v, ESCAPE_EQUAL) + + if _, err := fmt.Fprintf(out, " %s=%s", name, value); err != nil { + return err + } + } + if _, err := fmt.Fprint(out, "\n"); err != nil { + return err + } + return nil +} + +// GenerateDump generates a dump of the TOC in the same format as `composefs-info dump` +func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader, error) { + toc, ok := tocI.(*internal.TOC) + if !ok { + return nil, fmt.Errorf("invalid TOC type") + } + pipeR, pipeW := io.Pipe() + go func() { + closed := false + w := bufio.NewWriter(pipeW) + defer func() { + if !closed { + w.Flush() + pipeW.Close() + } + }() + + links := make(map[string]int) + for _, e := range toc.Entries { + if e.Linkname == "" { + continue + } + links[e.Linkname] = links[e.Linkname] + 1 + } + + if len(toc.Entries) == 0 || (toc.Entries[0].Name != "" && toc.Entries[0].Name != "/") { + root := &internal.FileMetadata{ + Name: "/", + Type: internal.TypeDir, + Mode: 0o755, + } + + if err := dumpNode(w, links, verityDigests, root); err != nil { + pipeW.CloseWithError(err) + closed = true + return + } + } + + for _, e := range toc.Entries { + if e.Type == internal.TypeChunk { + continue + } + if err := dumpNode(w, links, verityDigests, &e); err != nil { + pipeW.CloseWithError(err) + closed = true + return + } + } + }() + return pipeR, nil +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage.go b/vendor/github.com/containers/storage/pkg/chunked/storage.go new file mode 100644 index 0000000000..752ee25200 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/storage.go @@ -0,0 +1,25 @@ +package chunked + +import ( + "io" +) + +// ImageSourceChunk is a portion of a blob. +type ImageSourceChunk struct { + Offset uint64 + Length uint64 +} + +// ImageSourceSeekable is an image source that permits to fetch chunks of the entire blob. +type ImageSourceSeekable interface { + // GetBlobAt returns a stream for the specified blob. 
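For context on the channel contract of GetBlobAt (declared just below): one reader is delivered per requested chunk, and a send on the error channel aborts the request. A minimal consumer, sketched with a hypothetical buffered setup standing in for a real ImageSourceSeekable implementation, mirrors the select pattern used throughout this package:

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

// readOneChunk takes a single chunk reader off the channel pair.
func readOneChunk(parts chan io.ReadCloser, errs chan error) ([]byte, error) {
	var reader io.ReadCloser
	select {
	case r, ok := <-parts:
		if !ok {
			return nil, errors.New("no more chunks")
		}
		reader = r
	case err := <-errs:
		return nil, err
	}
	defer reader.Close()
	return io.ReadAll(reader)
}

func main() {
	// Hypothetical setup; a real implementation sends on unbuffered
	// channels from a goroutine, as seekableFile.GetBlobAt does below.
	parts := make(chan io.ReadCloser, 1)
	errs := make(chan error, 1)
	parts <- io.NopCloser(strings.NewReader("chunk payload"))

	data, err := readOneChunk(parts, errs)
	fmt.Println(string(data), err) // "chunk payload" <nil>
}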
+ GetBlobAt([]ImageSourceChunk) (chan io.ReadCloser, chan error, error) +} + +// ErrBadRequest is returned when the request is not valid +type ErrBadRequest struct { //nolint: errname +} + +func (e ErrBadRequest) Error() string { + return "bad request" +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go new file mode 100644 index 0000000000..8493a2c19a --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go @@ -0,0 +1,1989 @@ +package chunked + +import ( + archivetar "archive/tar" + "context" + "encoding/base64" + "errors" + "fmt" + "hash" + "io" + "os" + "path/filepath" + "reflect" + "sort" + "strings" + "sync" + "sync/atomic" + "syscall" + "time" + + "github.com/containerd/stargz-snapshotter/estargz" + storage "github.com/containers/storage" + graphdriver "github.com/containers/storage/drivers" + driversCopy "github.com/containers/storage/drivers/copy" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/chunked/compressor" + "github.com/containers/storage/pkg/chunked/internal" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" + "github.com/containers/storage/types" + securejoin "github.com/cyphar/filepath-securejoin" + jsoniter "github.com/json-iterator/go" + "github.com/klauspost/compress/zstd" + "github.com/klauspost/pgzip" + digest "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" + "github.com/vbatts/tar-split/archive/tar" + "golang.org/x/sys/unix" +) + +const ( + maxNumberMissingChunks = 1024 + newFileFlags = (unix.O_CREAT | unix.O_TRUNC | unix.O_EXCL | unix.O_WRONLY) + containersOverrideXattr = "user.containers.override_stat" + bigDataKey = "zstd-chunked-manifest" + chunkedData = "zstd-chunked-data" + chunkedLayerDataKey = "zstd-chunked-layer-data" + tocKey = "toc" + + fileTypeZstdChunked = iota + fileTypeEstargz + fileTypeNoCompression + fileTypeHole + + copyGoRoutines = 32 +) + +type compressedFileType int + +type chunkedDiffer struct { + stream ImageSourceSeekable + manifest []byte + tarSplit []byte + layersCache *layersCache + tocOffset int64 + fileType compressedFileType + + copyBuffer []byte + + gzipReader *pgzip.Reader + zstdReader *zstd.Decoder + rawReader io.Reader + + // contentDigest is the digest of the uncompressed content + // (diffID) when the layer is fully retrieved. If the layer + // is not fully retrieved, instead of using the digest of the + // uncompressed content, it refers to the digest of the TOC. + contentDigest digest.Digest + + // convertedToZstdChunked is set to true if the layer needs to + // be converted to the zstd:chunked format before it can be + // handled. + convertToZstdChunked bool + + // skipValidation is set to true if the individual files in + // the layer are trusted and should not be validated. 
+ skipValidation bool + + blobSize int64 + + storeOpts *types.StoreOptions +} + +var xattrsToIgnore = map[string]interface{}{ + "security.selinux": true, +} + +// chunkedLayerData is used to store additional information about the layer +type chunkedLayerData struct { + Format graphdriver.DifferOutputFormat `json:"format"` +} + +func timeToTimespec(time *time.Time) (ts unix.Timespec) { + if time == nil || time.IsZero() { + // Return UTIME_OMIT special value + ts.Sec = 0 + ts.Nsec = ((1 << 30) - 2) + return + } + return unix.NsecToTimespec(time.UnixNano()) +} + +func doHardLink(srcFd int, destDirFd int, destBase string) error { + doLink := func() error { + // Using unix.AT_EMPTY_PATH requires CAP_DAC_READ_SEARCH while this variant that uses + // /proc/self/fd doesn't and can be used with rootless. + srcPath := fmt.Sprintf("/proc/self/fd/%d", srcFd) + return unix.Linkat(unix.AT_FDCWD, srcPath, destDirFd, destBase, unix.AT_SYMLINK_FOLLOW) + } + + err := doLink() + + // if the destination exists, unlink it first and try again + if err != nil && os.IsExist(err) { + unix.Unlinkat(destDirFd, destBase, 0) + return doLink() + } + return err +} + +func copyFileContent(srcFd int, destFile string, dirfd int, mode os.FileMode, useHardLinks bool) (*os.File, int64, error) { + src := fmt.Sprintf("/proc/self/fd/%d", srcFd) + st, err := os.Stat(src) + if err != nil { + return nil, -1, fmt.Errorf("copy file content for %q: %w", destFile, err) + } + + copyWithFileRange, copyWithFileClone := true, true + + if useHardLinks { + destDirPath := filepath.Dir(destFile) + destBase := filepath.Base(destFile) + destDir, err := openFileUnderRoot(destDirPath, dirfd, 0, mode) + if err == nil { + defer destDir.Close() + + err := doHardLink(srcFd, int(destDir.Fd()), destBase) + if err == nil { + return nil, st.Size(), nil + } + } + } + + // If the destination file already exists, we shouldn't blow it away + dstFile, err := openFileUnderRoot(destFile, dirfd, newFileFlags, mode) + if err != nil { + return nil, -1, fmt.Errorf("open file %q under rootfs for copy: %w", destFile, err) + } + + err = driversCopy.CopyRegularToFile(src, dstFile, st, ©WithFileRange, ©WithFileClone) + if err != nil { + dstFile.Close() + return nil, -1, fmt.Errorf("copy to file %q under rootfs: %w", destFile, err) + } + return dstFile, st.Size(), nil +} + +type seekableFile struct { + file *os.File +} + +func (f *seekableFile) Close() error { + return f.file.Close() +} + +func (f *seekableFile) GetBlobAt(chunks []ImageSourceChunk) (chan io.ReadCloser, chan error, error) { + streams := make(chan io.ReadCloser) + errs := make(chan error) + + go func() { + for _, chunk := range chunks { + streams <- io.NopCloser(io.NewSectionReader(f.file, int64(chunk.Offset), int64(chunk.Length))) + } + close(streams) + close(errs) + }() + + return streams, errs, nil +} + +func convertTarToZstdChunked(destDirectory string, blobSize int64, iss ImageSourceSeekable) (*seekableFile, digest.Digest, map[string]string, error) { + var payload io.ReadCloser + var streams chan io.ReadCloser + var errs chan error + var err error + + chunksToRequest := []ImageSourceChunk{ + { + Offset: 0, + Length: uint64(blobSize), + }, + } + + streams, errs, err = iss.GetBlobAt(chunksToRequest) + if err != nil { + return nil, "", nil, err + } + select { + case p := <-streams: + payload = p + case err := <-errs: + return nil, "", nil, err + } + if payload == nil { + return nil, "", nil, errors.New("invalid stream returned") + } + + diff, err := archive.DecompressStream(payload) + if err != nil { + 
return nil, "", nil, err + } + + fd, err := unix.Open(destDirectory, unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC, 0o600) + if err != nil { + return nil, "", nil, err + } + + f := os.NewFile(uintptr(fd), destDirectory) + + newAnnotations := make(map[string]string) + level := 1 + chunked, err := compressor.ZstdCompressor(f, newAnnotations, &level) + if err != nil { + f.Close() + return nil, "", nil, err + } + + digester := digest.Canonical.Digester() + hash := digester.Hash() + + if _, err := io.Copy(io.MultiWriter(chunked, hash), diff); err != nil { + f.Close() + return nil, "", nil, err + } + if err := chunked.Close(); err != nil { + f.Close() + return nil, "", nil, err + } + is := seekableFile{ + file: f, + } + return &is, digester.Digest(), newAnnotations, nil +} + +// GetDiffer returns a differ than can be used with ApplyDiffWithDiffer. +func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) { + storeOpts, err := types.DefaultStoreOptionsAutoDetectUID() + if err != nil { + return nil, err + } + + if _, ok := annotations[internal.ManifestChecksumKey]; ok { + return makeZstdChunkedDiffer(ctx, store, blobSize, annotations, iss, &storeOpts) + } + if _, ok := annotations[estargz.TOCJSONDigestAnnotation]; ok { + return makeEstargzChunkedDiffer(ctx, store, blobSize, annotations, iss, &storeOpts) + } + + return makeConvertFromRawDiffer(ctx, store, blobSize, annotations, iss, &storeOpts) +} + +func makeConvertFromRawDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) { + if !parseBooleanPullOption(storeOpts, "convert_images", false) { + return nil, errors.New("convert_images not configured") + } + + layersCache, err := getLayersCache(store) + if err != nil { + return nil, err + } + + return &chunkedDiffer{ + blobSize: blobSize, + convertToZstdChunked: true, + copyBuffer: makeCopyBuffer(), + layersCache: layersCache, + storeOpts: storeOpts, + stream: iss, + }, nil +} + +func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) { + manifest, tarSplit, tocOffset, err := readZstdChunkedManifest(iss, blobSize, annotations) + if err != nil { + return nil, fmt.Errorf("read zstd:chunked manifest: %w", err) + } + layersCache, err := getLayersCache(store) + if err != nil { + return nil, err + } + + contentDigest, err := digest.Parse(annotations[internal.ManifestChecksumKey]) + if err != nil { + return nil, fmt.Errorf("parse TOC digest %q: %w", annotations[internal.ManifestChecksumKey], err) + } + + return &chunkedDiffer{ + blobSize: blobSize, + contentDigest: contentDigest, + copyBuffer: makeCopyBuffer(), + fileType: fileTypeZstdChunked, + layersCache: layersCache, + manifest: manifest, + storeOpts: storeOpts, + stream: iss, + tarSplit: tarSplit, + tocOffset: tocOffset, + }, nil +} + +func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) { + manifest, tocOffset, err := readEstargzChunkedManifest(iss, blobSize, annotations) + if err != nil { + return nil, fmt.Errorf("read zstd:chunked manifest: %w", err) + } + layersCache, err := getLayersCache(store) + if err != nil { + return nil, err + } + + contentDigest, 
err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation]) + if err != nil { + return nil, fmt.Errorf("parse TOC digest %q: %w", annotations[estargz.TOCJSONDigestAnnotation], err) + } + + return &chunkedDiffer{ + blobSize: blobSize, + contentDigest: contentDigest, + copyBuffer: makeCopyBuffer(), + fileType: fileTypeEstargz, + layersCache: layersCache, + manifest: manifest, + storeOpts: storeOpts, + stream: iss, + tocOffset: tocOffset, + }, nil +} + +func makeCopyBuffer() []byte { + return make([]byte, 2<<20) +} + +// copyFileFromOtherLayer copies a file from another layer +// file is the file to look for. +// source is the path to the source layer checkout. +// name is the path to the file to copy in source. +// dirfd is an open file descriptor to the destination root directory. +// useHardLinks defines whether the deduplication can be performed using hard links. +func copyFileFromOtherLayer(file *internal.FileMetadata, source string, name string, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) { + srcDirfd, err := unix.Open(source, unix.O_RDONLY, 0) + if err != nil { + return false, nil, 0, fmt.Errorf("open source file: %w", err) + } + defer unix.Close(srcDirfd) + + srcFile, err := openFileUnderRoot(name, srcDirfd, unix.O_RDONLY, 0) + if err != nil { + return false, nil, 0, fmt.Errorf("open source file under target rootfs (%s): %w", name, err) + } + defer srcFile.Close() + + dstFile, written, err := copyFileContent(int(srcFile.Fd()), file.Name, dirfd, 0, useHardLinks) + if err != nil { + return false, nil, 0, fmt.Errorf("copy content to %q: %w", file.Name, err) + } + return true, dstFile, written, nil +} + +// canDedupMetadataWithHardLink says whether it is possible to deduplicate file with otherFile. +// It checks that the two files have the same UID, GID, file mode and xattrs. +func canDedupMetadataWithHardLink(file *internal.FileMetadata, otherFile *internal.FileMetadata) bool { + if file.UID != otherFile.UID { + return false + } + if file.GID != otherFile.GID { + return false + } + if file.Mode != otherFile.Mode { + return false + } + if !reflect.DeepEqual(file.Xattrs, otherFile.Xattrs) { + return false + } + return true +} + +// canDedupFileWithHardLink checks if the specified file can be deduplicated by an +// open file, given its descriptor and stat data. +func canDedupFileWithHardLink(file *internal.FileMetadata, fd int, s os.FileInfo) bool { + st, ok := s.Sys().(*syscall.Stat_t) + if !ok { + return false + } + + path := fmt.Sprintf("/proc/self/fd/%d", fd) + + listXattrs, err := system.Llistxattr(path) + if err != nil { + return false + } + + xattrs := make(map[string]string) + for _, x := range listXattrs { + v, err := system.Lgetxattr(path, x) + if err != nil { + return false + } + + if _, found := xattrsToIgnore[x]; found { + continue + } + xattrs[x] = string(v) + } + // fill only the attributes used by canDedupMetadataWithHardLink. + otherFile := internal.FileMetadata{ + UID: int(st.Uid), + GID: int(st.Gid), + Mode: int64(st.Mode), + Xattrs: xattrs, + } + return canDedupMetadataWithHardLink(file, &otherFile) +} + +// findFileInOSTreeRepos checks whether the requested file already exist in one of the OSTree repo and copies the file content from there if possible. +// file is the file to look for. +// ostreeRepos is a list of OSTree repos. +// dirfd is an open fd to the destination checkout. +// useHardLinks defines whether the deduplication can be performed using hard links. 
+func findFileInOSTreeRepos(file *internal.FileMetadata, ostreeRepos []string, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
+ digest, err := digest.Parse(file.Digest)
+ if err != nil {
+ logrus.Debugf("could not parse digest: %v", err)
+ return false, nil, 0, nil
+ }
+ payloadLink := digest.Encoded() + ".payload-link"
+ if len(payloadLink) < 2 {
+ return false, nil, 0, nil
+ }
+
+ for _, repo := range ostreeRepos {
+ sourceFile := filepath.Join(repo, "objects", payloadLink[:2], payloadLink[2:])
+ st, err := os.Stat(sourceFile)
+ if err != nil || !st.Mode().IsRegular() {
+ continue
+ }
+ if st.Size() != file.Size {
+ continue
+ }
+ fd, err := unix.Open(sourceFile, unix.O_RDONLY|unix.O_NONBLOCK, 0)
+ if err != nil {
+ logrus.Debugf("could not open sourceFile %s: %v", sourceFile, err)
+ return false, nil, 0, nil
+ }
+ f := os.NewFile(uintptr(fd), "fd")
+ defer f.Close()
+
+ // check if the open file can be deduplicated with hard links
+ if useHardLinks && !canDedupFileWithHardLink(file, fd, st) {
+ continue
+ }
+
+ dstFile, written, err := copyFileContent(fd, file.Name, dirfd, 0, useHardLinks)
+ if err != nil {
+ logrus.Debugf("could not copyFileContent: %v", err)
+ return false, nil, 0, nil
+ }
+ return true, dstFile, written, nil
+ }
+ // If hard-link deduplication was attempted and failed, try again without hard links.
+ if useHardLinks {
+ return findFileInOSTreeRepos(file, ostreeRepos, dirfd, false)
+ }
+
+ return false, nil, 0, nil
+}
+
+// findFileInOtherLayers finds the specified file in other layers.
+// cache is the layers cache to use.
+// file is the file to look for.
+// dirfd is an open file descriptor to the checkout root directory.
+// useHardLinks defines whether the deduplication can be performed using hard links.
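The payload-link path probed by findFileInOSTreeRepos above is worth seeing in isolation. A small standalone sketch (the repo path and content are made up; only the path construction mirrors the vendored code):

package main

import (
	"fmt"
	"path/filepath"

	digest "github.com/opencontainers/go-digest"
)

// payloadLinkPath mimics the probe above: hex-encoded digest plus
// ".payload-link", fanned out under objects/ by the first two hex characters.
func payloadLinkPath(repo string, d digest.Digest) string {
	link := d.Encoded() + ".payload-link"
	return filepath.Join(repo, "objects", link[:2], link[2:])
}

func main() {
	d := digest.FromString("example file content")
	// Prints /ostree/repo/objects/<xx>/<rest>.payload-link
	fmt.Println(payloadLinkPath("/ostree/repo", d))
}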
+func findFileInOtherLayers(cache *layersCache, file *internal.FileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) { + target, name, err := cache.findFileInOtherLayers(file, useHardLinks) + if err != nil || name == "" { + return false, nil, 0, err + } + return copyFileFromOtherLayer(file, target, name, dirfd, useHardLinks) +} + +func maybeDoIDRemap(manifest []internal.FileMetadata, options *archive.TarOptions) error { + if options.ChownOpts == nil && len(options.UIDMaps) == 0 || len(options.GIDMaps) == 0 { + return nil + } + + idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + + for i := range manifest { + if options.ChownOpts != nil { + manifest[i].UID = options.ChownOpts.UID + manifest[i].GID = options.ChownOpts.GID + } else { + pair := idtools.IDPair{ + UID: manifest[i].UID, + GID: manifest[i].GID, + } + var err error + manifest[i].UID, manifest[i].GID, err = idMappings.ToContainer(pair) + if err != nil { + return err + } + } + } + return nil +} + +func mapToSlice(inputMap map[uint32]struct{}) []uint32 { + var out []uint32 + for value := range inputMap { + out = append(out, value) + } + return out +} + +func collectIDs(entries []internal.FileMetadata) ([]uint32, []uint32) { + uids := make(map[uint32]struct{}) + gids := make(map[uint32]struct{}) + for _, entry := range entries { + uids[uint32(entry.UID)] = struct{}{} + gids[uint32(entry.GID)] = struct{}{} + } + return mapToSlice(uids), mapToSlice(gids) +} + +type originFile struct { + Root string + Path string + Offset int64 +} + +type missingFileChunk struct { + Gap int64 + Hole bool + + File *internal.FileMetadata + + CompressedSize int64 + UncompressedSize int64 +} + +type missingPart struct { + Hole bool + SourceChunk *ImageSourceChunk + OriginFile *originFile + Chunks []missingFileChunk +} + +func (o *originFile) OpenFile() (io.ReadCloser, error) { + srcDirfd, err := unix.Open(o.Root, unix.O_RDONLY, 0) + if err != nil { + return nil, fmt.Errorf("open source file: %w", err) + } + defer unix.Close(srcDirfd) + + srcFile, err := openFileUnderRoot(o.Path, srcDirfd, unix.O_RDONLY, 0) + if err != nil { + return nil, fmt.Errorf("open source file under target rootfs: %w", err) + } + + if _, err := srcFile.Seek(o.Offset, 0); err != nil { + srcFile.Close() + return nil, err + } + return srcFile, nil +} + +// setFileAttrs sets the file attributes for file given metadata +func setFileAttrs(dirfd int, file *os.File, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions, usePath bool) error { + if file == nil || file.Fd() < 0 { + return errors.New("invalid file") + } + fd := int(file.Fd()) + + t, err := typeToTarType(metadata.Type) + if err != nil { + return err + } + + // If it is a symlink, force to use the path + if t == tar.TypeSymlink { + usePath = true + } + + baseName := "" + if usePath { + dirName := filepath.Dir(metadata.Name) + if dirName != "" { + parentFd, err := openFileUnderRoot(dirName, dirfd, unix.O_PATH|unix.O_DIRECTORY, 0) + if err != nil { + return err + } + defer parentFd.Close() + + dirfd = int(parentFd.Fd()) + } + baseName = filepath.Base(metadata.Name) + } + + doChown := func() error { + if usePath { + return unix.Fchownat(dirfd, baseName, metadata.UID, metadata.GID, unix.AT_SYMLINK_NOFOLLOW) + } + return unix.Fchown(fd, metadata.UID, metadata.GID) + } + + doSetXattr := func(k string, v []byte) error { + return unix.Fsetxattr(fd, k, v, 0) + } + + doUtimes := func() error { + ts := []unix.Timespec{timeToTimespec(metadata.AccessTime), 
timeToTimespec(metadata.ModTime)} + if usePath { + return unix.UtimesNanoAt(dirfd, baseName, ts, unix.AT_SYMLINK_NOFOLLOW) + } + return unix.UtimesNanoAt(unix.AT_FDCWD, fmt.Sprintf("/proc/self/fd/%d", fd), ts, 0) + } + + doChmod := func() error { + if usePath { + return unix.Fchmodat(dirfd, baseName, uint32(mode), unix.AT_SYMLINK_NOFOLLOW) + } + return unix.Fchmod(fd, uint32(mode)) + } + + if err := doChown(); err != nil { + if !options.IgnoreChownErrors { + return fmt.Errorf("chown %q to %d:%d: %w", metadata.Name, metadata.UID, metadata.GID, err) + } + } + + canIgnore := func(err error) bool { + return err == nil || errors.Is(err, unix.ENOSYS) || errors.Is(err, unix.ENOTSUP) + } + + for k, v := range metadata.Xattrs { + if _, found := xattrsToIgnore[k]; found { + continue + } + data, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return fmt.Errorf("decode xattr %q: %w", v, err) + } + if err := doSetXattr(k, data); !canIgnore(err) { + return fmt.Errorf("set xattr %s=%q for %q: %w", k, data, metadata.Name, err) + } + } + + if err := doUtimes(); !canIgnore(err) { + return fmt.Errorf("set utimes for %q: %w", metadata.Name, err) + } + + if err := doChmod(); !canIgnore(err) { + return fmt.Errorf("chmod %q: %w", metadata.Name, err) + } + return nil +} + +func openFileUnderRootFallback(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) { + root := fmt.Sprintf("/proc/self/fd/%d", dirfd) + + targetRoot, err := os.Readlink(root) + if err != nil { + return -1, err + } + + hasNoFollow := (flags & unix.O_NOFOLLOW) != 0 + + var fd int + // If O_NOFOLLOW is specified in the flags, then resolve only the parent directory and use the + // last component as the path to openat(). + if hasNoFollow { + dirName := filepath.Dir(name) + if dirName != "" { + newRoot, err := securejoin.SecureJoin(root, filepath.Dir(name)) + if err != nil { + return -1, err + } + root = newRoot + } + + parentDirfd, err := unix.Open(root, unix.O_PATH, 0) + if err != nil { + return -1, err + } + defer unix.Close(parentDirfd) + + fd, err = unix.Openat(parentDirfd, filepath.Base(name), int(flags), uint32(mode)) + if err != nil { + return -1, err + } + } else { + newPath, err := securejoin.SecureJoin(root, name) + if err != nil { + return -1, err + } + fd, err = unix.Openat(dirfd, newPath, int(flags), uint32(mode)) + if err != nil { + return -1, err + } + } + + target, err := os.Readlink(fmt.Sprintf("/proc/self/fd/%d", fd)) + if err != nil { + unix.Close(fd) + return -1, err + } + + // Add an additional check to make sure the opened fd is inside the rootfs + if !strings.HasPrefix(target, targetRoot) { + unix.Close(fd) + return -1, fmt.Errorf("while resolving %q. It resolves outside the root directory", name) + } + + return fd, err +} + +func openFileUnderRootOpenat2(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) { + how := unix.OpenHow{ + Flags: flags, + Mode: uint64(mode & 0o7777), + Resolve: unix.RESOLVE_IN_ROOT, + } + return unix.Openat2(dirfd, name, &how) +} + +// skipOpenat2 is set when openat2 is not supported by the underlying kernel and avoid +// using it again. +var skipOpenat2 int32 + +// openFileUnderRootRaw tries to open a file using openat2 and if it is not supported fallbacks to a +// userspace lookup. 
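For reference, the openat2(2) pattern the functions below rely on, as a self-contained sketch (the paths are illustrative): RESOLVE_IN_ROOT confines resolution, including ".." and symlinks, to the tree rooted at dirfd, while ENOSYS signals that the kernel predates openat2 and the userspace fallback is needed.

package main

import (
	"errors"
	"fmt"

	"golang.org/x/sys/unix"
)

// openInRoot resolves name strictly inside the tree rooted at dirfd.
func openInRoot(dirfd int, name string) (int, error) {
	how := unix.OpenHow{
		Flags:   unix.O_RDONLY,
		Resolve: unix.RESOLVE_IN_ROOT,
	}
	fd, err := unix.Openat2(dirfd, name, &how)
	if errors.Is(err, unix.ENOSYS) {
		// Pre-5.6 kernel: a caller like openFileUnderRootRaw records this
		// and switches to the userspace fallback for the rest of the run.
		return -1, err
	}
	return fd, err
}

func main() {
	rootfs, err := unix.Open("/", unix.O_PATH, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(rootfs)

	// ".." components are clamped at dirfd, so this cannot escape the root.
	fd, err := openInRoot(rootfs, "../etc/hostname")
	fmt.Println(fd, err)
	if err == nil {
		unix.Close(fd)
	}
}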
+func openFileUnderRootRaw(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) { + var fd int + var err error + if atomic.LoadInt32(&skipOpenat2) > 0 { + fd, err = openFileUnderRootFallback(dirfd, name, flags, mode) + } else { + fd, err = openFileUnderRootOpenat2(dirfd, name, flags, mode) + // If the function failed with ENOSYS, switch off the support for openat2 + // and fallback to using safejoin. + if err != nil && errors.Is(err, unix.ENOSYS) { + atomic.StoreInt32(&skipOpenat2, 1) + fd, err = openFileUnderRootFallback(dirfd, name, flags, mode) + } + } + return fd, err +} + +// openFileUnderRoot safely opens a file under the specified root directory using openat2 +// name is the path to open relative to dirfd. +// dirfd is an open file descriptor to the target checkout directory. +// flags are the flags to pass to the open syscall. +// mode specifies the mode to use for newly created files. +func openFileUnderRoot(name string, dirfd int, flags uint64, mode os.FileMode) (*os.File, error) { + fd, err := openFileUnderRootRaw(dirfd, name, flags, mode) + if err == nil { + return os.NewFile(uintptr(fd), name), nil + } + + hasCreate := (flags & unix.O_CREAT) != 0 + if errors.Is(err, unix.ENOENT) && hasCreate { + parent := filepath.Dir(name) + if parent != "" { + newDirfd, err2 := openOrCreateDirUnderRoot(parent, dirfd, 0) + if err2 == nil { + defer newDirfd.Close() + fd, err := openFileUnderRootRaw(int(newDirfd.Fd()), filepath.Base(name), flags, mode) + if err == nil { + return os.NewFile(uintptr(fd), name), nil + } + } + } + } + return nil, fmt.Errorf("open %q under the rootfs: %w", name, err) +} + +// openOrCreateDirUnderRoot safely opens a directory or create it if it is missing. +// name is the path to open relative to dirfd. +// dirfd is an open file descriptor to the target checkout directory. +// mode specifies the mode to use for newly created files. +func openOrCreateDirUnderRoot(name string, dirfd int, mode os.FileMode) (*os.File, error) { + fd, err := openFileUnderRootRaw(dirfd, name, unix.O_DIRECTORY|unix.O_RDONLY, mode) + if err == nil { + return os.NewFile(uintptr(fd), name), nil + } + + if errors.Is(err, unix.ENOENT) { + parent := filepath.Dir(name) + if parent != "" { + pDir, err2 := openOrCreateDirUnderRoot(parent, dirfd, mode) + if err2 != nil { + return nil, err + } + defer pDir.Close() + + baseName := filepath.Base(name) + + if err2 := unix.Mkdirat(int(pDir.Fd()), baseName, 0o755); err2 != nil { + return nil, err + } + + fd, err = openFileUnderRootRaw(int(pDir.Fd()), baseName, unix.O_DIRECTORY|unix.O_RDONLY, mode) + if err == nil { + return os.NewFile(uintptr(fd), name), nil + } + } + } + return nil, err +} + +func (c *chunkedDiffer) prepareCompressedStreamToFile(partCompression compressedFileType, from io.Reader, mf *missingFileChunk) (compressedFileType, error) { + switch { + case partCompression == fileTypeHole: + // The entire part is a hole. Do not need to read from a file. + c.rawReader = nil + return fileTypeHole, nil + case mf.Hole: + // Only the missing chunk in the requested part refers to a hole. + // The received data must be discarded. 
+ limitReader := io.LimitReader(from, mf.CompressedSize) + _, err := io.CopyBuffer(io.Discard, limitReader, c.copyBuffer) + return fileTypeHole, err + case partCompression == fileTypeZstdChunked: + c.rawReader = io.LimitReader(from, mf.CompressedSize) + if c.zstdReader == nil { + r, err := zstd.NewReader(c.rawReader) + if err != nil { + return partCompression, err + } + c.zstdReader = r + } else { + if err := c.zstdReader.Reset(c.rawReader); err != nil { + return partCompression, err + } + } + case partCompression == fileTypeEstargz: + c.rawReader = io.LimitReader(from, mf.CompressedSize) + if c.gzipReader == nil { + r, err := pgzip.NewReader(c.rawReader) + if err != nil { + return partCompression, err + } + c.gzipReader = r + } else { + if err := c.gzipReader.Reset(c.rawReader); err != nil { + return partCompression, err + } + } + case partCompression == fileTypeNoCompression: + c.rawReader = io.LimitReader(from, mf.UncompressedSize) + default: + return partCompression, fmt.Errorf("unknown file type %q", c.fileType) + } + return partCompression, nil +} + +// hashHole writes SIZE zeros to the specified hasher +func hashHole(h hash.Hash, size int64, copyBuffer []byte) error { + count := int64(len(copyBuffer)) + if size < count { + count = size + } + for i := int64(0); i < count; i++ { + copyBuffer[i] = 0 + } + for size > 0 { + count = int64(len(copyBuffer)) + if size < count { + count = size + } + if _, err := h.Write(copyBuffer[:count]); err != nil { + return err + } + size -= count + } + return nil +} + +// appendHole creates a hole with the specified size at the open fd. +func appendHole(fd int, size int64) error { + off, err := unix.Seek(fd, size, unix.SEEK_CUR) + if err != nil { + return err + } + // Make sure the file size is changed. It might be the last hole and no other data written afterwards. 
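The Ftruncate below pins the file size; the overall effect of this seek-then-truncate pattern can be seen in a standalone sketch (the temporary file and sizes are illustrative):

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.CreateTemp("", "sparse-demo")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	fd := int(f.Fd())

	// Seek forward without writing: the skipped range becomes a hole.
	off, err := unix.Seek(fd, 1<<20, unix.SEEK_CUR)
	if err != nil {
		panic(err)
	}
	// As in appendHole, truncate up to the new offset so the size sticks
	// even when the hole is the last thing in the file.
	if err := unix.Ftruncate(fd, off); err != nil {
		panic(err)
	}

	st, err := f.Stat()
	if err != nil {
		panic(err)
	}
	fmt.Println(st.Size()) // 1048576; the hole reads back as zeros
}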
+ if err := unix.Ftruncate(fd, off); err != nil { + return err + } + return nil +} + +func (c *chunkedDiffer) appendCompressedStreamToFile(compression compressedFileType, destFile *destinationFile, size int64) error { + switch compression { + case fileTypeZstdChunked: + defer c.zstdReader.Reset(nil) + if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.zstdReader, size), c.copyBuffer); err != nil { + return err + } + case fileTypeEstargz: + defer c.gzipReader.Close() + if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.gzipReader, size), c.copyBuffer); err != nil { + return err + } + case fileTypeNoCompression: + if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.rawReader, size), c.copyBuffer); err != nil { + return err + } + case fileTypeHole: + if err := appendHole(int(destFile.file.Fd()), size); err != nil { + return err + } + if destFile.hash != nil { + if err := hashHole(destFile.hash, size, c.copyBuffer); err != nil { + return err + } + } + default: + return fmt.Errorf("unknown file type %q", c.fileType) + } + return nil +} + +type destinationFile struct { + digester digest.Digester + dirfd int + file *os.File + hash hash.Hash + metadata *internal.FileMetadata + options *archive.TarOptions + skipValidation bool + to io.Writer +} + +func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options *archive.TarOptions, skipValidation bool) (*destinationFile, error) { + file, err := openFileUnderRoot(metadata.Name, dirfd, newFileFlags, 0) + if err != nil { + return nil, err + } + + var digester digest.Digester + var hash hash.Hash + var to io.Writer + + if skipValidation { + to = file + } else { + digester = digest.Canonical.Digester() + hash = digester.Hash() + to = io.MultiWriter(file, hash) + } + + return &destinationFile{ + file: file, + digester: digester, + hash: hash, + to: to, + metadata: metadata, + options: options, + dirfd: dirfd, + skipValidation: skipValidation, + }, nil +} + +func (d *destinationFile) Close() (Err error) { + defer func() { + err := d.file.Close() + if Err == nil { + Err = err + } + }() + + if !d.skipValidation { + manifestChecksum, err := digest.Parse(d.metadata.Digest) + if err != nil { + return err + } + if d.digester.Digest() != manifestChecksum { + return fmt.Errorf("checksum mismatch for %q (got %q instead of %q)", d.file.Name(), d.digester.Digest(), manifestChecksum) + } + } + + return setFileAttrs(d.dirfd, d.file, os.FileMode(d.metadata.Mode), d.metadata, d.options, false) +} + +func closeDestinationFiles(files chan *destinationFile, errors chan error) { + for f := range files { + errors <- f.Close() + } + close(errors) +} + +func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) (Err error) { + var destFile *destinationFile + + filesToClose := make(chan *destinationFile, 3) + closeFilesErrors := make(chan error, 2) + + go closeDestinationFiles(filesToClose, closeFilesErrors) + defer func() { + close(filesToClose) + for e := range closeFilesErrors { + if e != nil && Err == nil { + Err = e + } + } + }() + + for _, missingPart := range missingParts { + var part io.ReadCloser + partCompression := c.fileType + switch { + case missingPart.Hole: + partCompression = fileTypeHole + case missingPart.OriginFile != nil: + var err error + part, err = missingPart.OriginFile.OpenFile() + if err != nil { + return err + } + partCompression = fileTypeNoCompression + case missingPart.SourceChunk != nil: + select { + case p := 
<-streams: + part = p + case err := <-errs: + if err == nil { + return errors.New("not enough data returned from the server") + } + return err + } + if part == nil { + return errors.New("invalid stream returned") + } + default: + return errors.New("internal error: missing part misses both local and remote data stream") + } + + for _, mf := range missingPart.Chunks { + if mf.Gap > 0 { + limitReader := io.LimitReader(part, mf.Gap) + _, err := io.CopyBuffer(io.Discard, limitReader, c.copyBuffer) + if err != nil { + Err = err + goto exit + } + continue + } + + if mf.File.Name == "" { + Err = errors.New("file name empty") + goto exit + } + + compression, err := c.prepareCompressedStreamToFile(partCompression, part, &mf) + if err != nil { + Err = err + goto exit + } + + // Open the new file if it is different that what is already + // opened + if destFile == nil || destFile.metadata.Name != mf.File.Name { + var err error + if destFile != nil { + cleanup: + for { + select { + case err = <-closeFilesErrors: + if err != nil { + Err = err + goto exit + } + default: + break cleanup + } + } + filesToClose <- destFile + } + destFile, err = openDestinationFile(dirfd, mf.File, options, c.skipValidation) + if err != nil { + Err = err + goto exit + } + } + + if err := c.appendCompressedStreamToFile(compression, destFile, mf.UncompressedSize); err != nil { + Err = err + goto exit + } + if c.rawReader != nil { + if _, err := io.CopyBuffer(io.Discard, c.rawReader, c.copyBuffer); err != nil { + Err = err + goto exit + } + } + } + exit: + if part != nil { + part.Close() + if Err != nil { + break + } + } + } + + if destFile != nil { + return destFile.Close() + } + + return nil +} + +func mergeMissingChunks(missingParts []missingPart, target int) []missingPart { + getGap := func(missingParts []missingPart, i int) int { + prev := missingParts[i-1].SourceChunk.Offset + missingParts[i-1].SourceChunk.Length + return int(missingParts[i].SourceChunk.Offset - prev) + } + getCost := func(missingParts []missingPart, i int) int { + cost := getGap(missingParts, i) + if missingParts[i-1].OriginFile != nil { + cost += int(missingParts[i-1].SourceChunk.Length) + } + if missingParts[i].OriginFile != nil { + cost += int(missingParts[i].SourceChunk.Length) + } + return cost + } + + // simple case: merge chunks from the same file. + newMissingParts := missingParts[0:1] + prevIndex := 0 + for i := 1; i < len(missingParts); i++ { + gap := getGap(missingParts, i) + if gap == 0 && missingParts[prevIndex].OriginFile == nil && + missingParts[i].OriginFile == nil && + !missingParts[prevIndex].Hole && !missingParts[i].Hole && + len(missingParts[prevIndex].Chunks) == 1 && len(missingParts[i].Chunks) == 1 && + missingParts[prevIndex].Chunks[0].File.Name == missingParts[i].Chunks[0].File.Name { + missingParts[prevIndex].SourceChunk.Length += uint64(gap) + missingParts[i].SourceChunk.Length + missingParts[prevIndex].Chunks[0].CompressedSize += missingParts[i].Chunks[0].CompressedSize + missingParts[prevIndex].Chunks[0].UncompressedSize += missingParts[i].Chunks[0].UncompressedSize + } else { + newMissingParts = append(newMissingParts, missingParts[i]) + prevIndex++ + } + } + missingParts = newMissingParts + + if len(missingParts) <= target { + return missingParts + } + + // this implementation doesn't account for duplicates, so it could merge + // more than necessary to reach the specified target. Since target itself + // is a heuristic value, it doesn't matter. 
+ costs := make([]int, len(missingParts)-1) + for i := 1; i < len(missingParts); i++ { + costs[i-1] = getCost(missingParts, i) + } + sort.Ints(costs) + + toShrink := len(missingParts) - target + if toShrink >= len(costs) { + toShrink = len(costs) - 1 + } + targetValue := costs[toShrink] + + newMissingParts = missingParts[0:1] + for i := 1; i < len(missingParts); i++ { + if getCost(missingParts, i) > targetValue { + newMissingParts = append(newMissingParts, missingParts[i]) + } else { + gap := getGap(missingParts, i) + prev := &newMissingParts[len(newMissingParts)-1] + prev.SourceChunk.Length += uint64(gap) + missingParts[i].SourceChunk.Length + prev.Hole = false + prev.OriginFile = nil + if gap > 0 { + gapFile := missingFileChunk{ + Gap: int64(gap), + } + prev.Chunks = append(prev.Chunks, gapFile) + } + prev.Chunks = append(prev.Chunks, missingParts[i].Chunks...) + } + } + return newMissingParts +} + +func (c *chunkedDiffer) retrieveMissingFiles(stream ImageSourceSeekable, dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) error { + var chunksToRequest []ImageSourceChunk + + calculateChunksToRequest := func() { + chunksToRequest = []ImageSourceChunk{} + for _, c := range missingParts { + if c.OriginFile == nil && !c.Hole { + chunksToRequest = append(chunksToRequest, *c.SourceChunk) + } + } + } + + calculateChunksToRequest() + + // There are some missing files. Prepare a multirange request for the missing chunks. + var streams chan io.ReadCloser + var err error + var errs chan error + for { + streams, errs, err = stream.GetBlobAt(chunksToRequest) + if err == nil { + break + } + + if _, ok := err.(ErrBadRequest); ok { + requested := len(missingParts) + // If the server cannot handle at least 64 chunks in a single request, just give up. + if requested < 64 { + return err + } + + // Merge more chunks to request + missingParts = mergeMissingChunks(missingParts, requested/2) + calculateChunksToRequest() + continue + } + return err + } + + if err := c.storeMissingFiles(streams, errs, dest, dirfd, missingParts, options); err != nil { + return err + } + return nil +} + +func safeMkdir(dirfd int, mode os.FileMode, name string, metadata *internal.FileMetadata, options *archive.TarOptions) error { + parent := filepath.Dir(name) + base := filepath.Base(name) + + parentFd := dirfd + if parent != "." { + parentFile, err := openOrCreateDirUnderRoot(parent, dirfd, 0) + if err != nil { + return err + } + defer parentFile.Close() + parentFd = int(parentFile.Fd()) + } + + if err := unix.Mkdirat(parentFd, base, uint32(mode)); err != nil { + if !os.IsExist(err) { + return fmt.Errorf("mkdir %q: %w", name, err) + } + } + + file, err := openFileUnderRoot(base, parentFd, unix.O_DIRECTORY|unix.O_RDONLY, 0) + if err != nil { + return err + } + defer file.Close() + + return setFileAttrs(dirfd, file, mode, metadata, options, false) +} + +func safeLink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error { + sourceFile, err := openFileUnderRoot(metadata.Linkname, dirfd, unix.O_PATH|unix.O_RDONLY|unix.O_NOFOLLOW, 0) + if err != nil { + return err + } + defer sourceFile.Close() + + destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name) + destDirFd := dirfd + if destDir != "." 
{ + f, err := openOrCreateDirUnderRoot(destDir, dirfd, 0) + if err != nil { + return err + } + defer f.Close() + destDirFd = int(f.Fd()) + } + + err = doHardLink(int(sourceFile.Fd()), destDirFd, destBase) + if err != nil { + return fmt.Errorf("create hardlink %q pointing to %q: %w", metadata.Name, metadata.Linkname, err) + } + + newFile, err := openFileUnderRoot(metadata.Name, dirfd, unix.O_WRONLY|unix.O_NOFOLLOW, 0) + if err != nil { + // If the target is a symlink, open the file with O_PATH. + if errors.Is(err, unix.ELOOP) { + newFile, err := openFileUnderRoot(metadata.Name, dirfd, unix.O_PATH|unix.O_NOFOLLOW, 0) + if err != nil { + return err + } + defer newFile.Close() + + return setFileAttrs(dirfd, newFile, mode, metadata, options, true) + } + return err + } + defer newFile.Close() + + return setFileAttrs(dirfd, newFile, mode, metadata, options, false) +} + +func safeSymlink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error { + destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name) + destDirFd := dirfd + if destDir != "." { + f, err := openOrCreateDirUnderRoot(destDir, dirfd, 0) + if err != nil { + return err + } + defer f.Close() + destDirFd = int(f.Fd()) + } + + if err := unix.Symlinkat(metadata.Linkname, destDirFd, destBase); err != nil { + return fmt.Errorf("create symlink %q pointing to %q: %w", metadata.Name, metadata.Linkname, err) + } + return nil +} + +type whiteoutHandler struct { + Dirfd int + Root string +} + +func (d whiteoutHandler) Setxattr(path, name string, value []byte) error { + file, err := openOrCreateDirUnderRoot(path, d.Dirfd, 0) + if err != nil { + return err + } + defer file.Close() + + if err := unix.Fsetxattr(int(file.Fd()), name, value, 0); err != nil { + return fmt.Errorf("set xattr %s=%q for %q: %w", name, value, path, err) + } + return nil +} + +func (d whiteoutHandler) Mknod(path string, mode uint32, dev int) error { + dir := filepath.Dir(path) + base := filepath.Base(path) + + dirfd := d.Dirfd + if dir != "" { + dir, err := openOrCreateDirUnderRoot(dir, d.Dirfd, 0) + if err != nil { + return err + } + defer dir.Close() + + dirfd = int(dir.Fd()) + } + + if err := unix.Mknodat(dirfd, base, mode, dev); err != nil { + return fmt.Errorf("mknod %q: %w", path, err) + } + + return nil +} + +func checkChownErr(err error, name string, uid, gid int) error { + if errors.Is(err, syscall.EINVAL) { + return fmt.Errorf(`potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally and run "podman system migrate": %w`, uid, gid, name, err) + } + return err +} + +func (d whiteoutHandler) Chown(path string, uid, gid int) error { + file, err := openFileUnderRoot(path, d.Dirfd, unix.O_PATH, 0) + if err != nil { + return err + } + defer file.Close() + + if err := unix.Fchownat(int(file.Fd()), "", uid, gid, unix.AT_EMPTY_PATH); err != nil { + var stat unix.Stat_t + if unix.Fstat(int(file.Fd()), &stat) == nil { + if stat.Uid == uint32(uid) && stat.Gid == uint32(gid) { + return nil + } + } + return checkChownErr(err, path, uid, gid) + } + return nil +} + +type hardLinkToCreate struct { + dest string + dirfd int + mode os.FileMode + metadata *internal.FileMetadata +} + +func parseBooleanPullOption(storeOpts *storage.StoreOptions, name string, def bool) bool { + if value, ok := storeOpts.PullOptions[name]; ok { + return strings.ToLower(value) == "true" + } + return def +} + +type findAndCopyFileOptions struct { + 
useHardLinks bool + ostreeRepos []string + options *archive.TarOptions +} + +func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *internal.FileMetadata, copyOptions *findAndCopyFileOptions, mode os.FileMode) (bool, error) { + finalizeFile := func(dstFile *os.File) error { + if dstFile != nil { + defer dstFile.Close() + if err := setFileAttrs(dirfd, dstFile, mode, r, copyOptions.options, false); err != nil { + return err + } + } + return nil + } + + found, dstFile, _, err := findFileInOtherLayers(c.layersCache, r, dirfd, copyOptions.useHardLinks) + if err != nil { + return false, err + } + if found { + if err := finalizeFile(dstFile); err != nil { + return false, err + } + return true, nil + } + + found, dstFile, _, err = findFileInOSTreeRepos(r, copyOptions.ostreeRepos, dirfd, copyOptions.useHardLinks) + if err != nil { + return false, err + } + if found { + if err := finalizeFile(dstFile); err != nil { + return false, err + } + return true, nil + } + + return false, nil +} + +func makeEntriesFlat(mergedEntries []internal.FileMetadata) ([]internal.FileMetadata, error) { + var new []internal.FileMetadata + + hashes := make(map[string]string) + for i := range mergedEntries { + if mergedEntries[i].Type != TypeReg { + continue + } + if mergedEntries[i].Digest == "" { + return nil, fmt.Errorf("missing digest for %q", mergedEntries[i].Name) + } + digest, err := digest.Parse(mergedEntries[i].Digest) + if err != nil { + return nil, err + } + d := digest.Encoded() + + if hashes[d] != "" { + continue + } + hashes[d] = d + + mergedEntries[i].Name = fmt.Sprintf("%s/%s", d[0:2], d[2:]) + + new = append(new, mergedEntries[i]) + } + return new, nil +} + +func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, differOpts *graphdriver.DifferOptions) (graphdriver.DriverWithDifferOutput, error) { + defer c.layersCache.release() + defer func() { + if c.zstdReader != nil { + c.zstdReader.Close() + } + }() + + // stream to use for reading the zstd:chunked or Estargz file. + stream := c.stream + + if c.convertToZstdChunked { + fileSource, diffID, annotations, err := convertTarToZstdChunked(dest, c.blobSize, c.stream) + if err != nil { + return graphdriver.DriverWithDifferOutput{}, err + } + // fileSource is a O_TMPFILE file descriptor, so we + // need to keep it open until the entire file is processed. + defer fileSource.Close() + + manifest, tarSplit, tocOffset, err := readZstdChunkedManifest(fileSource, c.blobSize, annotations) + if err != nil { + return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("read zstd:chunked manifest: %w", err) + } + + // Use the new file for accessing the zstd:chunked file. + stream = fileSource + + // fill the chunkedDiffer with the data we just read. + c.fileType = fileTypeZstdChunked + c.manifest = manifest + c.tarSplit = tarSplit + // since we retrieved the whole file and it was validated, use the diffID instead of the TOC digest. + c.contentDigest = diffID + c.tocOffset = tocOffset + + // the file was generated by us and the digest for each file was already computed, no need to validate it again. 
+ c.skipValidation = true + } + + lcd := chunkedLayerData{ + Format: differOpts.Format, + } + + json := jsoniter.ConfigCompatibleWithStandardLibrary + lcdBigData, err := json.Marshal(lcd) + if err != nil { + return graphdriver.DriverWithDifferOutput{}, err + } + + // Generate the manifest + toc, err := unmarshalToc(c.manifest) + if err != nil { + return graphdriver.DriverWithDifferOutput{}, err + } + + output := graphdriver.DriverWithDifferOutput{ + Differ: c, + TarSplit: c.tarSplit, + BigData: map[string][]byte{ + bigDataKey: c.manifest, + chunkedLayerDataKey: lcdBigData, + }, + Artifacts: map[string]interface{}{ + tocKey: toc, + }, + TOCDigest: c.contentDigest, + } + + if !parseBooleanPullOption(c.storeOpts, "enable_partial_images", false) { + return output, errors.New("enable_partial_images not configured") + } + + // When the hard links deduplication is used, file attributes are ignored because setting them + // modifies the source file as well. + useHardLinks := parseBooleanPullOption(c.storeOpts, "use_hard_links", false) + + // List of OSTree repositories to use for deduplication + ostreeRepos := strings.Split(c.storeOpts.PullOptions["ostree_repos"], ":") + + whiteoutConverter := archive.GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData) + + var missingParts []missingPart + + output.UIDs, output.GIDs = collectIDs(toc.Entries) + + mergedEntries, totalSize, err := c.mergeTocEntries(c.fileType, toc.Entries) + if err != nil { + return output, err + } + + output.Size = totalSize + + if err := maybeDoIDRemap(mergedEntries, options); err != nil { + return output, err + } + + if options.ForceMask != nil { + uid, gid, mode, err := archive.GetFileOwner(dest) + if err == nil { + value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode) + if err := unix.Setxattr(dest, containersOverrideXattr, []byte(value), 0); err != nil { + return output, err + } + } + } + + dirfd, err := unix.Open(dest, unix.O_RDONLY|unix.O_PATH, 0) + if err != nil { + return output, fmt.Errorf("cannot open %q: %w", dest, err) + } + defer unix.Close(dirfd) + + if differOpts != nil && differOpts.Format == graphdriver.DifferOutputFormatFlat { + mergedEntries, err = makeEntriesFlat(mergedEntries) + if err != nil { + return output, err + } + createdDirs := make(map[string]struct{}) + for _, e := range mergedEntries { + d := e.Name[0:2] + if _, found := createdDirs[d]; !found { + unix.Mkdirat(dirfd, d, 0o755) + createdDirs[d] = struct{}{} + } + } + } + + // hardlinks can point to missing files. 
So create them after all files + // are retrieved + var hardLinks []hardLinkToCreate + + missingPartsSize, totalChunksSize := int64(0), int64(0) + + copyOptions := findAndCopyFileOptions{ + useHardLinks: useHardLinks, + ostreeRepos: ostreeRepos, + options: options, + } + + type copyFileJob struct { + njob int + index int + mode os.FileMode + metadata *internal.FileMetadata + + found bool + err error + } + + var wg sync.WaitGroup + + copyResults := make([]copyFileJob, len(mergedEntries)) + + copyFileJobs := make(chan copyFileJob) + defer func() { + if copyFileJobs != nil { + close(copyFileJobs) + } + wg.Wait() + }() + + for i := 0; i < copyGoRoutines; i++ { + wg.Add(1) + jobs := copyFileJobs + + go func() { + defer wg.Done() + for job := range jobs { + found, err := c.findAndCopyFile(dirfd, job.metadata, ©Options, job.mode) + job.err = err + job.found = found + copyResults[job.njob] = job + } + }() + } + + filesToWaitFor := 0 + for i, r := range mergedEntries { + if options.ForceMask != nil { + value := fmt.Sprintf("%d:%d:0%o", r.UID, r.GID, r.Mode&0o7777) + r.Xattrs[containersOverrideXattr] = base64.StdEncoding.EncodeToString([]byte(value)) + r.Mode = int64(*options.ForceMask) + } + + mode := os.FileMode(r.Mode) + + r.Name = filepath.Clean(r.Name) + r.Linkname = filepath.Clean(r.Linkname) + + t, err := typeToTarType(r.Type) + if err != nil { + return output, err + } + if whiteoutConverter != nil { + hdr := archivetar.Header{ + Typeflag: t, + Name: r.Name, + Linkname: r.Linkname, + Size: r.Size, + Mode: r.Mode, + Uid: r.UID, + Gid: r.GID, + } + handler := whiteoutHandler{ + Dirfd: dirfd, + Root: dest, + } + writeFile, err := whiteoutConverter.ConvertReadWithHandler(&hdr, r.Name, &handler) + if err != nil { + return output, err + } + if !writeFile { + continue + } + } + switch t { + case tar.TypeReg: + // Create directly empty files. + if r.Size == 0 { + // Used to have a scope for cleanup. + createEmptyFile := func() error { + file, err := openFileUnderRoot(r.Name, dirfd, newFileFlags, 0) + if err != nil { + return err + } + defer file.Close() + if err := setFileAttrs(dirfd, file, mode, &r, options, false); err != nil { + return err + } + return nil + } + if err := createEmptyFile(); err != nil { + return output, err + } + continue + } + + case tar.TypeDir: + if err := safeMkdir(dirfd, mode, r.Name, &r, options); err != nil { + return output, err + } + continue + + case tar.TypeLink: + dest := dest + dirfd := dirfd + mode := mode + r := r + hardLinks = append(hardLinks, hardLinkToCreate{ + dest: dest, + dirfd: dirfd, + mode: mode, + metadata: &r, + }) + continue + + case tar.TypeSymlink: + if err := safeSymlink(dirfd, mode, &r, options); err != nil { + return output, err + } + continue + + case tar.TypeChar: + case tar.TypeBlock: + case tar.TypeFifo: + /* Ignore. */ + default: + return output, fmt.Errorf("invalid type %q", t) + } + + totalChunksSize += r.Size + + if t == tar.TypeReg { + index := i + njob := filesToWaitFor + job := copyFileJob{ + mode: mode, + metadata: &mergedEntries[index], + index: index, + njob: njob, + } + copyFileJobs <- job + filesToWaitFor++ + } + } + + close(copyFileJobs) + copyFileJobs = nil + + wg.Wait() + + for _, res := range copyResults[:filesToWaitFor] { + r := &mergedEntries[res.index] + + if res.err != nil { + return output, res.err + } + // the file was already copied to its destination + // so nothing left to do. 
+ if res.found { + continue + } + + missingPartsSize += r.Size + + remainingSize := r.Size + + // the file is missing, attempt to find individual chunks. + for _, chunk := range r.Chunks { + compressedSize := int64(chunk.EndOffset - chunk.Offset) + size := remainingSize + if chunk.ChunkSize > 0 { + size = chunk.ChunkSize + } + remainingSize = remainingSize - size + + rawChunk := ImageSourceChunk{ + Offset: uint64(chunk.Offset), + Length: uint64(compressedSize), + } + file := missingFileChunk{ + File: &mergedEntries[res.index], + CompressedSize: compressedSize, + UncompressedSize: size, + } + mp := missingPart{ + SourceChunk: &rawChunk, + Chunks: []missingFileChunk{ + file, + }, + } + + switch chunk.ChunkType { + case internal.ChunkTypeData: + root, path, offset, err := c.layersCache.findChunkInOtherLayers(chunk) + if err != nil { + return output, err + } + if offset >= 0 && validateChunkChecksum(chunk, root, path, offset, c.copyBuffer) { + missingPartsSize -= size + mp.OriginFile = &originFile{ + Root: root, + Path: path, + Offset: offset, + } + } + case internal.ChunkTypeZeros: + missingPartsSize -= size + mp.Hole = true + // Mark all chunks belonging to the missing part as holes + for i := range mp.Chunks { + mp.Chunks[i].Hole = true + } + } + missingParts = append(missingParts, mp) + } + } + // There are some missing files. Prepare a multirange request for the missing chunks. + if len(missingParts) > 0 { + missingParts = mergeMissingChunks(missingParts, maxNumberMissingChunks) + if err := c.retrieveMissingFiles(stream, dest, dirfd, missingParts, options); err != nil { + return output, err + } + } + + for _, m := range hardLinks { + if err := safeLink(m.dirfd, m.mode, m.metadata, options); err != nil { + return output, err + } + } + + if totalChunksSize > 0 { + logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingPartsSize, totalChunksSize, float32(missingPartsSize*100.0)/float32(totalChunksSize)) + } + + return output, nil +} + +func mustSkipFile(fileType compressedFileType, e internal.FileMetadata) bool { + // ignore the metadata files for the estargz format. + if fileType != fileTypeEstargz { + return false + } + switch e.Name { + // ignore the metadata files for the estargz format. 
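+ // These are eStargz bookkeeping entries (the prefetch landmark files and
+ // the TOC itself), not real layer content.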
+ case estargz.PrefetchLandmark, estargz.NoPrefetchLandmark, estargz.TOCTarName:
+ return true
+ }
+ return false
+}
+
+func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []internal.FileMetadata) ([]internal.FileMetadata, int64, error) {
+ var totalFilesSize int64
+
+ countNextChunks := func(start int) int {
+ count := 0
+ for _, e := range entries[start:] {
+ if e.Type != TypeChunk {
+ return count
+ }
+ count++
+ }
+ return count
+ }
+
+ size := 0
+ for _, entry := range entries {
+ if mustSkipFile(fileType, entry) {
+ continue
+ }
+ if entry.Type != TypeChunk {
+ size++
+ }
+ }
+
+ mergedEntries := make([]internal.FileMetadata, size)
+ m := 0
+ for i := 0; i < len(entries); i++ {
+ e := entries[i]
+ if mustSkipFile(fileType, e) {
+ continue
+ }
+
+ totalFilesSize += e.Size
+
+ if e.Type == TypeChunk {
+ return nil, -1, fmt.Errorf("chunk type without a regular file")
+ }
+
+ if e.Type == TypeReg {
+ nChunks := countNextChunks(i + 1)
+
+ e.Chunks = make([]*internal.FileMetadata, nChunks+1)
+ for j := 0; j <= nChunks; j++ {
+ e.Chunks[j] = &entries[i+j]
+ e.EndOffset = entries[i+j].EndOffset
+ }
+ i += nChunks
+ }
+ mergedEntries[m] = e
+ m++
+ }
+ // stargz/estargz doesn't store EndOffset, so calculate it here.
+ lastOffset := c.tocOffset
+ for i := len(mergedEntries) - 1; i >= 0; i-- {
+ if mergedEntries[i].EndOffset == 0 {
+ mergedEntries[i].EndOffset = lastOffset
+ }
+ if mergedEntries[i].Offset != 0 {
+ lastOffset = mergedEntries[i].Offset
+ }
+
+ lastChunkOffset := mergedEntries[i].EndOffset
+ for j := len(mergedEntries[i].Chunks) - 1; j >= 0; j-- {
+ mergedEntries[i].Chunks[j].EndOffset = lastChunkOffset
+ mergedEntries[i].Chunks[j].Size = mergedEntries[i].Chunks[j].EndOffset - mergedEntries[i].Chunks[j].Offset
+ lastChunkOffset = mergedEntries[i].Chunks[j].Offset
+ }
+ }
+ return mergedEntries, totalFilesSize, nil
+}
+
+// validateChunkChecksum checks if the file at $root/$path[offset:chunk.ChunkSize] has the
+// same digest as chunk.ChunkDigest.
+func validateChunkChecksum(chunk *internal.FileMetadata, root, path string, offset int64, copyBuffer []byte) bool {
+ parentDirfd, err := unix.Open(root, unix.O_PATH, 0)
+ if err != nil {
+ return false
+ }
+ defer unix.Close(parentDirfd)
+
+ fd, err := openFileUnderRoot(path, parentDirfd, unix.O_RDONLY, 0)
+ if err != nil {
+ return false
+ }
+ defer fd.Close()
+
+ if _, err := unix.Seek(int(fd.Fd()), offset, 0); err != nil {
+ return false
+ }
+
+ r := io.LimitReader(fd, chunk.ChunkSize)
+ digester := digest.Canonical.Digester()
+
+ if _, err := io.CopyBuffer(digester.Hash(), r, copyBuffer); err != nil {
+ return false
+ }
+
+ digest, err := digest.Parse(chunk.ChunkDigest)
+ if err != nil {
+ return false
+ }
+
+ return digester.Digest() == digest
+}
diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go b/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go
new file mode 100644
index 0000000000..8d3fcf2ba4
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go
@@ -0,0 +1,17 @@
+//go:build !linux
+// +build !linux
+
+package chunked
+
+import (
+ "context"
+ "errors"
+
+ storage "github.com/containers/storage"
+ graphdriver "github.com/containers/storage/drivers"
+)
+
+// GetDiffer returns a differ that can be used with ApplyDiffWithDiffer.
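+// On non-Linux platforms this is a stub that always reports the format as
+// unsupported.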
+func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
+ return nil, errors.New("format not supported on this system")
+}
diff --git a/vendor/github.com/containers/storage/pkg/config/config.go b/vendor/github.com/containers/storage/pkg/config/config.go
new file mode 100644
index 0000000000..20d72ca899
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/config/config.go
@@ -0,0 +1,342 @@
+package config
+
+import (
+ "fmt"
+ "os"
+)
+
+// ThinpoolOptionsConfig represents the "storage.options.thinpool"
+// TOML config table.
+type ThinpoolOptionsConfig struct {
+ // AutoExtendPercent determines the amount by which pool needs to be
+ // grown. This is specified in terms of % of pool size. So a value of
+ // 20 means that when threshold is hit, pool will be grown by 20% of
+ // existing pool size.
+ AutoExtendPercent string `toml:"autoextend_percent,omitempty"`
+
+ // AutoExtendThreshold determines the pool extension threshold in terms
+ // of percentage of pool size. For example, if threshold is 60, that
+ // means when pool is 60% full, threshold has been hit.
+ AutoExtendThreshold string `toml:"autoextend_threshold,omitempty"`
+
+ // BaseSize specifies the size to use when creating the base device,
+ // which limits the size of images and containers.
+ BaseSize string `toml:"basesize,omitempty"`
+
+ // BlockSize specifies a custom blocksize to use for the thin pool.
+ BlockSize string `toml:"blocksize,omitempty"`
+
+ // DirectLvmDevice specifies a custom block storage device to use for
+ // the thin pool.
+ DirectLvmDevice string `toml:"directlvm_device,omitempty"`
+
+ // DirectLvmDeviceForce wipes the device even if it already has a
+ // filesystem.
+ DirectLvmDeviceForce string `toml:"directlvm_device_force,omitempty"`
+
+ // Fs specifies the filesystem type to use for the base device.
+ Fs string `toml:"fs,omitempty"`
+
+ // LogLevel sets the log level of devicemapper.
+ LogLevel string `toml:"log_level,omitempty"`
+
+ // MetadataSize specifies the size of the metadata for the thinpool.
+ // It will be used with the `pvcreate --metadata` option.
+ MetadataSize string `toml:"metadatasize,omitempty"`
+
+ // MinFreeSpace specifies the minimum free space percent in a thin pool
+ // required for new device creation to succeed.
+ MinFreeSpace string `toml:"min_free_space,omitempty"`
+
+ // MkfsArg specifies extra mkfs arguments to be used when creating the
+ // base device.
+ MkfsArg string `toml:"mkfsarg,omitempty"`
+
+ // MountOpt specifies extra mount options used when mounting the thin
+ // devices.
+ MountOpt string `toml:"mountopt,omitempty"`
+
+ // Size
+ Size string `toml:"size,omitempty"`
+
+ // UseDeferredDeletion marks the device for deferred deletion.
+ UseDeferredDeletion string `toml:"use_deferred_deletion,omitempty"`
+
+ // UseDeferredRemoval marks the device for deferred removal.
+ UseDeferredRemoval string `toml:"use_deferred_removal,omitempty"`
+
+ // XfsNoSpaceMaxRetries specifies the maximum number of
+ // retries XFS should attempt to complete IO when ENOSPC (no space)
+ // error is returned by the underlying storage device.
+ XfsNoSpaceMaxRetries string `toml:"xfs_nospace_max_retries,omitempty"`
+}
+
+type AufsOptionsConfig struct {
+ // MountOpt specifies extra mount options used when mounting
+ MountOpt string `toml:"mountopt,omitempty"`
+}
+
+type BtrfsOptionsConfig struct {
+ // MinSpace is the minimal space allocated to the device
+ MinSpace string `toml:"min_space,omitempty"`
+ // Size
+ Size string `toml:"size,omitempty"`
+}
+
+type OverlayOptionsConfig struct {
+ // IgnoreChownErrors is a flag for whether chown errors should be
+ // ignored when building an image.
+ IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"`
+ // MountOpt specifies extra mount options used when mounting
+ MountOpt string `toml:"mountopt,omitempty"`
+ // Alternative program to use for the mount of the file system
+ MountProgram string `toml:"mount_program,omitempty"`
+ // Size
+ Size string `toml:"size,omitempty"`
+ // Inodes is used to set a maximum number of inodes for the container image.
+ Inodes string `toml:"inodes,omitempty"`
+ // Do not create a bind mount on the storage home
+ SkipMountHome string `toml:"skip_mount_home,omitempty"`
+ // ForceMask indicates the permissions mask (e.g. "0755") to use for new
+ // files and directories
+ ForceMask string `toml:"force_mask,omitempty"`
+}
+
+type VfsOptionsConfig struct {
+ // IgnoreChownErrors is a flag for whether chown errors should be
+ // ignored when building an image.
+ IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"`
+}
+
+type ZfsOptionsConfig struct {
+ // MountOpt specifies extra mount options used when mounting
+ MountOpt string `toml:"mountopt,omitempty"`
+ // Name is the filesystem name of the ZFS filesystem
+ Name string `toml:"fsname,omitempty"`
+ // Size
+ Size string `toml:"size,omitempty"`
+}
+
+// OptionsConfig represents the "storage.options" TOML config table.
+type OptionsConfig struct {
+ // AdditionalImageStores is the location of additional read/only
+ // image stores. Usually used to access Networked File System
+ // for shared image content.
+ AdditionalImageStores []string `toml:"additionalimagestores,omitempty"`
+
+ // ImageStore is the location of the image store, which is separate from
+ // the container store. This is usually not recommended, unless the user
+ // wants separate stores for images and containers.
+ ImageStore string `toml:"imagestore,omitempty"`
+
+ // AdditionalLayerStores is the location of additional read/only
+ // layer stores. Usually used to access Networked File System
+ // for shared image content.
+ // This API is experimental and can be changed without bumping the
+ // major version number.
+ AdditionalLayerStores []string `toml:"additionallayerstores,omitempty"`
+
+ // Size
+ Size string `toml:"size,omitempty"`
+
+ // RemapUIDs is a list of default UID mappings to use for layers.
+ RemapUIDs string `toml:"remap-uids,omitempty"`
+ // RemapGIDs is a list of default GID mappings to use for layers.
+ RemapGIDs string `toml:"remap-gids,omitempty"`
+ // IgnoreChownErrors is a flag for whether chown errors should be
+ // ignored when building an image.
+ IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"`
+
+ // ForceMask indicates the permissions mask (e.g. "0755") to use for new
+ // files and directories.
+ ForceMask os.FileMode `toml:"force_mask,omitempty"`
+
+ // RemapUser is the name of one or more entries in /etc/subuid which
+ // should be used to set up default UID mappings.
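+// For example, a hypothetical /etc/subuid entry granting one such range:
+//
+//	containers:100000:65536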
+ RemapUser string `toml:"remap-user,omitempty"`
+ // RemapGroup is the name of one or more entries in /etc/subgid which
+ // should be used to set up default GID mappings.
+ RemapGroup string `toml:"remap-group,omitempty"`
+
+ // RootAutoUsernsUser is the name of one or more entries in /etc/subuid and
+ // /etc/subgid which should be used to automatically set up a user namespace.
+ RootAutoUsernsUser string `toml:"root-auto-userns-user,omitempty"`
+
+ // AutoUsernsMinSize is the minimum size for a user namespace that is
+ // created automatically.
+ AutoUsernsMinSize uint32 `toml:"auto-userns-min-size,omitempty"`
+
+ // AutoUsernsMaxSize is the maximum size for a user namespace that is
+ // created automatically.
+ AutoUsernsMaxSize uint32 `toml:"auto-userns-max-size,omitempty"`
+
+ // Aufs container options to be handed to aufs drivers
+ Aufs struct{ AufsOptionsConfig } `toml:"aufs,omitempty"`
+
+ // Btrfs container options to be handed to btrfs drivers
+ Btrfs struct{ BtrfsOptionsConfig } `toml:"btrfs,omitempty"`
+
+ // Thinpool container options to be handed to thinpool drivers
+ Thinpool struct{ ThinpoolOptionsConfig } `toml:"thinpool,omitempty"`
+
+ // Overlay container options to be handed to overlay drivers
+ Overlay struct{ OverlayOptionsConfig } `toml:"overlay,omitempty"`
+
+ // Vfs container options to be handed to VFS drivers
+ Vfs struct{ VfsOptionsConfig } `toml:"vfs,omitempty"`
+
+ // Zfs container options to be handed to ZFS drivers
+ Zfs struct{ ZfsOptionsConfig } `toml:"zfs,omitempty"`
+
+ // Do not create a bind mount on the storage home
+ SkipMountHome string `toml:"skip_mount_home,omitempty"`
+
+ // Alternative program to use for the mount of the file system
+ MountProgram string `toml:"mount_program,omitempty"`
+
+ // MountOpt specifies extra mount options used when mounting
+ MountOpt string `toml:"mountopt,omitempty"`
+
+ // PullOptions specifies options to be handed to pull managers.
+ // This API is experimental and can be changed without bumping the major version number.
+ PullOptions map[string]string `toml:"pull_options,omitempty"`
+
+ // DisableVolatile doesn't allow volatile mounts when it is set.
+ DisableVolatile bool `toml:"disable-volatile,omitempty"` +} + +// GetGraphDriverOptions returns the driver specific options +func GetGraphDriverOptions(driverName string, options OptionsConfig) []string { + var doptions []string + switch driverName { + case "aufs": + if options.Aufs.MountOpt != "" { + return append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.Aufs.MountOpt)) + } else if options.MountOpt != "" { + doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.MountOpt)) + } + + case "btrfs": + if options.Btrfs.MinSpace != "" { + return append(doptions, fmt.Sprintf("%s.min_space=%s", driverName, options.Btrfs.MinSpace)) + } + if options.Btrfs.Size != "" { + doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Btrfs.Size)) + } else if options.Size != "" { + doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size)) + } + + case "devicemapper": + if options.Thinpool.AutoExtendPercent != "" { + doptions = append(doptions, fmt.Sprintf("dm.thinp_autoextend_percent=%s", options.Thinpool.AutoExtendPercent)) + } + if options.Thinpool.AutoExtendThreshold != "" { + doptions = append(doptions, fmt.Sprintf("dm.thinp_autoextend_threshold=%s", options.Thinpool.AutoExtendThreshold)) + } + if options.Thinpool.BaseSize != "" { + doptions = append(doptions, fmt.Sprintf("dm.basesize=%s", options.Thinpool.BaseSize)) + } + if options.Thinpool.BlockSize != "" { + doptions = append(doptions, fmt.Sprintf("dm.blocksize=%s", options.Thinpool.BlockSize)) + } + if options.Thinpool.DirectLvmDevice != "" { + doptions = append(doptions, fmt.Sprintf("dm.directlvm_device=%s", options.Thinpool.DirectLvmDevice)) + } + if options.Thinpool.DirectLvmDeviceForce != "" { + doptions = append(doptions, fmt.Sprintf("dm.directlvm_device_force=%s", options.Thinpool.DirectLvmDeviceForce)) + } + if options.Thinpool.Fs != "" { + doptions = append(doptions, fmt.Sprintf("dm.fs=%s", options.Thinpool.Fs)) + } + if options.Thinpool.LogLevel != "" { + doptions = append(doptions, fmt.Sprintf("dm.libdm_log_level=%s", options.Thinpool.LogLevel)) + } + if options.Thinpool.MetadataSize != "" { + doptions = append(doptions, fmt.Sprintf("dm.metadata_size=%s", options.Thinpool.MetadataSize)) + } + if options.Thinpool.MinFreeSpace != "" { + doptions = append(doptions, fmt.Sprintf("dm.min_free_space=%s", options.Thinpool.MinFreeSpace)) + } + if options.Thinpool.MkfsArg != "" { + doptions = append(doptions, fmt.Sprintf("dm.mkfsarg=%s", options.Thinpool.MkfsArg)) + } + if options.Thinpool.MountOpt != "" { + doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.Thinpool.MountOpt)) + } else if options.MountOpt != "" { + doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.MountOpt)) + } + + if options.Thinpool.Size != "" { + doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Thinpool.Size)) + } else if options.Size != "" { + doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size)) + } + + if options.Thinpool.UseDeferredDeletion != "" { + doptions = append(doptions, fmt.Sprintf("dm.use_deferred_deletion=%s", options.Thinpool.UseDeferredDeletion)) + } + if options.Thinpool.UseDeferredRemoval != "" { + doptions = append(doptions, fmt.Sprintf("dm.use_deferred_removal=%s", options.Thinpool.UseDeferredRemoval)) + } + if options.Thinpool.XfsNoSpaceMaxRetries != "" { + doptions = append(doptions, fmt.Sprintf("dm.xfs_nospace_max_retries=%s", options.Thinpool.XfsNoSpaceMaxRetries)) + 
} + + case "overlay", "overlay2": + if options.Overlay.IgnoreChownErrors != "" { + doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.Overlay.IgnoreChownErrors)) + } else if options.IgnoreChownErrors != "" { + doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.IgnoreChownErrors)) + } + if options.Overlay.MountProgram != "" { + doptions = append(doptions, fmt.Sprintf("%s.mount_program=%s", driverName, options.Overlay.MountProgram)) + } else if options.MountProgram != "" { + doptions = append(doptions, fmt.Sprintf("%s.mount_program=%s", driverName, options.MountProgram)) + } + if options.Overlay.MountOpt != "" { + doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.Overlay.MountOpt)) + } else if options.MountOpt != "" { + doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.MountOpt)) + } + if options.Overlay.Size != "" { + doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Overlay.Size)) + } else if options.Size != "" { + doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size)) + } + if options.Overlay.Inodes != "" { + doptions = append(doptions, fmt.Sprintf("%s.inodes=%s", driverName, options.Overlay.Inodes)) + } + if options.Overlay.SkipMountHome != "" { + doptions = append(doptions, fmt.Sprintf("%s.skip_mount_home=%s", driverName, options.Overlay.SkipMountHome)) + } else if options.SkipMountHome != "" { + doptions = append(doptions, fmt.Sprintf("%s.skip_mount_home=%s", driverName, options.SkipMountHome)) + } + if options.Overlay.ForceMask != "" { + doptions = append(doptions, fmt.Sprintf("%s.force_mask=%s", driverName, options.Overlay.ForceMask)) + } else if options.ForceMask != 0 { + doptions = append(doptions, fmt.Sprintf("%s.force_mask=%s", driverName, options.ForceMask)) + } + case "vfs": + if options.Vfs.IgnoreChownErrors != "" { + doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.Vfs.IgnoreChownErrors)) + } else if options.IgnoreChownErrors != "" { + doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.IgnoreChownErrors)) + } + + case "zfs": + if options.Zfs.Name != "" { + doptions = append(doptions, fmt.Sprintf("%s.fsname=%s", driverName, options.Zfs.Name)) + } + if options.Zfs.MountOpt != "" { + doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.Zfs.MountOpt)) + } else if options.MountOpt != "" { + doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.MountOpt)) + } + if options.Zfs.Size != "" { + doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Zfs.Size)) + } else if options.Size != "" { + doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size)) + } + } + return doptions +} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go new file mode 100644 index 0000000000..33bf7184e3 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go @@ -0,0 +1,813 @@ +//go:build linux && cgo +// +build linux,cgo + +package devicemapper + +import ( + "errors" + "fmt" + "os" + "runtime" + "unsafe" + + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// Same as DM_DEVICE_* enum values from libdevmapper.h +// nolint: unused +const ( + deviceCreate TaskType = iota + deviceReload + deviceRemove 
+ deviceRemoveAll
+ deviceSuspend
+ deviceResume
+ deviceInfo
+ deviceDeps
+ deviceRename
+ deviceVersion
+ deviceStatus
+ deviceTable
+ deviceWaitevent
+ deviceList
+ deviceClear
+ deviceMknodes
+ deviceListVersions
+ deviceTargetMsg
+ deviceSetGeometry
+)
+
+const (
+ addNodeOnResume AddNodeType = iota
+ addNodeOnCreate
+)
+
+// List of errors returned when using devicemapper.
+var (
+ ErrTaskRun = errors.New("dm_task_run failed")
+ ErrTaskSetName = errors.New("dm_task_set_name failed")
+ ErrTaskSetMessage = errors.New("dm_task_set_message failed")
+ ErrTaskSetAddNode = errors.New("dm_task_set_add_node failed")
+ ErrTaskSetRo = errors.New("dm_task_set_ro failed")
+ ErrTaskAddTarget = errors.New("dm_task_add_target failed")
+ ErrTaskSetSector = errors.New("dm_task_set_sector failed")
+ ErrTaskGetDeps = errors.New("dm_task_get_deps failed")
+ ErrTaskGetInfo = errors.New("dm_task_get_info failed")
+ ErrTaskGetDriverVersion = errors.New("dm_task_get_driver_version failed")
+ ErrTaskDeferredRemove = errors.New("dm_task_deferred_remove failed")
+ ErrTaskSetCookie = errors.New("dm_task_set_cookie failed")
+ ErrNilCookie = errors.New("cookie ptr can't be nil")
+ ErrGetBlockSize = errors.New("Can't get block size")
+ ErrUdevWait = errors.New("wait on udev cookie failed")
+ ErrSetDevDir = errors.New("dm_set_dev_dir failed")
+ ErrGetLibraryVersion = errors.New("dm_get_library_version failed")
+ ErrCreateRemoveTask = errors.New("Can't create task of type deviceRemove")
+ ErrRunRemoveDevice = errors.New("running RemoveDevice failed")
+ ErrInvalidAddNode = errors.New("Invalid AddNode type")
+ ErrBusy = errors.New("Device is Busy")
+ ErrDeviceIDExists = errors.New("Device Id Exists")
+ ErrEnxio = errors.New("No such device or address")
+)
+
+var (
+ dmSawBusy bool
+ dmSawExist bool
+ dmSawEnxio bool // No Such Device or Address
+)
+
+type (
+ // Task represents a devicemapper task (like lvcreate, etc.); a task is needed for each ioctl
+ // command to execute.
+ Task struct {
+ unmanaged *cdmTask
+ }
+ // Deps represents dependents (layers) of a device.
+ Deps struct {
+ Count uint32
+ Filler uint32
+ Device []uint64
+ }
+ // Info represents information about a device.
+ Info struct {
+ Exists int
+ Suspended int
+ LiveTable int
+ InactiveTable int
+ OpenCount int32
+ EventNr uint32
+ Major uint32
+ Minor uint32
+ ReadOnly int
+ TargetCount int32
+ DeferredRemove int
+ }
+ // TaskType represents a type of task.
+ TaskType int
+ // AddNodeType represents a type of node to be added.
+ AddNodeType int
+)
+
+// DeviceIDExists reports whether the error conveys that the device ID already
+// exists. This will be true if a device-create or snapshot-create operation
+// fails because the device or snapshot device already exists in the pool.
+// The current implementation is a little crude, as it scans the error string
+// for an exact pattern match; replacing it with a more robust implementation
+// is desirable.
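+//
+// An illustrative caller (a sketch, not code from this package) might probe
+// for a free device ID by retrying on this condition:
+//
+//	for {
+//		err := CreateDevice(poolName, deviceID)
+//		if err == nil {
+//			break
+//		}
+//		if !DeviceIDExists(err) {
+//			return err
+//		}
+//		deviceID++ // ID taken; try the next one
+//	}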
+func DeviceIDExists(err error) bool { + return fmt.Sprint(err) == fmt.Sprint(ErrDeviceIDExists) +} + +func (t *Task) destroy() { + if t != nil { + DmTaskDestroy(t.unmanaged) + runtime.SetFinalizer(t, nil) + } +} + +// TaskCreateNamed is a convenience function for TaskCreate when a name +// will be set on the task as well +func TaskCreateNamed(t TaskType, name string) (*Task, error) { + task := TaskCreate(t) + if task == nil { + return nil, fmt.Errorf("devicemapper: Can't create task of type %d", int(t)) + } + if err := task.setName(name); err != nil { + return nil, fmt.Errorf("devicemapper: Can't set task name %s", name) + } + return task, nil +} + +// TaskCreate initializes a devicemapper task of tasktype +func TaskCreate(tasktype TaskType) *Task { + Ctask := DmTaskCreate(int(tasktype)) + if Ctask == nil { + return nil + } + task := &Task{unmanaged: Ctask} + runtime.SetFinalizer(task, (*Task).destroy) + return task +} + +func (t *Task) run() error { + if res := DmTaskRun(t.unmanaged); res != 1 { + return ErrTaskRun + } + runtime.KeepAlive(t) + return nil +} + +func (t *Task) setName(name string) error { + if res := DmTaskSetName(t.unmanaged, name); res != 1 { + return ErrTaskSetName + } + return nil +} + +func (t *Task) setMessage(message string) error { + if res := DmTaskSetMessage(t.unmanaged, message); res != 1 { + return ErrTaskSetMessage + } + return nil +} + +func (t *Task) setSector(sector uint64) error { + if res := DmTaskSetSector(t.unmanaged, sector); res != 1 { + return ErrTaskSetSector + } + return nil +} + +func (t *Task) setCookie(cookie *uint, flags uint16) error { + if cookie == nil { + return ErrNilCookie + } + if res := DmTaskSetCookie(t.unmanaged, cookie, flags); res != 1 { + return ErrTaskSetCookie + } + return nil +} + +func (t *Task) setAddNode(addNode AddNodeType) error { + if addNode != addNodeOnResume && addNode != addNodeOnCreate { + return ErrInvalidAddNode + } + if res := DmTaskSetAddNode(t.unmanaged, addNode); res != 1 { + return ErrTaskSetAddNode + } + return nil +} + +func (t *Task) addTarget(start, size uint64, ttype, params string) error { + if res := DmTaskAddTarget(t.unmanaged, start, size, + ttype, params); res != 1 { + return ErrTaskAddTarget + } + return nil +} + +func (t *Task) getDeps() (*Deps, error) { //nolint:unused + var deps *Deps + if deps = DmTaskGetDeps(t.unmanaged); deps == nil { + return nil, ErrTaskGetDeps + } + return deps, nil +} + +func (t *Task) getInfo() (*Info, error) { + info := &Info{} + if res := DmTaskGetInfo(t.unmanaged, info); res != 1 { + return nil, ErrTaskGetInfo + } + return info, nil +} + +func (t *Task) getInfoWithDeferred() (*Info, error) { + info := &Info{} + if res := DmTaskGetInfoWithDeferred(t.unmanaged, info); res != 1 { + return nil, ErrTaskGetInfo + } + return info, nil +} + +func (t *Task) getDriverVersion() (string, error) { + res := DmTaskGetDriverVersion(t.unmanaged) + if res == "" { + return "", ErrTaskGetDriverVersion + } + return res, nil +} + +func (t *Task) getNextTarget(next unsafe.Pointer) (nextPtr unsafe.Pointer, start uint64, + length uint64, targetType string, params string, +) { + return DmGetNextTarget(t.unmanaged, next, &start, &length, + &targetType, ¶ms), + start, length, targetType, params +} + +// UdevWait waits for any processes that are waiting for udev to complete the specified cookie. 
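+// It pairs with an earlier Task.setCookie call, which creates the System V
+// semaphore the cookie refers to; the pattern used throughout this file is:
+//
+//	cookie := new(uint)
+//	if err := task.setCookie(cookie, 0); err != nil {
+//		return err
+//	}
+//	defer UdevWait(cookie)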
+func UdevWait(cookie *uint) error {
+ if res := DmUdevWait(*cookie); res != 1 {
+ logrus.Debugf("devicemapper: Failed to wait on udev cookie %d, %d", *cookie, res)
+ return ErrUdevWait
+ }
+ return nil
+}
+
+// SetDevDir sets the dev folder for the device mapper library (usually /dev).
+func SetDevDir(dir string) error {
+ if res := DmSetDevDir(dir); res != 1 {
+ logrus.Debug("devicemapper: Error dm_set_dev_dir")
+ return ErrSetDevDir
+ }
+ return nil
+}
+
+// GetLibraryVersion returns the device mapper library version.
+func GetLibraryVersion() (string, error) {
+ var version string
+ if res := DmGetLibraryVersion(&version); res != 1 {
+ return "", ErrGetLibraryVersion
+ }
+ return version, nil
+}
+
+// UdevSyncSupported returns whether device-mapper is able to sync with udev.
+//
+// This is essential, as race conditions can otherwise arise where both udev and
+// device-mapper attempt to create and destroy devices.
+func UdevSyncSupported() bool {
+ return DmUdevGetSyncSupport() != 0
+}
+
+// UdevSetSyncSupport allows setting whether the udev sync should be enabled.
+// The returned bool indicates whether the sync is enabled afterwards.
+func UdevSetSyncSupport(enable bool) bool {
+ if enable {
+ DmUdevSetSyncSupport(1)
+ } else {
+ DmUdevSetSyncSupport(0)
+ }
+
+ return UdevSyncSupported()
+}
+
+// CookieSupported returns whether the version of device-mapper supports the
+// use of cookies in tasks.
+// This is largely a lower-level call that other functions use.
+func CookieSupported() bool {
+ return DmCookieSupported() != 0
+}
+
+// RemoveDevice is a useful helper for cleaning up a device.
+func RemoveDevice(name string) error {
+ task, err := TaskCreateNamed(deviceRemove, name)
+ if task == nil {
+ return err
+ }
+
+ cookie := new(uint)
+ if err := task.setCookie(cookie, 0); err != nil {
+ return fmt.Errorf("devicemapper: Can not set cookie: %s", err)
+ }
+ defer UdevWait(cookie)
+
+ dmSawBusy = false // reset before the task is run
+ dmSawEnxio = false
+ if err = task.run(); err != nil {
+ if dmSawBusy {
+ return ErrBusy
+ }
+ if dmSawEnxio {
+ return ErrEnxio
+ }
+ return fmt.Errorf("devicemapper: Error running RemoveDevice %s", err)
+ }
+
+ return nil
+}
+
+// RemoveDeviceDeferred is a useful helper for cleaning up a device, but deferred.
+func RemoveDeviceDeferred(name string) error {
+ logrus.Debugf("devicemapper: RemoveDeviceDeferred START(%s)", name)
+ defer logrus.Debugf("devicemapper: RemoveDeviceDeferred END(%s)", name)
+ task, err := TaskCreateNamed(deviceRemove, name)
+ if task == nil {
+ return err
+ }
+
+ if err := DmTaskDeferredRemove(task.unmanaged); err != 1 {
+ return ErrTaskDeferredRemove
+ }
+
+ // set a task cookie and disable library fallback, or else libdevmapper will
+ // disable udev dm rules and delete the symlink under /dev/mapper by itself,
+ // even if the removal is deferred by the kernel.
+ cookie := new(uint)
+ flags := uint16(DmUdevDisableLibraryFallback)
+ if err := task.setCookie(cookie, flags); err != nil {
+ return fmt.Errorf("devicemapper: Can not set cookie: %s", err)
+ }
+
+ // libdevmapper and udev rely on System V semaphores for synchronization;
+ // semaphores created in `task.setCookie` will be cleaned up in `UdevWait`.
+ // These two function calls must therefore come in pairs, otherwise semaphores
+ // will be leaked, the limit on the number of semaphores defined in
+ // `/proc/sys/kernel/sem` will be reached, and all following calls to
+ // `task.setCookie` will eventually fail.
+ // This call will not wait for the deferred removal to actually finish, since
+ // no udev event will be generated; the semaphore's value will not be
+ // incremented by udev, so UdevWait just cleans up the semaphore.
+ defer UdevWait(cookie)
+
+ dmSawEnxio = false
+ if err = task.run(); err != nil {
+ if dmSawEnxio {
+ return ErrEnxio
+ }
+ return fmt.Errorf("devicemapper: Error running RemoveDeviceDeferred %s", err)
+ }
+
+ return nil
+}
+
+// CancelDeferredRemove cancels a deferred remove for a device.
+func CancelDeferredRemove(deviceName string) error {
+ task, err := TaskCreateNamed(deviceTargetMsg, deviceName)
+ if task == nil {
+ return err
+ }
+
+ if err := task.setSector(0); err != nil {
+ return fmt.Errorf("devicemapper: Can't set sector %s", err)
+ }
+
+ if err := task.setMessage("@cancel_deferred_remove"); err != nil {
+ return fmt.Errorf("devicemapper: Can't set message %s", err)
+ }
+
+ dmSawBusy = false
+ dmSawEnxio = false
+ if err := task.run(); err != nil {
+ // The device might already be in the process of being deleted
+ if dmSawBusy {
+ return ErrBusy
+ } else if dmSawEnxio {
+ return ErrEnxio
+ }
+ return fmt.Errorf("devicemapper: Error running CancelDeferredRemove %s", err)
+ }
+ return nil
+}
+
+// GetBlockDeviceSize returns the size of a block device identified by the specified file.
+func GetBlockDeviceSize(file *os.File) (uint64, error) {
+ size, err := ioctlBlkGetSize64(file.Fd())
+ if err != nil {
+ logrus.Errorf("devicemapper: Error getblockdevicesize: %s", err)
+ return 0, ErrGetBlockSize
+ }
+ return uint64(size), nil
+}
+
+// BlockDeviceDiscard runs discard for the given path.
+// This is used as a workaround for the kernel not discarding blocks on the
+// thin pool when we remove a thinp device, so we do it manually.
+func BlockDeviceDiscard(path string) error {
+ file, err := os.OpenFile(path, os.O_RDWR, 0)
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+
+ size, err := GetBlockDeviceSize(file)
+ if err != nil {
+ return err
+ }
+
+ if err := ioctlBlkDiscard(file.Fd(), 0, size); err != nil {
+ return err
+ }
+
+ // Without this, the removal of the device that happens after the discard
+ // sometimes fails with EBUSY.
+ unix.Sync()
+
+ return nil
+}
+
+// CreatePool is the programmatic example of "dmsetup create".
+// It creates a device with the specified poolName, data and metadata file and block size.
+func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error {
+ task, err := TaskCreateNamed(deviceCreate, poolName)
+ if task == nil {
+ return err
+ }
+
+ size, err := GetBlockDeviceSize(dataFile)
+ if err != nil {
+ return fmt.Errorf("devicemapper: Can't get data size %s", err)
+ }
+
+ params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize)
+ if err := task.addTarget(0, size/512, "thin-pool", params); err != nil {
+ return fmt.Errorf("devicemapper: Can't add target %s", err)
+ }
+
+ cookie := new(uint)
+ flags := uint16(DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag)
+ if err := task.setCookie(cookie, flags); err != nil {
+ return fmt.Errorf("devicemapper: Can't set cookie %s", err)
+ }
+ defer UdevWait(cookie)
+
+ if err := task.run(); err != nil {
+ return fmt.Errorf("devicemapper: Error running deviceCreate (CreatePool) %s", err)
+ }
+
+ return nil
+}
+
+// ReloadPool is the programmatic example of "dmsetup reload".
+// It reloads the table with the specified poolName, data and metadata file and block size.
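+// The single "thin-pool" target it loads has the form assembled below:
+//
+//	0 <size/512> thin-pool <metadata dev> <data dev> <block size> 32768 1 skip_block_zeroing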
+func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { + task, err := TaskCreateNamed(deviceReload, poolName) + if task == nil { + return err + } + + size, err := GetBlockDeviceSize(dataFile) + if err != nil { + return fmt.Errorf("devicemapper: Can't get data size %s", err) + } + + params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) + if err := task.addTarget(0, size/512, "thin-pool", params); err != nil { + return fmt.Errorf("devicemapper: Can't add target %s", err) + } + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running ReloadPool %s", err) + } + + return nil +} + +// GetDeps is the programmatic example of "dmsetup deps". +// It outputs a list of devices referenced by the live table for the specified device. +func GetDeps(name string) (*Deps, error) { + task, err := TaskCreateNamed(deviceDeps, name) + if task == nil { + return nil, err + } + if err := task.run(); err != nil { + return nil, err + } + return task.getDeps() +} + +// GetInfo is the programmatic example of "dmsetup info". +// It outputs some brief information about the device. +func GetInfo(name string) (*Info, error) { + task, err := TaskCreateNamed(deviceInfo, name) + if task == nil { + return nil, err + } + if err := task.run(); err != nil { + return nil, err + } + return task.getInfo() +} + +// GetInfoWithDeferred is the programmatic example of "dmsetup info", but deferred. +// It outputs some brief information about the device. +func GetInfoWithDeferred(name string) (*Info, error) { + task, err := TaskCreateNamed(deviceInfo, name) + if task == nil { + return nil, err + } + if err := task.run(); err != nil { + return nil, err + } + return task.getInfoWithDeferred() +} + +// GetDriverVersion is the programmatic example of "dmsetup version". +// It outputs version information of the driver. +func GetDriverVersion() (string, error) { + task := TaskCreate(deviceVersion) + if task == nil { + return "", fmt.Errorf("devicemapper: Can't create deviceVersion task") + } + if err := task.run(); err != nil { + return "", err + } + return task.getDriverVersion() +} + +// GetStatus is the programmatic example of "dmsetup status". +// It outputs status information for the specified device name. +func GetStatus(name string) (uint64, uint64, string, string, error) { + task, err := TaskCreateNamed(deviceStatus, name) + if task == nil { + logrus.Debugf("devicemapper: GetStatus() Error TaskCreateNamed: %s", err) + return 0, 0, "", "", err + } + if err := task.run(); err != nil { + logrus.Debugf("devicemapper: GetStatus() Error Run: %s", err) + return 0, 0, "", "", err + } + + devinfo, err := task.getInfo() + if err != nil { + logrus.Debugf("devicemapper: GetStatus() Error GetInfo: %s", err) + return 0, 0, "", "", err + } + if devinfo.Exists == 0 { + logrus.Debugf("devicemapper: GetStatus() Non existing device %s", name) + return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name) + } + + _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil)) + return start, length, targetType, params, nil +} + +// GetTable is the programmatic example for "dmsetup table". +// It outputs the current table for the specified device name. 
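+//
+// Illustrative use, with a hypothetical device name:
+//
+//	start, length, targetType, params, err := GetTable("mypool")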
+func GetTable(name string) (uint64, uint64, string, string, error) { + task, err := TaskCreateNamed(deviceTable, name) + if task == nil { + logrus.Debugf("devicemapper: GetTable() Error TaskCreateNamed: %s", err) + return 0, 0, "", "", err + } + if err := task.run(); err != nil { + logrus.Debugf("devicemapper: GetTable() Error Run: %s", err) + return 0, 0, "", "", err + } + + devinfo, err := task.getInfo() + if err != nil { + logrus.Debugf("devicemapper: GetTable() Error GetInfo: %s", err) + return 0, 0, "", "", err + } + if devinfo.Exists == 0 { + logrus.Debugf("devicemapper: GetTable() Non existing device %s", name) + return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name) + } + + _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil)) + return start, length, targetType, params, nil +} + +// SetTransactionID sets a transaction id for the specified device name. +func SetTransactionID(poolName string, oldID uint64, newID uint64) error { + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("set_transaction_id %d %d", oldID, newID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running SetTransactionID %s", err) + } + return nil +} + +// SuspendDevice is the programmatic example of "dmsetup suspend". +// It suspends the specified device. +func SuspendDevice(name string) error { + task, err := TaskCreateNamed(deviceSuspend, name) + if task == nil { + return err + } + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceSuspend %s", err) + } + return nil +} + +// ResumeDevice is the programmatic example of "dmsetup resume". +// It un-suspends the specified device. +func ResumeDevice(name string) error { + task, err := TaskCreateNamed(deviceResume, name) + if task == nil { + return err + } + + cookie := new(uint) + if err := task.setCookie(cookie, 0); err != nil { + return fmt.Errorf("devicemapper: Can't set cookie %s", err) + } + defer UdevWait(cookie) + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceResume %s", err) + } + + return nil +} + +// CreateDevice creates a device with the specified poolName with the specified device id. +func CreateDevice(poolName string, deviceID int) error { + logrus.Debugf("devicemapper: CreateDevice(poolName=%v, deviceID=%v)", poolName, deviceID) + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("create_thin %d", deviceID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawExist = false // reset before the task is run + if err := task.run(); err != nil { + // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id. + if dmSawExist { + return ErrDeviceIDExists + } + + return fmt.Errorf("devicemapper: Error running CreateDevice %s", err) + + } + return nil +} + +// DeleteDevice deletes a device with the specified poolName with the specified device id. 
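+// It returns ErrBusy if the device is still open, so an illustrative caller
+// (a sketch, not code from this package) might retry:
+//
+//	if err := DeleteDevice(poolName, deviceID); errors.Is(err, ErrBusy) {
+//		// back off and try again later
+//	}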
+func DeleteDevice(poolName string, deviceID int) error { + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("delete %d", deviceID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawBusy = false + if err := task.run(); err != nil { + if dmSawBusy { + return ErrBusy + } + return fmt.Errorf("devicemapper: Error running DeleteDevice %s", err) + } + return nil +} + +// ActivateDevice activates the device identified by the specified +// poolName, name and deviceID with the specified size. +func ActivateDevice(poolName string, name string, deviceID int, size uint64) error { + return activateDevice(poolName, name, deviceID, size, "") +} + +// ActivateDeviceWithExternal activates the device identified by the specified +// poolName, name and deviceID with the specified size. +func ActivateDeviceWithExternal(poolName string, name string, deviceID int, size uint64, external string) error { + return activateDevice(poolName, name, deviceID, size, external) +} + +func activateDevice(poolName string, name string, deviceID int, size uint64, external string) error { + task, err := TaskCreateNamed(deviceCreate, name) + if task == nil { + return err + } + + var params string + if len(external) > 0 { + params = fmt.Sprintf("%s %d %s", poolName, deviceID, external) + } else { + params = fmt.Sprintf("%s %d", poolName, deviceID) + } + if err := task.addTarget(0, size/512, "thin", params); err != nil { + return fmt.Errorf("devicemapper: Can't add target %s", err) + } + if err := task.setAddNode(addNodeOnCreate); err != nil { + return fmt.Errorf("devicemapper: Can't add node %s", err) + } + + cookie := new(uint) + if err := task.setCookie(cookie, 0); err != nil { + return fmt.Errorf("devicemapper: Can't set cookie %s", err) + } + + defer UdevWait(cookie) + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceCreate (ActivateDevice) %s", err) + } + + return nil +} + +// CreateSnapDeviceRaw creates a snapshot device. Caller needs to suspend and resume the origin device if it is active. +func CreateSnapDeviceRaw(poolName string, deviceID int, baseDeviceID int) error { + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("create_snap %d %d", deviceID, baseDeviceID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawExist = false // reset before the task is run + if err := task.run(); err != nil { + // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id. 
+ if dmSawExist { + return ErrDeviceIDExists + } + return fmt.Errorf("devicemapper: Error running deviceCreate (CreateSnapDeviceRaw) %s", err) + } + + return nil +} + +// CreateSnapDevice creates a snapshot based on the device identified by the baseName and baseDeviceId, +func CreateSnapDevice(poolName string, deviceID int, baseName string, baseDeviceID int) error { + devinfo, _ := GetInfo(baseName) + doSuspend := devinfo != nil && devinfo.Exists != 0 + + if doSuspend { + if err := SuspendDevice(baseName); err != nil { + return err + } + } + + if err := CreateSnapDeviceRaw(poolName, deviceID, baseDeviceID); err != nil { + if doSuspend { + if err2 := ResumeDevice(baseName); err2 != nil { + return fmt.Errorf("CreateSnapDeviceRaw Error: (%v): ResumeDevice Error: %w", err, err2) + } + } + return err + } + + if doSuspend { + if err := ResumeDevice(baseName); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go new file mode 100644 index 0000000000..6cfef0a5bf --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go @@ -0,0 +1,123 @@ +//go:build linux && cgo +// +build linux,cgo + +package devicemapper + +import "C" + +import ( + "fmt" + "strings" + + "github.com/sirupsen/logrus" +) + +// DevmapperLogger defines methods required to register as a callback for +// logging events received from devicemapper. Note that devicemapper will send +// *all* logs regardless to callbacks (including debug logs) so it's +// recommended to not spam the console with the outputs. +type DevmapperLogger interface { + // DMLog is the logging callback containing all of the information from + // devicemapper. The interface is identical to the C libdm counterpart. + DMLog(level int, file string, line int, dmError int, message string) +} + +// dmLogger is the current logger in use that is being forwarded our messages. +var dmLogger DevmapperLogger + +// LogInit changes the logging callback called after processing libdm logs for +// error message information. The default logger simply forwards all logs to +// logrus. Calling LogInit(nil) disables the calling of callbacks. +func LogInit(logger DevmapperLogger) { + dmLogger = logger +} + +// Due to the way cgo works this has to be in a separate file, as devmapper.go has +// definitions in the cgo block, which is incompatible with using "//export" + +// StorageDevmapperLogCallback exports the devmapper log callback for cgo. Note that +// because we are using callbacks, this function will be called for *every* log +// in libdm (even debug ones because there's no way of setting the verbosity +// level for an external logging callback). +// +//export StorageDevmapperLogCallback +func StorageDevmapperLogCallback(level C.int, file *C.char, line, dmErrnoOrClass C.int, message *C.char) { + msg := C.GoString(message) + + // Track what errno libdm saw, because the library only gives us 0 or 1. + if level < LogLevelDebug { + if strings.Contains(msg, "busy") { + dmSawBusy = true + } + + if strings.Contains(msg, "File exists") { + dmSawExist = true + } + + if strings.Contains(msg, "No such device or address") { + dmSawEnxio = true + } + } + + if dmLogger != nil { + dmLogger.DMLog(int(level), C.GoString(file), int(line), int(dmErrnoOrClass), msg) + } +} + +// DefaultLogger is the default logger used by pkg/devicemapper. 
It forwards +// all logs that are of higher or equal priority to the given level to the +// corresponding logrus level. +type DefaultLogger struct { + // Level corresponds to the highest libdm level that will be forwarded to + // logrus. In order to change this, register a new DefaultLogger. + Level int +} + +// DMLog is the logging callback containing all of the information from +// devicemapper. The interface is identical to the C libdm counterpart. +func (l DefaultLogger) DMLog(level int, file string, line, dmError int, message string) { + if level <= l.Level { + // Forward the log to the correct logrus level, if allowed by dmLogLevel. + logMsg := fmt.Sprintf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) + switch level { + case LogLevelFatal, LogLevelErr: + logrus.Error(logMsg) + case LogLevelWarn: + logrus.Warn(logMsg) + case LogLevelNotice, LogLevelInfo: + logrus.Info(logMsg) + case LogLevelDebug: + logrus.Debug(logMsg) + default: + // Don't drop any "unknown" levels. + logrus.Info(logMsg) + } + } +} + +// registerLogCallback registers our own logging callback function for libdm +// (which is StorageDevmapperLogCallback). +// +// Because libdm only gives us {0,1} error codes we need to parse the logs +// produced by libdm (to set dmSawBusy and so on). Note that by registering a +// callback using StorageDevmapperLogCallback, libdm will no longer output logs to +// stderr so we have to log everything ourselves. None of this handling is +// optional because we depend on log callbacks to parse the logs, and if we +// don't forward the log information we'll be in a lot of trouble when +// debugging things. +func registerLogCallback() { + LogWithErrnoInit() +} + +func init() { + // Use the default logger by default. We only allow LogLevelFatal by + // default, because internally we mask a lot of libdm errors by retrying + // and similar tricks. Also, libdm is very chatty and we don't want to + // worry users for no reason. + dmLogger = DefaultLogger{ + Level: LogLevelFatal, + } + + // Register as early as possible so we don't miss anything. + registerLogCallback() +} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go new file mode 100644 index 0000000000..9aef4c2fb9 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go @@ -0,0 +1,252 @@ +//go:build linux && cgo +// +build linux,cgo + +package devicemapper + +/* +#define _GNU_SOURCE +#include +#include // FIXME: present only for BLKGETSIZE64, maybe we can remove it? + +// FIXME: Can't we find a way to do the logging in pure Go? +extern void StorageDevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str); + +static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...) +{ + char *buffer = NULL; + va_list ap; + int ret; + + va_start(ap, f); + ret = vasprintf(&buffer, f, ap); + va_end(ap); + if (ret < 0) { + // memory allocation failed -- should never happen? + return; + } + + StorageDevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer); + free(buffer); +} + +static void log_with_errno_init() +{ + dm_log_with_errno_init(log_cb); +} +*/ +import "C" + +import ( + "reflect" + "unsafe" +) + +type ( + cdmTask C.struct_dm_task +) + +// IOCTL consts +const ( + BlkGetSize64 = C.BLKGETSIZE64 + BlkDiscard = C.BLKDISCARD +) + +// Devicemapper cookie flags. 
+const ( + DmUdevDisableSubsystemRulesFlag = C.DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG + DmUdevDisableDiskRulesFlag = C.DM_UDEV_DISABLE_DISK_RULES_FLAG + DmUdevDisableOtherRulesFlag = C.DM_UDEV_DISABLE_OTHER_RULES_FLAG + DmUdevDisableLibraryFallback = C.DM_UDEV_DISABLE_LIBRARY_FALLBACK +) + +// DeviceMapper mapped functions. +var ( + DmGetLibraryVersion = dmGetLibraryVersionFct + DmGetNextTarget = dmGetNextTargetFct + DmSetDevDir = dmSetDevDirFct + DmTaskAddTarget = dmTaskAddTargetFct + DmTaskCreate = dmTaskCreateFct + DmTaskDestroy = dmTaskDestroyFct + DmTaskGetDeps = dmTaskGetDepsFct + DmTaskGetInfo = dmTaskGetInfoFct + DmTaskGetDriverVersion = dmTaskGetDriverVersionFct + DmTaskRun = dmTaskRunFct + DmTaskSetAddNode = dmTaskSetAddNodeFct + DmTaskSetCookie = dmTaskSetCookieFct + DmTaskSetMessage = dmTaskSetMessageFct + DmTaskSetName = dmTaskSetNameFct + DmTaskSetRo = dmTaskSetRoFct + DmTaskSetSector = dmTaskSetSectorFct + DmUdevWait = dmUdevWaitFct + DmUdevSetSyncSupport = dmUdevSetSyncSupportFct + DmUdevGetSyncSupport = dmUdevGetSyncSupportFct + DmCookieSupported = dmCookieSupportedFct + LogWithErrnoInit = logWithErrnoInitFct + DmTaskDeferredRemove = dmTaskDeferredRemoveFct + DmTaskGetInfoWithDeferred = dmTaskGetInfoWithDeferredFct +) + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func dmTaskDestroyFct(task *cdmTask) { + C.dm_task_destroy((*C.struct_dm_task)(task)) +} + +func dmTaskCreateFct(taskType int) *cdmTask { + return (*cdmTask)(C.dm_task_create(C.int(taskType))) +} + +func dmTaskRunFct(task *cdmTask) int { + ret, _ := C.dm_task_run((*C.struct_dm_task)(task)) + return int(ret) +} + +func dmTaskSetNameFct(task *cdmTask, name string) int { + Cname := C.CString(name) + defer free(Cname) + + return int(C.dm_task_set_name((*C.struct_dm_task)(task), Cname)) +} + +func dmTaskSetMessageFct(task *cdmTask, message string) int { + Cmessage := C.CString(message) + defer free(Cmessage) + + return int(C.dm_task_set_message((*C.struct_dm_task)(task), Cmessage)) +} + +func dmTaskSetSectorFct(task *cdmTask, sector uint64) int { + return int(C.dm_task_set_sector((*C.struct_dm_task)(task), C.uint64_t(sector))) +} + +func dmTaskSetCookieFct(task *cdmTask, cookie *uint, flags uint16) int { + cCookie := C.uint32_t(*cookie) + defer func() { + *cookie = uint(cCookie) + }() + return int(C.dm_task_set_cookie((*C.struct_dm_task)(task), &cCookie, C.uint16_t(flags))) +} + +func dmTaskSetAddNodeFct(task *cdmTask, addNode AddNodeType) int { + return int(C.dm_task_set_add_node((*C.struct_dm_task)(task), C.dm_add_node_t(addNode))) +} + +func dmTaskSetRoFct(task *cdmTask) int { + return int(C.dm_task_set_ro((*C.struct_dm_task)(task))) +} + +func dmTaskAddTargetFct(task *cdmTask, + start, size uint64, ttype, params string, +) int { + Cttype := C.CString(ttype) + defer free(Cttype) + + Cparams := C.CString(params) + defer free(Cparams) + + return int(C.dm_task_add_target((*C.struct_dm_task)(task), C.uint64_t(start), C.uint64_t(size), Cttype, Cparams)) +} + +func dmTaskGetDepsFct(task *cdmTask) *Deps { + Cdeps := C.dm_task_get_deps((*C.struct_dm_task)(task)) + if Cdeps == nil { + return nil + } + + // golang issue: https://github.com/golang/go/issues/11925 + var devices []C.uint64_t + devicesHdr := (*reflect.SliceHeader)(unsafe.Pointer(&devices)) + devicesHdr.Data = uintptr(unsafe.Pointer(uintptr(unsafe.Pointer(Cdeps)) + unsafe.Sizeof(*Cdeps))) + devicesHdr.Len = int(Cdeps.count) + devicesHdr.Cap = int(Cdeps.count) + + deps := &Deps{ + Count: uint32(Cdeps.count), + Filler: uint32(Cdeps.filler), + } + 
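+ // Copy each device number into Go-managed memory; the devices slice above
+ // aliases C memory owned by the dm_task and must not outlive it.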
for _, device := range devices { + deps.Device = append(deps.Device, uint64(device)) + } + return deps +} + +func dmTaskGetInfoFct(task *cdmTask, info *Info) int { + Cinfo := C.struct_dm_info{} + defer func() { + info.Exists = int(Cinfo.exists) + info.Suspended = int(Cinfo.suspended) + info.LiveTable = int(Cinfo.live_table) + info.InactiveTable = int(Cinfo.inactive_table) + info.OpenCount = int32(Cinfo.open_count) + info.EventNr = uint32(Cinfo.event_nr) + info.Major = uint32(Cinfo.major) + info.Minor = uint32(Cinfo.minor) + info.ReadOnly = int(Cinfo.read_only) + info.TargetCount = int32(Cinfo.target_count) + }() + return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) +} + +func dmTaskGetDriverVersionFct(task *cdmTask) string { + buffer := C.malloc(128) + defer C.free(buffer) + res := C.dm_task_get_driver_version((*C.struct_dm_task)(task), (*C.char)(buffer), 128) + if res == 0 { + return "" + } + return C.GoString((*C.char)(buffer)) +} + +func dmGetNextTargetFct(task *cdmTask, next unsafe.Pointer, start, length *uint64, target, params *string) unsafe.Pointer { + var ( + Cstart, Clength C.uint64_t + CtargetType, Cparams *C.char + ) + defer func() { + *start = uint64(Cstart) + *length = uint64(Clength) + *target = C.GoString(CtargetType) + *params = C.GoString(Cparams) + }() + + nextp := C.dm_get_next_target((*C.struct_dm_task)(task), next, &Cstart, &Clength, &CtargetType, &Cparams) + return nextp +} + +func dmUdevSetSyncSupportFct(syncWithUdev int) { + (C.dm_udev_set_sync_support(C.int(syncWithUdev))) +} + +func dmUdevGetSyncSupportFct() int { + return int(C.dm_udev_get_sync_support()) +} + +func dmUdevWaitFct(cookie uint) int { + return int(C.dm_udev_wait(C.uint32_t(cookie))) +} + +func dmCookieSupportedFct() int { + return int(C.dm_cookie_supported()) +} + +func logWithErrnoInitFct() { + C.log_with_errno_init() +} + +func dmSetDevDirFct(dir string) int { + Cdir := C.CString(dir) + defer free(Cdir) + + return int(C.dm_set_dev_dir(Cdir)) +} + +func dmGetLibraryVersionFct(version *string) int { + buffer := C.CString(string(make([]byte, 128))) + defer free(buffer) + defer func() { + *version = C.GoString(buffer) + }() + return int(C.dm_get_library_version(buffer, 128)) +} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go new file mode 100644 index 0000000000..071f7f35b1 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go @@ -0,0 +1,32 @@ +//go:build linux && cgo && !libdm_no_deferred_remove +// +build linux,cgo,!libdm_no_deferred_remove + +package devicemapper + +// #include <libdevmapper.h> +import "C" + +// LibraryDeferredRemovalSupport tells if the feature is enabled in the build +const LibraryDeferredRemovalSupport = true + +func dmTaskDeferredRemoveFct(task *cdmTask) int { + return int(C.dm_task_deferred_remove((*C.struct_dm_task)(task))) +} + +func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int { + Cinfo := C.struct_dm_info{} + defer func() { + info.Exists = int(Cinfo.exists) + info.Suspended = int(Cinfo.suspended) + info.LiveTable = int(Cinfo.live_table) + info.InactiveTable = int(Cinfo.inactive_table) + info.OpenCount = int32(Cinfo.open_count) + info.EventNr = uint32(Cinfo.event_nr) + info.Major = uint32(Cinfo.major) + info.Minor = uint32(Cinfo.minor) + info.ReadOnly = int(Cinfo.read_only) + info.TargetCount = int32(Cinfo.target_count) + 
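// Unlike dmTaskGetInfoFct, this variant also copies deferred_remove below; it is compiled only when deferred removal is available. + 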
info.DeferredRemove = int(Cinfo.deferred_remove) + }() + return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) +} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go new file mode 100644 index 0000000000..93dcc32219 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go @@ -0,0 +1,7 @@ +//go:build linux && cgo && !static_build +// +build linux,cgo,!static_build + +package devicemapper + +// #cgo pkg-config: devmapper +import "C" diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go new file mode 100644 index 0000000000..91906f2efe --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go @@ -0,0 +1,16 @@ +//go:build linux && cgo && libdm_no_deferred_remove +// +build linux,cgo,libdm_no_deferred_remove + +package devicemapper + +// LibraryDeferredRemovalSupport tells if the feature is enabled in the build +const LibraryDeferredRemovalSupport = false + +func dmTaskDeferredRemoveFct(task *cdmTask) int { + // Error. Nobody should be calling it. + return -1 +} + +func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int { + return -1 +} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go new file mode 100644 index 0000000000..68ea48fe57 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go @@ -0,0 +1,7 @@ +//go:build linux && cgo && static_build +// +build linux,cgo,static_build + +package devicemapper + +// #cgo pkg-config: --static devmapper +import "C" diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go b/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go new file mode 100644 index 0000000000..90ffe2c3fd --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go @@ -0,0 +1,29 @@ +//go:build linux && cgo +// +build linux,cgo + +package devicemapper + +import ( + "unsafe" + + "golang.org/x/sys/unix" +) + +func ioctlBlkGetSize64(fd uintptr) (int64, error) { + var size int64 + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 { + return 0, err + } + return size, nil +} + +func ioctlBlkDiscard(fd uintptr, offset, length uint64) error { + var r [2]uint64 + r[0] = offset + r[1] = length + + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 { + return err + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/log.go b/vendor/github.com/containers/storage/pkg/devicemapper/log.go new file mode 100644 index 0000000000..cee5e54549 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/devicemapper/log.go @@ -0,0 +1,11 @@ +package devicemapper + +// definitions from lvm2 lib/log/log.h +const ( + LogLevelFatal = 2 + iota // _LOG_FATAL + LogLevelErr // _LOG_ERR + LogLevelWarn // _LOG_WARN + LogLevelNotice // _LOG_NOTICE + LogLevelInfo // _LOG_INFO + LogLevelDebug // _LOG_DEBUG +) diff --git 
a/vendor/github.com/containers/storage/pkg/directory/directory.go b/vendor/github.com/containers/storage/pkg/directory/directory.go new file mode 100644 index 0000000000..829fe59f3a --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/directory/directory.go @@ -0,0 +1,31 @@ +package directory + +import ( + "os" + "path/filepath" +) + +// DiskUsage is a structure that describes the disk usage (size and inode count) +// of a particular directory. +type DiskUsage struct { + Size int64 + InodeCount int64 +} + +// MoveToSubdir moves all contents of a directory to a subdirectory underneath the original path +func MoveToSubdir(oldpath, subdir string) error { + infos, err := os.ReadDir(oldpath) + if err != nil { + return err + } + for _, info := range infos { + if info.Name() != subdir { + oldName := filepath.Join(oldpath, info.Name()) + newName := filepath.Join(oldpath, subdir, info.Name()) + if err := os.Rename(oldName, newName); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_unix.go b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go new file mode 100644 index 0000000000..36e1bdd5fc --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go @@ -0,0 +1,62 @@ +//go:build linux || darwin || freebsd || solaris +// +build linux darwin freebsd solaris + +package directory + +import ( + "io/fs" + "os" + "path/filepath" + "syscall" +) + +// Size walks a directory tree and returns its total size in bytes. +func Size(dir string) (size int64, err error) { + usage, err := Usage(dir) + if err != nil { + return 0, err + } + return usage.Size, nil +} + +// Usage walks a directory tree and returns its total size in bytes and the number of inodes. +func Usage(dir string) (usage *DiskUsage, err error) { + usage = &DiskUsage{} + data := make(map[uint64]struct{}) + err = filepath.WalkDir(dir, func(d string, entry fs.DirEntry, err error) error { + if err != nil { + // if dir does not exist, Usage() returns the error. + // if dir/x disappeared while walking, Usage() ignores dir/x. + if os.IsNotExist(err) && d != dir { + return nil + } + return err + } + + fileInfo, err := entry.Info() + if err != nil { + return err + } + + // Check inode to only count the sizes of files with multiple hard links once. + inode := fileInfo.Sys().(*syscall.Stat_t).Ino + // inode is not a uint64 on all platforms. Cast it to avoid issues. + if _, exists := data[uint64(inode)]; exists { + return nil + } + + // inode is not a uint64 on all platforms. Cast it to avoid issues. 
+ data[uint64(inode)] = struct{}{} + // Ignore directory sizes + if entry.IsDir() { + return nil + } + + usage.Size += fileInfo.Size() + + return nil + }) + // inode count is the number of unique inode numbers we saw + usage.InodeCount = int64(len(data)) + return +} diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_windows.go b/vendor/github.com/containers/storage/pkg/directory/directory_windows.go new file mode 100644 index 0000000000..482bc51a26 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/directory/directory_windows.go @@ -0,0 +1,50 @@ +//go:build windows +// +build windows + +package directory + +import ( + "io/fs" + "os" + "path/filepath" +) + +// Size walks a directory tree and returns its total size in bytes +func Size(dir string) (size int64, err error) { + usage, err := Usage(dir) + if err != nil { + return 0, err + } + return usage.Size, nil +} + +// Usage walks a directory tree and returns its total size in bytes and the number of inodes. +func Usage(dir string) (usage *DiskUsage, err error) { + usage = &DiskUsage{} + err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error { + if err != nil { + // if dir does not exist, Usage() returns the error. + // if dir/x disappeared while walking, Usage() ignores dir/x. + if os.IsNotExist(err) && path != dir { + return nil + } + return err + } + + usage.InodeCount++ + + // Ignore directory sizes + if d.IsDir() { + return nil + } + + fileInfo, err := d.Info() + if err != nil { + return err + } + usage.Size += fileInfo.Size() + + return nil + }) + return +} diff --git a/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go b/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go new file mode 100644 index 0000000000..e883d25f51 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go @@ -0,0 +1,21 @@ +//go:build linux +// +build linux + +package dmesg + +import ( + "unsafe" + + "golang.org/x/sys/unix" +) + +// Dmesg returns last messages from the kernel log, up to size bytes +func Dmesg(size int) []byte { + t := uintptr(3) // SYSLOG_ACTION_READ_ALL + b := make([]byte, size) + amt, _, err := unix.Syscall(unix.SYS_SYSLOG, t, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b))) + if err != 0 { + return []byte{} + } + return b[:amt] +} diff --git a/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go b/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go new file mode 100644 index 0000000000..9854cac1c2 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go @@ -0,0 +1,88 @@ +//go:build linux +// +build linux + +package fsutils + +import ( + "fmt" + "os" + "unsafe" + + "golang.org/x/sys/unix" +) + +func locateDummyIfEmpty(path string) (string, error) { + children, err := os.ReadDir(path) + if err != nil { + return "", err + } + if len(children) != 0 { + return "", nil + } + dummyFile, err := os.CreateTemp(path, "fsutils-dummy") + if err != nil { + return "", err + } + name := dummyFile.Name() + err = dummyFile.Close() + return name, err +} + +// SupportsDType returns whether the filesystem mounted on path supports d_type +func SupportsDType(path string) (bool, error) { + // locate dummy so that we have at least one dirent + dummy, err := locateDummyIfEmpty(path) + if err != nil { + return false, err + } + if dummy != "" { + defer os.Remove(dummy) + } + + visited := 0 + supportsDType := true + fn := func(ent *unix.Dirent) bool 
{ + visited++ + if ent.Type == unix.DT_UNKNOWN { + supportsDType = false + // stop iteration + return true + } + // continue iteration + return false + } + if err = iterateReadDir(path, fn); err != nil { + return false, err + } + if visited == 0 { + return false, fmt.Errorf("did not hit any dirent during iteration %s", path) + } + return supportsDType, nil +} + +func iterateReadDir(path string, fn func(*unix.Dirent) bool) error { + d, err := os.Open(path) + if err != nil { + return err + } + defer d.Close() + fd := int(d.Fd()) + buf := make([]byte, 4096) + for { + nbytes, err := unix.ReadDirent(fd, buf) + if err != nil { + return err + } + if nbytes == 0 { + break + } + for off := 0; off < nbytes; { + ent := (*unix.Dirent)(unsafe.Pointer(&buf[off])) + if stop := fn(ent); stop { + return nil + } + off += int(ent.Reclen) + } + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go b/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go new file mode 100644 index 0000000000..87484d95b6 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go @@ -0,0 +1,88 @@ +//go:build linux +// +build linux + +package idmap + +import ( + "fmt" + "os" + "runtime" + "syscall" + + "github.com/containers/storage/pkg/idtools" + "golang.org/x/sys/unix" +) + +// CreateIDMappedMount creates an IDMapped bind mount from SOURCE to TARGET using the user namespace +// for the PID process. +func CreateIDMappedMount(source, target string, pid int) error { + path := fmt.Sprintf("/proc/%d/ns/user", pid) + userNsFile, err := os.Open(path) + if err != nil { + return fmt.Errorf("unable to get user ns file descriptor for %q: %w", path, err) + } + defer userNsFile.Close() + + targetDirFd, err := unix.OpenTree(0, source, unix.OPEN_TREE_CLONE) + if err != nil { + return err + } + defer unix.Close(targetDirFd) + + if err := unix.MountSetattr(targetDirFd, "", unix.AT_EMPTY_PATH|unix.AT_RECURSIVE, + &unix.MountAttr{ + Attr_set: unix.MOUNT_ATTR_IDMAP, + Userns_fd: uint64(userNsFile.Fd()), + }); err != nil { + return err + } + if err := os.Mkdir(target, 0o700); err != nil && !os.IsExist(err) { + return err + } + + return unix.MoveMount(targetDirFd, "", 0, target, unix.MOVE_MOUNT_F_EMPTY_PATH) +} + +// CreateUsernsProcess forks the current process and creates a user namespace using the specified +// mappings. It returns the pid of the new process. 
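+// The returned cleanup function kills and reaps the child process; the caller must invoke it to avoid leaking the child.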
+func CreateUsernsProcess(uidMaps []idtools.IDMap, gidMaps []idtools.IDMap) (int, func(), error) { + var pid uintptr + var err syscall.Errno + + if runtime.GOARCH == "s390x" { + pid, _, err = syscall.Syscall6(uintptr(unix.SYS_CLONE), 0, unix.CLONE_NEWUSER|uintptr(unix.SIGCHLD), 0, 0, 0, 0) + } else { + pid, _, err = syscall.Syscall6(uintptr(unix.SYS_CLONE), unix.CLONE_NEWUSER|uintptr(unix.SIGCHLD), 0, 0, 0, 0, 0) + } + if err != 0 { + return -1, nil, err + } + if pid == 0 { + _ = unix.Prctl(unix.PR_SET_PDEATHSIG, uintptr(unix.SIGKILL), 0, 0, 0) + // just wait for the SIGKILL + for { + syscall.Pause() + } + } + cleanupFunc := func() { + unix.Kill(int(pid), unix.SIGKILL) + _, _ = unix.Wait4(int(pid), nil, 0, nil) + } + writeMappings := func(fname string, idmap []idtools.IDMap) error { + mappings := "" + for _, m := range idmap { + mappings = mappings + fmt.Sprintf("%d %d %d\n", m.ContainerID, m.HostID, m.Size) + } + return os.WriteFile(fmt.Sprintf("/proc/%d/%s", pid, fname), []byte(mappings), 0o600) + } + if err := writeMappings("uid_map", uidMaps); err != nil { + cleanupFunc() + return -1, nil, err + } + if err := writeMappings("gid_map", gidMaps); err != nil { + cleanupFunc() + return -1, nil, err + } + + return int(pid), cleanupFunc, nil +} diff --git a/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils_unsupported.go b/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils_unsupported.go new file mode 100644 index 0000000000..81c6072aad --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils_unsupported.go @@ -0,0 +1,22 @@ +//go:build !linux +// +build !linux + +package idmap + +import ( + "fmt" + + "github.com/containers/storage/pkg/idtools" +) + +// CreateIDMappedMount creates an IDMapped bind mount from SOURCE to TARGET using the user namespace +// for the PID process. +func CreateIDMappedMount(source, target string, pid int) error { + return fmt.Errorf("IDMapped mounts are not supported") +} + +// CreateUsernsProcess forks the current process and creates a user namespace using the specified +// mappings. It returns the pid of the new process. +func CreateUsernsProcess(uidMaps []idtools.IDMap, gidMaps []idtools.IDMap) (int, func(), error) { + return -1, nil, fmt.Errorf("IDMapped mounts are not supported") +} diff --git a/vendor/github.com/containers/storage/pkg/locker/README.md b/vendor/github.com/containers/storage/pkg/locker/README.md new file mode 100644 index 0000000000..ad15e89af1 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/locker/README.md @@ -0,0 +1,65 @@ +Locker +===== + +locker provides a mechanism for creating finer-grained locking to help +free up more global locks to handle other tasks. + +The implementation looks close to a sync.Mutex, however, the user must provide a +reference to use to refer to the underlying lock when locking and unlocking, +and unlock may generate an error. + +If a lock with a given name does not exist when `Lock` is called, one is +created. +Lock references are automatically cleaned up on `Unlock` if nothing else is +waiting for the lock. 
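A minimal sketch of these semantics (assuming only the vendored import path used throughout this package): goroutines that lock the same name serialize on one mutex, while a different name is an independent lock.

```go
package main

import (
	"fmt"
	"sync"

	"github.com/containers/storage/pkg/locker"
)

func main() {
	l := locker.New()
	var wg sync.WaitGroup
	for _, name := range []string{"img-a", "img-a", "img-b"} {
		wg.Add(1)
		go func(name string) {
			defer wg.Done()
			l.Lock(name) // the two "img-a" goroutines serialize here; "img-b" proceeds independently
			defer l.Unlock(name)
			fmt.Println("holding", name)
		}(name)
	}
	wg.Wait()
}
```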
+ + +## Usage + +```go +package important + +import ( + "sync" + "time" + + "github.com/containers/storage/pkg/locker" +) + +type important struct { + locks *locker.Locker + data map[string]interface{} + mu sync.Mutex +} + +func (i *important) Get(name string) interface{} { + i.locks.Lock(name) + defer i.locks.Unlock(name) + return i.data[name] +} + +func (i *important) Create(name string, data interface{}) { + i.locks.Lock(name) + defer i.locks.Unlock(name) + + i.createImportant(data) + + i.mu.Lock() + i.data[name] = data + i.mu.Unlock() +} + +func (i *important) createImportant(data interface{}) { + time.Sleep(10 * time.Second) +} +``` + +For functions dealing with a given name, always lock at the beginning of the +function (or before doing anything with the underlying state); this ensures that any +other function dealing with the same name will block. + +When needing to modify the underlying data, use the global lock to ensure nothing +else is modifying it at the same time. +Since the name lock is already in place, no reads will occur while the modification +is being performed. + diff --git a/vendor/github.com/containers/storage/pkg/locker/locker.go b/vendor/github.com/containers/storage/pkg/locker/locker.go new file mode 100644 index 0000000000..0b22ddfab8 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/locker/locker.go @@ -0,0 +1,112 @@ +/* +Package locker provides a mechanism for creating finer-grained locking to help +free up more global locks to handle other tasks. + +The implementation looks close to a sync.Mutex, however the user must provide a +reference to use to refer to the underlying lock when locking and unlocking, +and unlock may generate an error. + +If a lock with a given name does not exist when `Lock` is called, one is +created. +Lock references are automatically cleaned up on `Unlock` if nothing else is +waiting for the lock. +*/ +package locker + +import ( + "errors" + "sync" + "sync/atomic" +) + +// ErrNoSuchLock is returned when the requested lock does not exist +var ErrNoSuchLock = errors.New("no such lock") + +// Locker provides a locking mechanism based on the passed in reference name +type Locker struct { + mu sync.Mutex + locks map[string]*lockCtr +} + +// lockCtr is used by Locker to represent a lock with a given name. +type lockCtr struct { + mu sync.Mutex + // waiters is the number of waiters waiting to acquire the lock + // this is int32 instead of uint32 so we can add `-1` in `dec()` + waiters int32 +} + +// inc increments the number of waiters waiting for the lock +func (l *lockCtr) inc() { + atomic.AddInt32(&l.waiters, 1) +} + +// dec decrements the number of waiters waiting on the lock +func (l *lockCtr) dec() { + atomic.AddInt32(&l.waiters, -1) +} + +// count gets the current number of waiters +func (l *lockCtr) count() int32 { + return atomic.LoadInt32(&l.waiters) +} + +// Lock locks the mutex +func (l *lockCtr) Lock() { + l.mu.Lock() +} + +// Unlock unlocks the mutex +func (l *lockCtr) Unlock() { + l.mu.Unlock() +} + +// New creates a new Locker +func New() *Locker { + return &Locker{ + locks: make(map[string]*lockCtr), + } +} + +// Lock locks a mutex with the given name. If it doesn't exist, one is created +func (l *Locker) Lock(name string) { + l.mu.Lock() + if l.locks == nil { + l.locks = make(map[string]*lockCtr) + } + + nameLock, exists := l.locks[name] + if !exists { + nameLock = &lockCtr{} + l.locks[name] = nameLock + } + + // increment the nameLock waiters while inside the main mutex + // this makes sure that the lock isn't deleted if `Lock` and `Unlock` are called concurrently + nameLock.inc() + l.mu.Unlock() + + // Lock the nameLock outside the main mutex so we don't block other operations + // once locked then we can decrement the number of waiters for this lock + nameLock.Lock() + nameLock.dec() +} + +// Unlock unlocks the mutex with the given name +// If the given lock is not being waited on by any other callers, it is deleted +func (l *Locker) Unlock(name string) error { + l.mu.Lock() + nameLock, exists := l.locks[name] + if !exists { + l.mu.Unlock() + return ErrNoSuchLock + } + + if nameLock.count() == 0 { + delete(l.locks, name) + } + nameLock.Unlock() + + l.mu.Unlock() + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go new file mode 100644 index 0000000000..40d8fd2b89 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go @@ -0,0 +1,173 @@ +//go:build linux && cgo +// +build linux,cgo + +package loopback + +import ( + "errors" + "fmt" + "os" + "syscall" + + "github.com/sirupsen/logrus" +) + +// Loopback related errors +var ( + ErrAttachLoopbackDevice = errors.New("loopback attach failed") + ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file") + ErrSetCapacity = errors.New("Unable to set loopback capacity") +) + +func stringToLoopName(src string) [LoNameSize]uint8 { + var dst [LoNameSize]uint8 + copy(dst[:], src[:]) + return dst +} + +func getNextFreeLoopbackIndex() (int, error) { + f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0o644) + if err != nil { + return 0, err + } + defer f.Close() + + index, err := ioctlLoopCtlGetFree(f.Fd()) + if index < 0 { + index = 0 + } + return index, err +} + +func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File) (loopFile *os.File, err error) { + // Read information about the loopback file. 
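+ // Its device and inode numbers are compared below against what the loop driver reports for its backing file.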
+ var st syscall.Stat_t + err = syscall.Fstat(int(sparseFile.Fd()), &st) + if err != nil { + logrus.Errorf("Reading information about loopback file %s: %v", sparseName, err) + return nil, ErrAttachLoopbackDevice + } + + // Start looking for a free /dev/loop + for { + target := fmt.Sprintf("/dev/loop%d", index) + index++ + + fi, err := os.Stat(target) + if err != nil { + if os.IsNotExist(err) { + logrus.Error("There are no more loopback devices available.") + } + return nil, ErrAttachLoopbackDevice + } + + if fi.Mode()&os.ModeDevice != os.ModeDevice { + logrus.Errorf("Loopback device %s is not a block device.", target) + continue + } + + // OpenFile adds O_CLOEXEC + loopFile, err = os.OpenFile(target, os.O_RDWR, 0o644) + if err != nil { + logrus.Errorf("Opening loopback device: %s", err) + return nil, ErrAttachLoopbackDevice + } + + // Try to attach to the loop file + if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil { + loopFile.Close() + + // If the error is EBUSY, then try the next loopback + if err != syscall.EBUSY { + logrus.Errorf("Cannot set up loopback device %s: %s", target, err) + return nil, ErrAttachLoopbackDevice + } + + // Otherwise, we keep going with the loop + continue + } + + // Check if the loopback driver and underlying filesystem agree on the loopback file's + // device and inode numbers. + dev, ino, err := getLoopbackBackingFile(loopFile) + if err != nil { + logrus.Errorf("Getting loopback backing file: %s", err) + return nil, ErrGetLoopbackBackingFile + } + if dev != uint64(st.Dev) || ino != st.Ino { + logrus.Errorf("Loopback device and filesystem disagree on device/inode for %q: %#x(%d):%#x(%d) vs %#x(%d):%#x(%d)", sparseName, dev, dev, ino, ino, st.Dev, st.Dev, st.Ino, st.Ino) + } + + // In case of success, we finished. Break the loop. + break + } + + // This can't happen, but let's be sure + if loopFile == nil { + logrus.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name()) + return nil, ErrAttachLoopbackDevice + } + + return loopFile, nil +} + +// AttachLoopDevice attaches the given sparse file to the next +// available loopback device. It returns an opened *os.File. +func AttachLoopDevice(sparseName string) (loop *os.File, err error) { + return attachLoopDevice(sparseName, false) +} + +// AttachLoopDeviceRO attaches the given sparse file opened read-only to +// the next available loopback device. It returns an opened *os.File. +func AttachLoopDeviceRO(sparseName string) (loop *os.File, err error) { + return attachLoopDevice(sparseName, true) +} + +func attachLoopDevice(sparseName string, readonly bool) (loop *os.File, err error) { + // Try to retrieve the next available loopback device via syscall. + // If it fails, we discard error and start looping for a + // loopback from index 0. 
+ startIndex, err := getNextFreeLoopbackIndex() + if err != nil { + logrus.Debugf("Error retrieving the next available loopback: %s", err) + } + + var sparseFile *os.File + + // OpenFile adds O_CLOEXEC + if readonly { + sparseFile, err = os.OpenFile(sparseName, os.O_RDONLY, 0o644) + } else { + sparseFile, err = os.OpenFile(sparseName, os.O_RDWR, 0o644) + } + if err != nil { + logrus.Errorf("Opening sparse file: %v", err) + return nil, ErrAttachLoopbackDevice + } + defer sparseFile.Close() + + loopFile, err := openNextAvailableLoopback(startIndex, sparseName, sparseFile) + if err != nil { + return nil, err + } + + // Set the status of the loopback device + loopInfo := &loopInfo64{ + loFileName: stringToLoopName(loopFile.Name()), + loOffset: 0, + loFlags: LoFlagsAutoClear, + } + + if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil { + logrus.Errorf("Cannot set up loopback device info: %s", err) + + // If the call failed, then free the loopback device + if err := ioctlLoopClrFd(loopFile.Fd()); err != nil { + logrus.Error("While cleaning up the loopback device") + } + loopFile.Close() + return nil, ErrAttachLoopbackDevice + } + + return loopFile, nil +} diff --git a/vendor/github.com/containers/storage/pkg/loopback/ioctl.go b/vendor/github.com/containers/storage/pkg/loopback/ioctl.go new file mode 100644 index 0000000000..da2ba46fe8 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/loopback/ioctl.go @@ -0,0 +1,54 @@ +//go:build linux && cgo +// +build linux,cgo + +package loopback + +import ( + "syscall" + "unsafe" +) + +func ioctlLoopCtlGetFree(fd uintptr) (int, error) { + index, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, LoopCtlGetFree, 0) + if err != 0 { + return 0, err + } + return int(index), nil +} + +func ioctlLoopSetFd(loopFd, sparseFd uintptr) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetFd, sparseFd); err != 0 { + return err + } + return nil +} + +func ioctlLoopSetStatus64(loopFd uintptr, loopInfo *loopInfo64) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { + return err + } + return nil +} + +func ioctlLoopClrFd(loopFd uintptr) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopClrFd, 0); err != 0 { + return err + } + return nil +} + +func ioctlLoopGetStatus64(loopFd uintptr) (*loopInfo64, error) { + loopInfo := &loopInfo64{} + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { + return nil, err + } + return loopInfo, nil +} + +func ioctlLoopSetCapacity(loopFd uintptr, value int) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetCapacity, uintptr(value)); err != 0 { + return err + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go b/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go new file mode 100644 index 0000000000..21a981007b --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go @@ -0,0 +1,53 @@ +//go:build linux && cgo +// +build linux,cgo + +package loopback + +/* +#include <linux/loop.h> // FIXME: present only for defines, maybe we can remove it? 
+ +#ifndef LOOP_CTL_GET_FREE + #define LOOP_CTL_GET_FREE 0x4C82 +#endif + +#ifndef LO_FLAGS_PARTSCAN + #define LO_FLAGS_PARTSCAN 8 +#endif + +*/ +import "C" + +type loopInfo64 struct { + loDevice uint64 /* ioctl r/o */ + loInode uint64 /* ioctl r/o */ + loRdevice uint64 /* ioctl r/o */ + loOffset uint64 + loSizelimit uint64 /* bytes, 0 == max available */ + loNumber uint32 /* ioctl r/o */ + loEncryptType uint32 + loEncryptKeySize uint32 /* ioctl w/o */ + loFlags uint32 /* ioctl r/o */ + loFileName [LoNameSize]uint8 + loCryptName [LoNameSize]uint8 + loEncryptKey [LoKeySize]uint8 /* ioctl w/o */ + loInit [2]uint64 +} + +// IOCTL consts +const ( + LoopSetFd = C.LOOP_SET_FD + LoopCtlGetFree = C.LOOP_CTL_GET_FREE + LoopGetStatus64 = C.LOOP_GET_STATUS64 + LoopSetStatus64 = C.LOOP_SET_STATUS64 + LoopClrFd = C.LOOP_CLR_FD + LoopSetCapacity = C.LOOP_SET_CAPACITY +) + +// LOOP consts. +const ( + LoFlagsAutoClear = C.LO_FLAGS_AUTOCLEAR + LoFlagsReadOnly = C.LO_FLAGS_READ_ONLY + LoFlagsPartScan = C.LO_FLAGS_PARTSCAN + LoKeySize = C.LO_KEY_SIZE + LoNameSize = C.LO_NAME_SIZE +) diff --git a/vendor/github.com/containers/storage/pkg/loopback/loopback.go b/vendor/github.com/containers/storage/pkg/loopback/loopback.go new file mode 100644 index 0000000000..ec42872476 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/loopback/loopback.go @@ -0,0 +1,64 @@ +//go:build linux && cgo +// +build linux,cgo + +package loopback + +import ( + "fmt" + "os" + "syscall" + + "github.com/sirupsen/logrus" +) + +func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) { + loopInfo, err := ioctlLoopGetStatus64(file.Fd()) + if err != nil { + logrus.Errorf("Get loopback backing file: %v", err) + return 0, 0, ErrGetLoopbackBackingFile + } + return loopInfo.loDevice, loopInfo.loInode, nil +} + +// SetCapacity reloads the size for the loopback device. +func SetCapacity(file *os.File) error { + if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil { + logrus.Errorf("loopbackSetCapacity: %s", err) + return ErrSetCapacity + } + return nil +} + +// FindLoopDeviceFor returns a loopback device file for the specified file which +// is backing file of a loop back device. 
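+// It returns nil if no matching loop device is found or if an error occurs while probing /dev/loopN.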
+func FindLoopDeviceFor(file *os.File) *os.File { + stat, err := file.Stat() + if err != nil { + return nil + } + targetInode := stat.Sys().(*syscall.Stat_t).Ino + targetDevice := stat.Sys().(*syscall.Stat_t).Dev + + for i := 0; true; i++ { + path := fmt.Sprintf("/dev/loop%d", i) + + file, err := os.OpenFile(path, os.O_RDWR, 0) + if err != nil { + if os.IsNotExist(err) { + return nil + } + + // Ignore all errors until the first not-exist + // we want to continue looking for the file + continue + } + + dev, inode, err := getLoopbackBackingFile(file) + if err == nil && dev == uint64(targetDevice) && inode == targetInode { + return file + } + file.Close() + } + + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/loopback/loopback_unsupported.go b/vendor/github.com/containers/storage/pkg/loopback/loopback_unsupported.go new file mode 100644 index 0000000000..460b307099 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/loopback/loopback_unsupported.go @@ -0,0 +1 @@ +package loopback diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go new file mode 100644 index 0000000000..6406cb14ff --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go @@ -0,0 +1,75 @@ +//go:build !windows +// +build !windows + +// Package kernel provides helper function to get, parse and compare kernel +// versions for different platforms. +package kernel + +import ( + "errors" + "fmt" +) + +// VersionInfo holds information about the kernel. +type VersionInfo struct { + Kernel int // Version of the kernel (e.g. 4.1.2-generic -> 4) + Major int // Major part of the kernel version (e.g. 4.1.2-generic -> 1) + Minor int // Minor part of the kernel version (e.g. 4.1.2-generic -> 2) + Flavor string // Flavor of the kernel version (e.g. 4.1.2-generic -> generic) +} + +func (k *VersionInfo) String() string { + return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor) +} + +// CompareKernelVersion compares two kernel.VersionInfo structs. +// Returns -1 if a < b, 0 if a == b, 1 if a > b +func CompareKernelVersion(a, b VersionInfo) int { + if a.Kernel < b.Kernel { + return -1 + } else if a.Kernel > b.Kernel { + return 1 + } + + if a.Major < b.Major { + return -1 + } else if a.Major > b.Major { + return 1 + } + + if a.Minor < b.Minor { + return -1 + } else if a.Minor > b.Minor { + return 1 + } + + return 0 +} + +// ParseRelease parses a string and creates a VersionInfo based on it. +func ParseRelease(release string) (*VersionInfo, error) { + var ( + kernel, major, minor, parsed int + flavor, partial string + ) + + // Ignore error from Sscanf to allow an empty flavor. Instead, just + // make sure we got all the version numbers. 
+ parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial) + if parsed < 2 { + return nil, errors.New("Can't parse kernel version " + release) + } + + // sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64 + parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor) + if parsed < 1 { + flavor = partial + } + + return &VersionInfo{ + Kernel: kernel, + Major: major, + Minor: minor, + Flavor: flavor, + }, nil +} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go new file mode 100644 index 0000000000..645790da64 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go @@ -0,0 +1,57 @@ +//go:build darwin +// +build darwin + +// Package kernel provides helper function to get, parse and compare kernel +// versions for different platforms. +package kernel + +import ( + "fmt" + "os/exec" + "strings" + + "github.com/mattn/go-shellwords" +) + +// GetKernelVersion gets the current kernel version. +func GetKernelVersion() (*VersionInfo, error) { + release, err := getRelease() + if err != nil { + return nil, err + } + + return ParseRelease(release) +} + +// getRelease uses `system_profiler SPSoftwareDataType` to get OSX kernel version +func getRelease() (string, error) { + cmd := exec.Command("system_profiler", "SPSoftwareDataType") + osName, err := cmd.Output() + if err != nil { + return "", err + } + + var release string + data := strings.Split(string(osName), "\n") + for _, line := range data { + if strings.Contains(line, "Kernel Version") { + // It has the format like ' Kernel Version: Darwin 14.5.0' + content := strings.SplitN(line, ":", 2) + if len(content) != 2 { + return "", fmt.Errorf("kernel version is invalid") + } + + prettyNames, err := shellwords.Parse(content[1]) + if err != nil { + return "", fmt.Errorf("kernel version is invalid: %w", err) + } + + if len(prettyNames) != 2 { + return "", fmt.Errorf("kernel version needs to be 'Darwin x.x.x' ") + } + release = prettyNames[1] + } + } + + return release, nil +} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go new file mode 100644 index 0000000000..ed8cca2c62 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go @@ -0,0 +1,46 @@ +//go:build linux || freebsd || solaris || openbsd +// +build linux freebsd solaris openbsd + +// Package kernel provides helper function to get, parse and compare kernel +// versions for different platforms. +package kernel + +import ( + "bytes" + + "github.com/sirupsen/logrus" +) + +// GetKernelVersion gets the current kernel version. +func GetKernelVersion() (*VersionInfo, error) { + uts, err := uname() + if err != nil { + return nil, err + } + + release := make([]byte, len(uts.Release)) + + i := 0 + for _, c := range uts.Release { + release[i] = byte(c) + i++ + } + + // Remove the \x00 from the release for Atoi to parse correctly + release = release[:bytes.IndexByte(release, 0)] + + return ParseRelease(string(release)) +} + +// CheckKernelVersion checks if current kernel is newer than (or equal to) +// the given version. 
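+// If the running kernel version cannot be determined, it logs a warning and returns true.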
+func CheckKernelVersion(k, major, minor int) bool { + if v, err := GetKernelVersion(); err != nil { + logrus.Warnf("Error getting kernel version: %s", err) + } else { + if CompareKernelVersion(*v, VersionInfo{Kernel: k, Major: major, Minor: minor}) < 0 { + return false + } + } + return true +} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go new file mode 100644 index 0000000000..4b7fdee830 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go @@ -0,0 +1,70 @@ +//go:build windows +// +build windows + +package kernel + +import ( + "fmt" + "unsafe" + + "golang.org/x/sys/windows" +) + +// VersionInfo holds information about the kernel. +type VersionInfo struct { + kvi string // Version of the kernel (e.g. 6.1.7601.17592 -> 6) + major int // Major part of the kernel version (e.g. 6.1.7601.17592 -> 1) + minor int // Minor part of the kernel version (e.g. 6.1.7601.17592 -> 7601) + build int // Build number of the kernel version (e.g. 6.1.7601.17592 -> 17592) +} + +func (k *VersionInfo) String() string { + return fmt.Sprintf("%d.%d %d (%s)", k.major, k.minor, k.build, k.kvi) +} + +// GetKernelVersion gets the current kernel version. +func GetKernelVersion() (*VersionInfo, error) { + var ( + h windows.Handle + dwVersion uint32 + err error + ) + + KVI := &VersionInfo{"Unknown", 0, 0, 0} + + if err = windows.RegOpenKeyEx(windows.HKEY_LOCAL_MACHINE, + windows.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), + 0, + windows.KEY_READ, + &h); err != nil { + return KVI, err + } + defer windows.RegCloseKey(h) + + var buf [1 << 10]uint16 + var typ uint32 + n := uint32(len(buf) * 2) // api expects array of bytes, not uint16 + + if err = windows.RegQueryValueEx(h, + windows.StringToUTF16Ptr("BuildLabEx"), + nil, + &typ, + (*byte)(unsafe.Pointer(&buf[0])), + &n); err != nil { + return KVI, err + } + + KVI.kvi = windows.UTF16ToString(buf[:]) + + // Important - docker.exe MUST be manifested for this API to return + // the correct information. + if dwVersion, err = windows.GetVersion(); err != nil { + return KVI, err + } + + KVI.major = int(dwVersion & 0xFF) + KVI.minor = int((dwVersion & 0xFF00) >> 8) + KVI.build = int((dwVersion & 0xFFFF0000) >> 16) + + return KVI, nil +} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_freebsd.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_freebsd.go new file mode 100644 index 0000000000..e913fad001 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_freebsd.go @@ -0,0 +1,17 @@ +package kernel + +import "golang.org/x/sys/unix" + +// Utsname represents the system name structure. +// It is passthrough for unix.Utsname in order to make it portable with +// other platforms where it is not available. 
+type Utsname unix.Utsname + +func uname() (*unix.Utsname, error) { + uts := &unix.Utsname{} + + if err := unix.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go new file mode 100644 index 0000000000..e913fad001 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go @@ -0,0 +1,17 @@ +package kernel + +import "golang.org/x/sys/unix" + +// Utsname represents the system name structure. +// It is passthrough for unix.Utsname in order to make it portable with +// other platforms where it is not available. +type Utsname unix.Utsname + +func uname() (*unix.Utsname, error) { + uts := &unix.Utsname{} + + if err := unix.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go new file mode 100644 index 0000000000..49370bd3dd --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go @@ -0,0 +1,14 @@ +package kernel + +import ( + "golang.org/x/sys/unix" +) + +func uname() (*unix.Utsname, error) { + uts := &unix.Utsname{} + + if err := unix.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go new file mode 100644 index 0000000000..12671db513 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go @@ -0,0 +1,14 @@ +//go:build openbsd +// +build openbsd + +package kernel + +import ( + "fmt" + "runtime" +) + +// A stub called by kernel_unix.go . +func uname() (*Utsname, error) { + return nil, fmt.Errorf("Kernel version detection is not available on %s", runtime.GOOS) +} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported_type.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported_type.go new file mode 100644 index 0000000000..f515500c92 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported_type.go @@ -0,0 +1,11 @@ +//go:build !linux && !solaris && !freebsd +// +build !linux,!solaris,!freebsd + +package kernel + +// Utsname represents the system name structure. +// It is defined here to make it portable as it is available on linux but not +// on windows. +type Utsname struct { + Release [65]byte +} diff --git a/vendor/github.com/containers/storage/pkg/parsers/parsers.go b/vendor/github.com/containers/storage/pkg/parsers/parsers.go new file mode 100644 index 0000000000..3fb0c36b88 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/parsers/parsers.go @@ -0,0 +1,70 @@ +// Package parsers provides helper functions to parse and validate different type +// of string. It can be hosts, unix addresses, tcp addresses, filters, kernel +// operating system versions. 
+package parsers + +import ( + "fmt" + "strconv" + "strings" +) + +// ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value) +func ParseKeyValueOpt(opt string) (string, string, error) { + parts := strings.SplitN(opt, "=", 2) + if len(parts) != 2 { + return "", "", fmt.Errorf("unable to parse key/value option: %s", opt) + } + return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil +} + +// ParseUintList parses and validates the specified string as the value +// found in some cgroup file (e.g. `cpuset.cpus`, `cpuset.mems`), which could be +// one of the formats below. Note that duplicates are actually allowed in the +// input string. It returns a `map[int]bool` with available elements from `val` +// set to `true`. +// Supported formats: +// +// 7 +// 1-6 +// 0,3-4,7,8-10 +// 0-0,0,1-7 +// 03,1-3 <- this is gonna get parsed as [1,2,3] +// 3,2,1 +// 0-2,3,1 +func ParseUintList(val string) (map[int]bool, error) { + if val == "" { + return map[int]bool{}, nil + } + + availableInts := make(map[int]bool) + split := strings.Split(val, ",") + errInvalidFormat := fmt.Errorf("invalid format: %s", val) + + for _, r := range split { + if !strings.Contains(r, "-") { + v, err := strconv.Atoi(r) + if err != nil { + return nil, errInvalidFormat + } + availableInts[v] = true + } else { + split := strings.SplitN(r, "-", 2) + min, err := strconv.Atoi(split[0]) + if err != nil { + return nil, errInvalidFormat + } + max, err := strconv.Atoi(split[1]) + if err != nil { + return nil, errInvalidFormat + } + if max < min { + return nil, errInvalidFormat + } + for i := min; i <= max; i++ { + availableInts[i] = true + } + } + } + return availableInts, nil +} diff --git a/vendor/github.com/containers/storage/pkg/stringid/README.md b/vendor/github.com/containers/storage/pkg/stringid/README.md new file mode 100644 index 0000000000..37a5098fd9 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/stringid/README.md @@ -0,0 +1 @@ +This package provides helper functions for dealing with string identifiers diff --git a/vendor/github.com/containers/storage/pkg/stringid/stringid.go b/vendor/github.com/containers/storage/pkg/stringid/stringid.go new file mode 100644 index 0000000000..20abc74073 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/stringid/stringid.go @@ -0,0 +1,106 @@ +// Package stringid provides helper functions for dealing with string identifiers +package stringid + +import ( + cryptorand "crypto/rand" + "encoding/hex" + "fmt" + "io" + "math" + "math/big" + "math/rand" + "strconv" + "strings" + "sync" + "time" + + "github.com/containers/storage/pkg/regexp" +) + +const shortLen = 12 + +var ( + validShortID = regexp.Delayed("^[a-f0-9]{12}$") + validHex = regexp.Delayed(`^[a-f0-9]{64}$`) + + rngLock sync.Mutex + rng *rand.Rand // A RNG with seeding properties we control. It can only be accessed with rngLock held. +) + +// IsShortID determines if an arbitrary string *looks like* a short ID. +func IsShortID(id string) bool { + return validShortID.MatchString(id) +} + +// TruncateID returns a shorthand version of a string identifier for convenience. +// A collision with other shorthands is very unlikely, but possible. +// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller +// will need to use a longer prefix, or the full-length Id. +func TruncateID(id string) string { + if i := strings.IndexRune(id, ':'); i >= 0 { + id = id[i+1:] + } + if len(id) > shortLen { + id = id[:shortLen] + } + return id +} + +func generateID(r io.Reader) string { + b := make([]byte, 32) + for { + if _, err := io.ReadFull(r, b); err != nil { + panic(err) // This shouldn't happen + } + id := hex.EncodeToString(b) + // if we try to parse the truncated form as an int and we don't have + // an error then the value is all numeric and causes issues when + // used as a hostname. ref #3869 + if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil { + continue + } + return id + } +} + +// GenerateRandomID returns a pseudorandom 64-character hex string. +func GenerateRandomID() string { + return generateID(cryptorand.Reader) +} + +// GenerateNonCryptoID generates a unique id without using cryptographically +// secure sources of random. +// It helps you to save entropy. +func GenerateNonCryptoID() string { + rngLock.Lock() + defer rngLock.Unlock() + return generateID(readerFunc(rng.Read)) +} + +// ValidateID checks whether an ID string is a valid image ID. +func ValidateID(id string) error { + if ok := validHex.MatchString(id); !ok { + return fmt.Errorf("image ID %q is invalid", id) + } + return nil +} + +func init() { + // Initialize a private RNG so we generate random ids. Tries to use a + // crypto seed before falling back to time. + var seed int64 + if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil { + // This should not happen, but worst-case fallback to time-based seed. + seed = time.Now().UnixNano() + } else { + seed = cryptoseed.Int64() + } + + rng = rand.New(rand.NewSource(seed)) +} + +type readerFunc func(p []byte) (int, error) + +func (fn readerFunc) Read(p []byte) (int, error) { + return fn(p) +} diff --git a/vendor/github.com/containers/storage/pkg/stringutils/README.md b/vendor/github.com/containers/storage/pkg/stringutils/README.md new file mode 100644 index 0000000000..b3e454573c --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/stringutils/README.md @@ -0,0 +1 @@ +This package provides helper functions for dealing with strings diff --git a/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go b/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go new file mode 100644 index 0000000000..66a59c85d5 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go @@ -0,0 +1,110 @@ +// Package stringutils provides helper functions for dealing with strings. +package stringutils + +import ( + "bytes" + "math/rand" + "strings" +) + +// GenerateRandomAlphaOnlyString generates an alphabetical random string with length n. +func GenerateRandomAlphaOnlyString(n int) string { + // make a really long string + letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + b := make([]byte, n) + for i := range b { + b[i] = letters[rand.Intn(len(letters))] + } + return string(b) +} + +// GenerateRandomASCIIString generates an ASCII random string with length n. +func GenerateRandomASCIIString(n int) string { + chars := "abcdefghijklmnopqrstuvwxyz" + + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + + "~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` " + res := make([]byte, n) + for i := 0; i < n; i++ { + res[i] = chars[rand.Intn(len(chars))] + } + return string(res) +} + +// Ellipsis truncates a string to fit within maxlen, and appends ellipsis (...). +// For maxlen of 3 and lower, no ellipsis is appended. 
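+// Both Ellipsis and Truncate below operate on runes rather than bytes, so multi-byte characters are never split.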
+func Ellipsis(s string, maxlen int) string { + r := []rune(s) + if len(r) <= maxlen { + return s + } + if maxlen <= 3 { + return string(r[:maxlen]) + } + return string(r[:maxlen-3]) + "..." +} + +// Truncate truncates a string to maxlen. +func Truncate(s string, maxlen int) string { + r := []rune(s) + if len(r) <= maxlen { + return s + } + return string(r[:maxlen]) +} + +// InSlice tests whether a string is contained in a slice of strings or not. +// Comparison is case insensitive +func InSlice(slice []string, s string) bool { + for _, ss := range slice { + if strings.EqualFold(s, ss) { + return true + } + } + return false +} + +// RemoveFromSlice removes a string from a slice. The string can be present +// multiple times. The entire slice is iterated. +func RemoveFromSlice(slice []string, s string) (ret []string) { + for _, ss := range slice { + if !strings.EqualFold(s, ss) { + ret = append(ret, ss) + } + } + return ret +} + +func quote(word string, buf *bytes.Buffer) { + // Bail out early for "simple" strings + if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! \t\n") { + buf.WriteString(word) + return + } + + buf.WriteString("'") + + for i := 0; i < len(word); i++ { + b := word[i] + if b == '\'' { + // Replace literal ' with a close ', a \', and an open ' + buf.WriteString("'\\''") + } else { + buf.WriteByte(b) + } + } + + buf.WriteString("'") +} + +// ShellQuoteArguments takes a list of strings and escapes them so they will be +// handled right when passed as arguments to a program via a shell +func ShellQuoteArguments(args []string) string { + var buf bytes.Buffer + for i, arg := range args { + if i != 0 { + buf.WriteByte(' ') + } + quote(arg, &buf) + } + return buf.String() +} diff --git a/vendor/github.com/containers/storage/pkg/tarlog/tarlogger.go b/vendor/github.com/containers/storage/pkg/tarlog/tarlogger.go new file mode 100644 index 0000000000..674e0a0bae --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/tarlog/tarlogger.go @@ -0,0 +1,66 @@ +package tarlog + +import ( + "io" + "sync" + + "github.com/sirupsen/logrus" + "github.com/vbatts/tar-split/archive/tar" +) + +type tarLogger struct { + writer *io.PipeWriter + closeMutex *sync.Mutex + closed bool +} + +// NewLogger returns a writer that, when a tar archive is written to it, calls +// `logger` for each file header it encounters in the archive. +func NewLogger(logger func(*tar.Header)) (io.WriteCloser, error) { + reader, writer := io.Pipe() + t := &tarLogger{ + writer: writer, + closeMutex: new(sync.Mutex), + closed: false, + } + tr := tar.NewReader(reader) + t.closeMutex.Lock() + go func() { + hdr, err := tr.Next() + for err == nil { + logger(hdr) + hdr, err = tr.Next() + + } + // Make sure to avoid writes after the reader has been closed. + if err := reader.Close(); err != nil { + logrus.Errorf("Closing tarlogger reader: %v", err) + } + // Unblock the Close(). + t.closeMutex.Unlock() + }() + return t, nil +} + +func (t *tarLogger) Write(b []byte) (int, error) { + if t.closed { + // We cannot use os.Pipe() as this alters the tar's digest. Using + // io.Pipe() requires this workaround as it does not allow for writes + // after close. + return len(b), nil + } + n, err := t.writer.Write(b) + if err == io.ErrClosedPipe { + // The pipe got closed. Track it and avoid to call Write in future. + t.closed = true + return len(b), nil + } + return n, err +} + +func (t *tarLogger) Close() error { + err := t.writer.Close() + // Wait for the reader to finish. 
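+ // The goroutine started in NewLogger unlocks closeMutex once the tar reader has drained the pipe.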
+ t.closeMutex.Lock() + return err +} diff --git a/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go b/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go new file mode 100644 index 0000000000..c14a5cc4d2 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go @@ -0,0 +1,139 @@ +// Package truncindex provides a general 'index tree', used by Docker +// in order to be able to reference containers by only a few unambiguous +// characters of their id. +package truncindex + +import ( + "errors" + "fmt" + "strings" + "sync" + + "github.com/tchap/go-patricia/v2/patricia" +) + +var ( + // ErrEmptyPrefix is an error returned if the prefix was empty. + ErrEmptyPrefix = errors.New("prefix can't be empty") + + // ErrIllegalChar is returned when a space is in the ID + ErrIllegalChar = errors.New("illegal character: ' '") + + // ErrNotExist is returned when ID or its prefix not found in index. + ErrNotExist = errors.New("ID does not exist") +) + +// ErrAmbiguousPrefix is returned if the prefix was ambiguous +// (multiple ids for the prefix). +type ErrAmbiguousPrefix struct { //nolint: errname + prefix string +} + +func (e ErrAmbiguousPrefix) Error() string { + return fmt.Sprintf("Multiple IDs found with provided prefix: %s", e.prefix) +} + +// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes. +// This is used to retrieve image and container IDs by more convenient shorthand prefixes. +type TruncIndex struct { + sync.RWMutex + trie *patricia.Trie + ids map[string]struct{} +} + +// NewTruncIndex creates a new TruncIndex and initializes with a list of IDs. +// Invalid IDs are _silently_ ignored. +func NewTruncIndex(ids []string) (idx *TruncIndex) { + idx = &TruncIndex{ + ids: make(map[string]struct{}), + + // Change patricia max prefix per node length, + // because our len(ID) always 64 + trie: patricia.NewTrie(patricia.MaxPrefixPerNode(64)), + } + for _, id := range ids { + _ = idx.addID(id) // Ignore invalid IDs. Duplicate IDs are not a problem. + } + return +} + +func (idx *TruncIndex) addID(id string) error { + if strings.Contains(id, " ") { + return ErrIllegalChar + } + if id == "" { + return ErrEmptyPrefix + } + if _, exists := idx.ids[id]; exists { + return fmt.Errorf("id already exists: '%s'", id) + } + idx.ids[id] = struct{}{} + if inserted := idx.trie.Insert(patricia.Prefix(id), struct{}{}); !inserted { + return fmt.Errorf("failed to insert id: %s", id) + } + return nil +} + +// Add adds a new ID to the TruncIndex. +func (idx *TruncIndex) Add(id string) error { + idx.Lock() + defer idx.Unlock() + return idx.addID(id) +} + +// Delete removes an ID from the TruncIndex. If there are multiple IDs +// with the given prefix, an error is thrown. +func (idx *TruncIndex) Delete(id string) error { + idx.Lock() + defer idx.Unlock() + if _, exists := idx.ids[id]; !exists || id == "" { + return fmt.Errorf("no such id: '%s'", id) + } + delete(idx.ids, id) + if deleted := idx.trie.Delete(patricia.Prefix(id)); !deleted { + return fmt.Errorf("no such id: '%s'", id) + } + return nil +} + +// Get retrieves an ID from the TruncIndex. If there are multiple IDs +// with the given prefix, an error is thrown. 
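+// An empty argument yields ErrEmptyPrefix; a prefix that matches nothing yields ErrNotExist.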
+func (idx *TruncIndex) Get(s string) (string, error) { + if s == "" { + return "", ErrEmptyPrefix + } + var id string + subTreeVisitFunc := func(prefix patricia.Prefix, item patricia.Item) error { + if id != "" { + // we haven't found the ID if there are two or more IDs + id = "" + return ErrAmbiguousPrefix{prefix: string(prefix)} + } + id = string(prefix) + return nil + } + + idx.RLock() + defer idx.RUnlock() + if err := idx.trie.VisitSubtree(patricia.Prefix(s), subTreeVisitFunc); err != nil { + return "", err + } + if id != "" { + return id, nil + } + return "", ErrNotExist +} + +// Iterate iterates over all stored IDs and passes each of them to the given +// handler. Take care that the handler method does not call any public +// method on truncindex as the internal locking is not reentrant/recursive +// and will result in deadlock. +func (idx *TruncIndex) Iterate(handler func(id string)) { + idx.Lock() + defer idx.Unlock() + // Ignore the error from Visit: it can only fail if the provided visitor fails, and ours never does. + _ = idx.trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error { + handler(string(prefix)) + return nil + }) +} diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf new file mode 100644 index 0000000000..cb4525f27d --- /dev/null +++ b/vendor/github.com/containers/storage/storage.conf @@ -0,0 +1,243 @@ +# This file is the configuration file for all tools +# that use the containers/storage library. The storage.conf file +# overrides all other storage.conf files. Container engines using the +# container/storage library do not inherit fields from other storage.conf +# files. +# +# Note: The storage.conf file overrides other storage.conf files based on this precedence: +# /usr/containers/storage.conf +# /etc/containers/storage.conf +# $HOME/.config/containers/storage.conf +# $XDG_CONFIG_HOME/containers/storage.conf (If XDG_CONFIG_HOME is set) +# See man 5 containers-storage.conf for more information +# The "container storage" table contains all of the server options. +[storage] + +# Default Storage Driver, Must be set for proper operation. +driver = "overlay" + +# Temporary storage location +runroot = "/run/containers/storage" + +# Primary Read/Write location of container storage +# When changing the graphroot location on an SELINUX system, you must +# ensure the labeling matches the default locations labels with the +# following commands: +# semanage fcontext -a -e /var/lib/containers/storage /NEWSTORAGEPATH +# restorecon -R -v /NEWSTORAGEPATH +graphroot = "/var/lib/containers/storage" + +# Optional alternate location of image store if a location separate from the +# container store is required. If set, it must be different than graphroot. +# imagestore = "" + + +# Storage path for rootless users +# +# rootless_storage_path = "$HOME/.local/share/containers/storage" + +# Transient store mode makes all container metadata be saved in temporary storage +# (i.e. runroot above). This is faster, but doesn't persist across reboots. +# Additional garbage collection must also be performed at boot-time, so this +# option should remain disabled in most configurations. +# transient_store = true + +[storage.options] +# Storage options to be passed to underlying storage drivers + +# AdditionalImageStores is used to pass paths to additional Read/Only image stores +# Must be comma separated list. 
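+# For example (illustrative paths only, not defaults):
+#   additionalimagestores = [ "/usr/lib/containers/storage", "/mnt/ro-images" ]
+#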
+additionalimagestores = [
+]
+
+# Allows specification of how storage is populated when pulling images. This
+# option can speed up the pulling process of images compressed with format
+# zstd:chunked. Containers/storage looks for files within images that are being
+# pulled from a container registry that were previously pulled to the host. It
+# can copy or create a hard link to the existing file when it finds them,
+# eliminating the need to pull them from the container registry. These options
+# can deduplicate the pulling of content and its storage on disk, and can allow
+# the kernel to use less memory when running containers.
+
+# containers/storage supports three keys:
+# * enable_partial_images="true" | "false"
+# Tells containers/storage to look for files previously pulled in storage
+# rather than always pulling them from the container registry.
+# * use_hard_links = "false" | "true"
+# Tells containers/storage to use hard links rather than creating new files in
+# the image, if an identical file already exists in storage.
+# * ostree_repos = ""
+# Tells containers/storage where an ostree repository exists that might have
+# previously pulled content which can be used when attempting to avoid
+# pulling content from the container registry.
+pull_options = {enable_partial_images = "false", use_hard_links = "false", ostree_repos=""}
+
+# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of
+# a container, to the UIDs/GIDs as they should appear outside of the container,
+# and the length of the range of UIDs/GIDs. Additional mapped sets can be
+# listed and will be heeded by libraries, but there are limits to the number of
+# mappings which the kernel will allow when you later attempt to run a
+# container.
+#
+# remap-uids = "0:1668442479:65536"
+# remap-gids = "0:1668442479:65536"
+
+# Remap-User/Group is a user name which can be used to look up one or more UID/GID
+# ranges in the /etc/subuid or /etc/subgid file. Mappings are set up starting
+# with an in-container ID of 0 and then a host-level ID taken from the lowest
+# range that matches the specified name, and using the length of that range.
+# Additional ranges are then assigned, using the ranges which specify the
+# lowest host-level IDs first, to the lowest not-yet-mapped in-container ID,
+# until all of the entries have been used for maps. This setting overrides the
+# Remap-UIDs/GIDs setting.
+#
+# remap-user = "containers"
+# remap-group = "containers"
+
+# Root-auto-userns-user is a user name which can be used to look up one or more UID/GID
+# ranges in the /etc/subuid and /etc/subgid file. These ranges will be partitioned
+# to containers configured to automatically create a user namespace. Containers
+# configured to automatically create a user namespace can still overlap with containers
+# having an explicit mapping set.
+# This setting is ignored when running as rootless.
+# root-auto-userns-user = "storage"
+#
+# Auto-userns-min-size is the minimum size for a user namespace created automatically.
+# auto-userns-min-size=1024
+#
+# Auto-userns-max-size is the maximum size for a user namespace created automatically.
+# auto-userns-max-size=65536
+
+[storage.options.overlay]
+# ignore_chown_errors can be set to allow a non-privileged user running with
+# a single UID within a user namespace to run containers. The user can pull
+# and use any image even those with multiple uids. Note multiple UIDs will be
+# squashed down to the default uid in the container.
These images will have no
+# separation between the users in the container. Only supported for the overlay
+# and vfs drivers.
+#ignore_chown_errors = "false"
+
+# Inodes is used to set a maximum number of inodes for the container image.
+# inodes = ""
+
+# Path to a helper program to use for mounting the file system instead of mounting it
+# directly.
+#mount_program = "/usr/bin/fuse-overlayfs"
+
+# mountopt specifies a comma-separated list of extra mount options
+mountopt = "nodev"
+
+# Set to skip a PRIVATE bind mount on the storage home directory.
+# skip_mount_home = "false"
+
+# Size is used to set a maximum size of the container image.
+# size = ""
+
+# ForceMask specifies the permissions mask that is used for new files and
+# directories.
+#
+# The values "shared" and "private" are accepted.
+# Octal permission masks are also accepted.
+#
+# "": No value specified.
+# All files/directories get set with the permissions identified within the
+# image.
+# "private": it is equivalent to 0700.
+# All files/directories get set with 0700 permissions. The owner has rwx
+# access to the files. No other users on the system can access the files.
+# This setting could be used with network-based home directories.
+# "shared": it is equivalent to 0755.
+# The owner has rwx access to the files and everyone else can read, access
+# and execute them. This setting is useful for sharing containers storage
+# with other users. For instance, a store owned by root can be shared
+# with rootless users as an additional store.
+# NOTE: All files within the image are made readable and executable by any
+# user on the system. Even /etc/shadow within your image is now readable by
+# any user.
+#
+# OCTAL: Users can experiment with other octal permissions.
+#
+# Note: The force_mask Flag is an experimental feature and could change in the
+# future. When "force_mask" is set the original permission mask is stored in
+# the "user.containers.override_stat" xattr and the "mount_program" option must
+# be specified. Mount programs like "/usr/bin/fuse-overlayfs" present the
+# extended attribute permissions to processes within containers rather than the
+# "force_mask" permissions.
+#
+# force_mask = ""
+
+[storage.options.thinpool]
+# Storage Options for thinpool
+
+# autoextend_percent determines the amount by which the pool needs to be
+# grown. This is specified in terms of % of the pool size. So a value of 20
+# means that when the threshold is hit, the pool will be grown by 20% of the
+# existing pool size.
+# autoextend_percent = "20"
+
+# autoextend_threshold determines the pool extension threshold in terms
+# of percentage of the pool size. For example, if the threshold is 60, it has
+# been hit when the pool is 60% full.
+# autoextend_threshold = "80"
+
+# basesize specifies the size to use when creating the base device, which
+# limits the size of images and containers.
+# basesize = "10G"
+
+# blocksize specifies a custom blocksize to use for the thin pool.
+# blocksize="64k"
+
+# directlvm_device specifies a custom block storage device to use for the
+# thin pool. Required if you set up devicemapper.
+# directlvm_device = ""
+
+# directlvm_device_force wipes the device even if it already has a filesystem.
+# directlvm_device_force = "True"
+
+# fs specifies the filesystem type to use for the base device.
+# fs="xfs"
+
+# log_level sets the log level of devicemapper.
+# 0: LogLevelSuppress 0 (Default)
+# 2: LogLevelFatal
+# 3: LogLevelErr
+# 4: LogLevelWarn
+# 5: LogLevelNotice
+# 6: LogLevelInfo
+# 7: LogLevelDebug
+# log_level = "7"
+
+# min_free_space specifies the min free space percent in a thin pool required for
+# new device creation to succeed. Valid values are from 0% - 99%.
+# A value of 0% disables it.
+# min_free_space = "10%"
+
+# mkfsarg specifies extra mkfs arguments to be used when creating the base
+# device.
+# mkfsarg = ""
+
+# metadata_size is used to set the `pvcreate --metadatasize` option when
+# creating thin devices. Default is 128k.
+# metadata_size = ""
+
+# Size is used to set a maximum size of the container image.
+# size = ""
+
+# use_deferred_removal marks devicemapper block device for deferred removal.
+# If the thinpool is in use when the driver attempts to remove it, the driver
+# tells the kernel to remove it as soon as possible. Note this does not free
+# up the disk space; use deferred deletion to fully remove the thinpool.
+# use_deferred_removal = "True"
+
+# use_deferred_deletion marks the thinpool device for deferred deletion.
+# If the device is busy when the driver attempts to delete it, the driver
+# will attempt to delete the device every 30 seconds until successful.
+# If the program using the driver exits, the driver will continue attempting
+# to clean up the next time the driver is used. Deferred deletion permanently
+# deletes the device and all data stored in the device will be lost.
+# use_deferred_deletion = "True"
+
+# xfs_nospace_max_retries specifies the maximum number of retries XFS should
+# attempt to complete IO when an ENOSPC (no space) error is returned by the
+# underlying storage device.
+# xfs_nospace_max_retries = "0"
diff --git a/vendor/github.com/containers/storage/storage.conf-freebsd b/vendor/github.com/containers/storage/storage.conf-freebsd
new file mode 100644
index 0000000000..c8abee64fb
--- /dev/null
+++ b/vendor/github.com/containers/storage/storage.conf-freebsd
@@ -0,0 +1,209 @@
+# This file is the configuration file for all tools
+# that use the containers/storage library. The storage.conf file
+# overrides all other storage.conf files. Container engines using the
+# container/storage library do not inherit fields from other storage.conf
+# files.
+#
+# Note: The storage.conf file overrides other storage.conf files based on this precedence:
+# /usr/local/share/containers/storage.conf
+# /usr/local/etc/containers/storage.conf
+# $HOME/.config/containers/storage.conf
+# $XDG_CONFIG_HOME/containers/storage.conf (If XDG_CONFIG_HOME is set)
+# See man 5 containers-storage.conf for more information
+# The "container storage" table contains all of the server options.
[storage]
+
+# Default Storage Driver, Must be set for proper operation.
+driver = "zfs"
+
+# Temporary storage location
+runroot = "/var/run/containers/storage"
+
+# Primary Read/Write location of container storage
+graphroot = "/var/db/containers/storage"
+
+# Optional value for image storage location
+# If set, it must be different than graphroot.
+
+# imagestore = ""
+
+# Storage path for rootless users
+#
+# rootless_storage_path = "$HOME/.local/share/containers/storage"
+
+[storage.options]
+# Storage options to be passed to underlying storage drivers
+
+# AdditionalImageStores is used to pass paths to additional Read/Only image stores
+# Must be comma separated list.
+additionalimagestores = [
+]
+
+# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of
+# a container, to the UIDs/GIDs as they should appear outside of the container,
+# and the length of the range of UIDs/GIDs. Additional mapped sets can be
+# listed and will be heeded by libraries, but there are limits to the number of
+# mappings which the kernel will allow when you later attempt to run a
+# container.
+#
+# remap-uids = 0:1668442479:65536
+# remap-gids = 0:1668442479:65536
+
+# Remap-User/Group is a user name which can be used to look up one or more UID/GID
+# ranges in the /etc/subuid or /etc/subgid file. Mappings are set up starting
+# with an in-container ID of 0 and then a host-level ID taken from the lowest
+# range that matches the specified name, and using the length of that range.
+# Additional ranges are then assigned, using the ranges which specify the
+# lowest host-level IDs first, to the lowest not-yet-mapped in-container ID,
+# until all of the entries have been used for maps.
+#
+# remap-user = "containers"
+# remap-group = "containers"
+
+# Root-auto-userns-user is a user name which can be used to look up one or more UID/GID
+# ranges in the /etc/subuid and /etc/subgid file. These ranges will be partitioned
+# to containers configured to automatically create a user namespace. Containers
+# configured to automatically create a user namespace can still overlap with containers
+# having an explicit mapping set.
+# This setting is ignored when running as rootless.
+# root-auto-userns-user = "storage"
+#
+# Auto-userns-min-size is the minimum size for a user namespace created automatically.
+# auto-userns-min-size=1024
+#
+# Auto-userns-max-size is the maximum size for a user namespace created automatically.
+# auto-userns-max-size=65536
+
+[storage.options.overlay]
+# ignore_chown_errors can be set to allow a non-privileged user running with
+# a single UID within a user namespace to run containers. The user can pull
+# and use any image even those with multiple uids. Note multiple UIDs will be
+# squashed down to the default uid in the container. These images will have no
+# separation between the users in the container. Only supported for the overlay
+# and vfs drivers.
+#ignore_chown_errors = "false"
+
+# Inodes is used to set a maximum number of inodes for the container image.
+# inodes = ""
+
+# Path to a helper program to use for mounting the file system instead of mounting it
+# directly.
+#mount_program = "/usr/bin/fuse-overlayfs"
+
+# mountopt specifies a comma-separated list of extra mount options
+mountopt = "nodev"
+
+# Set to skip a PRIVATE bind mount on the storage home directory.
+# skip_mount_home = "false"
+
+# Size is used to set a maximum size of the container image.
+# size = ""
+
+# ForceMask specifies the permissions mask that is used for new files and
+# directories.
+#
+# The values "shared" and "private" are accepted.
+# Octal permission masks are also accepted.
+#
+# "": No value specified.
+# All files/directories get set with the permissions identified within the
+# image.
+# "private": it is equivalent to 0700.
+# All files/directories get set with 0700 permissions. The owner has rwx
+# access to the files. No other users on the system can access the files.
+# This setting could be used with network-based home directories.
+# "shared": it is equivalent to 0755.
+# The owner has rwx access to the files and everyone else can read, access
+# and execute them. This setting is useful for sharing containers storage
+# with other users.
For instance, a store owned by root can be shared
+# with rootless users as an additional store.
+# NOTE: All files within the image are made readable and executable by any
+# user on the system. Even /etc/shadow within your image is now readable by
+# any user.
+#
+# OCTAL: Users can experiment with other octal permissions.
+#
+# Note: The force_mask Flag is an experimental feature and could change in the
+# future. When "force_mask" is set the original permission mask is stored in
+# the "user.containers.override_stat" xattr and the "mount_program" option must
+# be specified. Mount programs like "/usr/bin/fuse-overlayfs" present the
+# extended attribute permissions to processes within containers rather than the
+# "force_mask" permissions.
+#
+# force_mask = ""
+
+[storage.options.thinpool]
+# Storage Options for thinpool
+
+# autoextend_percent determines the amount by which the pool needs to be
+# grown. This is specified in terms of % of the pool size. So a value of 20
+# means that when the threshold is hit, the pool will be grown by 20% of the
+# existing pool size.
+# autoextend_percent = "20"
+
+# autoextend_threshold determines the pool extension threshold in terms
+# of percentage of the pool size. For example, if the threshold is 60, it has
+# been hit when the pool is 60% full.
+# autoextend_threshold = "80"
+
+# basesize specifies the size to use when creating the base device, which
+# limits the size of images and containers.
+# basesize = "10G"
+
+# blocksize specifies a custom blocksize to use for the thin pool.
+# blocksize="64k"
+
+# directlvm_device specifies a custom block storage device to use for the
+# thin pool. Required if you set up devicemapper.
+# directlvm_device = ""
+
+# directlvm_device_force wipes the device even if it already has a filesystem.
+# directlvm_device_force = "True"
+
+# fs specifies the filesystem type to use for the base device.
+# fs="xfs"
+
+# log_level sets the log level of devicemapper.
+# 0: LogLevelSuppress 0 (Default)
+# 2: LogLevelFatal
+# 3: LogLevelErr
+# 4: LogLevelWarn
+# 5: LogLevelNotice
+# 6: LogLevelInfo
+# 7: LogLevelDebug
+# log_level = "7"
+
+# min_free_space specifies the min free space percent in a thin pool required for
+# new device creation to succeed. Valid values are from 0% - 99%.
+# A value of 0% disables it.
+# min_free_space = "10%"
+
+# mkfsarg specifies extra mkfs arguments to be used when creating the base
+# device.
+# mkfsarg = ""
+
+# metadata_size is used to set the `pvcreate --metadatasize` option when
+# creating thin devices. Default is 128k.
+# metadata_size = ""
+
+# Size is used to set a maximum size of the container image.
+# size = ""
+
+# use_deferred_removal marks devicemapper block device for deferred removal.
+# If the thinpool is in use when the driver attempts to remove it, the driver
+# tells the kernel to remove it as soon as possible. Note this does not free
+# up the disk space; use deferred deletion to fully remove the thinpool.
+# use_deferred_removal = "True"
+
+# use_deferred_deletion marks the thinpool device for deferred deletion.
+# If the device is busy when the driver attempts to delete it, the driver
+# will attempt to delete the device every 30 seconds until successful.
+# If the program using the driver exits, the driver will continue attempting
+# to clean up the next time the driver is used. Deferred deletion permanently
+# deletes the device and all data stored in the device will be lost.
+# use_deferred_deletion = "True" + +# xfs_nospace_max_retries specifies the maximum number of retries XFS should +# attempt to complete IO when ENOSPC (no space) error is returned by +# underlying storage device. +# xfs_nospace_max_retries = "0" diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go new file mode 100644 index 0000000000..6753b296ff --- /dev/null +++ b/vendor/github.com/containers/storage/store.go @@ -0,0 +1,3621 @@ +package storage + +import ( + "encoding/base64" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "reflect" + "strings" + "sync" + "time" + + // register all of the built-in drivers + _ "github.com/containers/storage/drivers/register" + + drivers "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/lockfile" + "github.com/containers/storage/pkg/parsers" + "github.com/containers/storage/pkg/stringutils" + "github.com/containers/storage/pkg/system" + "github.com/containers/storage/types" + "github.com/hashicorp/go-multierror" + digest "github.com/opencontainers/go-digest" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/sirupsen/logrus" +) + +type updateNameOperation int + +const ( + setNames updateNameOperation = iota + addNames + removeNames +) + +const ( + volatileFlag = "Volatile" + mountLabelFlag = "MountLabel" + processLabelFlag = "ProcessLabel" + mountOptsFlag = "MountOpts" +) + +var ( + stores []*store + storesLock sync.Mutex +) + +// roMetadataStore wraps a method for reading metadata associated with an ID. +type roMetadataStore interface { + // Metadata reads metadata associated with an item with the specified ID. + Metadata(id string) (string, error) +} + +// rwMetadataStore wraps a method for setting metadata associated with an ID. +type rwMetadataStore interface { + // SetMetadata updates the metadata associated with the item with the specified ID. + SetMetadata(id, metadata string) error +} + +// metadataStore wraps up methods for getting and setting metadata associated with IDs. +type metadataStore interface { + roMetadataStore + rwMetadataStore +} + +// An roBigDataStore wraps up the read-only big-data related methods of the +// various types of file-based lookaside stores that we implement. +type roBigDataStore interface { + // BigData retrieves a (potentially large) piece of data associated with + // this ID, if it has previously been set. + BigData(id, key string) ([]byte, error) + + // BigDataSize retrieves the size of a (potentially large) piece of + // data associated with this ID, if it has previously been set. + BigDataSize(id, key string) (int64, error) + + // BigDataDigest retrieves the digest of a (potentially large) piece of + // data associated with this ID, if it has previously been set. + BigDataDigest(id, key string) (digest.Digest, error) + + // BigDataNames() returns a list of the names of previously-stored pieces of + // data. + BigDataNames(id string) ([]string, error) +} + +// A rwImageBigDataStore wraps up how we store big-data associated with images. +type rwImageBigDataStore interface { + // SetBigData stores a (potentially large) piece of data associated + // with this ID. 
+	// Pass github.com/containers/image/manifest.Digest as digestManifest
+	// to allow ImagesByDigest to find images by their correct digests.
+	SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error
+}
+
+// A containerBigDataStore wraps up how we store big-data associated with containers.
+type containerBigDataStore interface {
+	roBigDataStore
+	// SetBigData stores a (potentially large) piece of data associated
+	// with this ID.
+	SetBigData(id, key string, data []byte) error
+}
+
+// A roLayerBigDataStore wraps up how we store RO big-data associated with layers.
+type roLayerBigDataStore interface {
+	// BigData retrieves a (potentially large) piece of data associated
+	// with this ID, if it has previously been set.
+	BigData(id, key string) (io.ReadCloser, error)
+
+	// BigDataNames() returns a list of the names of previously-stored pieces of
+	// data.
+	BigDataNames(id string) ([]string, error)
+}
+
+// A rwLayerBigDataStore wraps up how we store big-data associated with layers.
+type rwLayerBigDataStore interface {
+	// SetBigData stores a (potentially large) piece of data associated
+	// with this ID.
+	SetBigData(id, key string, data io.Reader) error
+}
+
+// A flaggableStore can have flags set and cleared on items which it manages.
+type flaggableStore interface {
+	// ClearFlag removes a named flag from an item in the store.
+	ClearFlag(id string, flag string) error
+
+	// SetFlag sets a named flag and its value on an item in the store.
+	SetFlag(id string, flag string, value interface{}) error
+}
+
+type StoreOptions = types.StoreOptions
+
+// Store wraps up the various types of file-based stores that we use into a
+// singleton object that initializes and manages them all together.
+type Store interface {
+	// RunRoot, GraphRoot, GraphDriverName, and GraphOptions retrieve
+	// settings that were passed to GetStore() when the object was created.
+	RunRoot() string
+	GraphRoot() string
+	ImageStore() string
+	TransientStore() bool
+	GraphDriverName() string
+	GraphOptions() []string
+	PullOptions() map[string]string
+	UIDMap() []idtools.IDMap
+	GIDMap() []idtools.IDMap
+
+	// GraphDriver obtains and returns a handle to the graph Driver object used
+	// by the Store.
+	GraphDriver() (drivers.Driver, error)
+
+	// CreateLayer creates a new layer in the underlying storage driver,
+	// optionally having the specified ID (one will be assigned if none is
+	// specified), with the specified layer (or no layer) as its parent,
+	// and with optional names. (The writeable flag is ignored.)
+	CreateLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions) (*Layer, error)
+
+	// PutLayer combines the functions of CreateLayer and ApplyDiff,
+	// marking the layer for automatic removal if applying the diff fails
+	// for any reason.
+	//
+	// Note that we do some of this work in a child process. The calling
+	// process's main() function needs to import our pkg/reexec package and
+	// should begin with something like this in order to allow us to
+	// properly start that child process:
+	//   if reexec.Init() {
+	//       return
+	//   }
+	PutLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions, diff io.Reader) (*Layer, int64, error)
+
+	// CreateImage creates a new image, optionally with the specified ID
+	// (one will be assigned if none is specified), with optional names,
+	// referring to a specified image, and with optional metadata.
An
+	// image is a record which associates the ID of a layer with
+	// additional bookkeeping information which the library stores for the
+	// convenience of its caller.
+	CreateImage(id string, names []string, layer, metadata string, options *ImageOptions) (*Image, error)
+
+	// CreateContainer creates a new container, optionally with the
+	// specified ID (one will be assigned if none is specified), with
+	// optional names, using the specified image's top layer as the basis
+	// for the container's layer, and assigning the specified ID to that
+	// layer (one will be created if none is specified). A container is a
+	// layer which is associated with additional bookkeeping information
+	// which the library stores for the convenience of its caller.
+	CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error)
+
+	// Metadata retrieves the metadata which is associated with a layer,
+	// image, or container (whichever the passed-in ID refers to).
+	Metadata(id string) (string, error)
+
+	// SetMetadata updates the metadata which is associated with a layer,
+	// image, or container (whichever the passed-in ID refers to) to match
+	// the specified value. The metadata value can be retrieved at any
+	// time using Metadata, or using Layer, Image, or Container and reading
+	// the object directly.
+	SetMetadata(id, metadata string) error
+
+	// Exists checks if there is a layer, image, or container which has the
+	// passed-in ID or name.
+	Exists(id string) bool
+
+	// Status asks for a status report, in the form of key-value pairs,
+	// from the underlying storage driver. The contents vary from driver
+	// to driver.
+	Status() ([][2]string, error)
+
+	// Delete removes the layer, image, or container which has the
+	// passed-in ID or name. Note that no safety checks are performed, so
+	// this can leave images with references to layers which do not exist,
+	// and layers with references to parents which no longer exist.
+	Delete(id string) error
+
+	// DeleteLayer attempts to remove the specified layer. If the layer is the
+	// parent of any other layer, or is referred to by any images, it will return
+	// an error.
+	DeleteLayer(id string) error
+
+	// DeleteImage removes the specified image if it is not referred to by
+	// any containers. If its top layer is then no longer referred to by
+	// any other images and is not the parent of any other layers, its top
+	// layer will be removed. If that layer's parent is no longer referred
+	// to by any other images and is not the parent of any other layers,
+	// then it, too, will be removed. This procedure will be repeated
+	// until a layer which should not be removed, or the base layer, is
+	// reached, at which point the list of removed layers is returned. If
+	// the commit argument is false, the image and layers are not removed,
+	// but the list of layers which would be removed is still returned.
+	DeleteImage(id string, commit bool) (layers []string, err error)
+
+	// DeleteContainer removes the specified container and its layer. If
+	// there is no matching container, or if the container exists but its
+	// layer does not, an error will be returned.
+	DeleteContainer(id string) error
+
+	// Wipe removes all known layers, images, and containers.
+	Wipe() error
+
+	// MountImage mounts an image to a temporary directory and returns the mount point.
+	// MountImage allows the caller to mount an image.
Images will always
+	// be mounted read-only.
+	MountImage(id string, mountOptions []string, mountLabel string) (string, error)
+
+	// UnmountImage attempts to unmount an image, given an ID.
+	// Returns whether or not the layer is still mounted.
+	// WARNING: The return value may already be obsolete by the time it is available
+	// to the caller, so it can be used for heuristic sanity checks at best. It should almost always be ignored.
+	UnmountImage(id string, force bool) (bool, error)
+
+	// Mount attempts to mount a layer, image, or container for access, and
+	// returns the pathname if it succeeds.
+	// Note if the mountLabel == "", the default label for the container
+	// will be used.
+	//
+	// Note that we do some of this work in a child process. The calling
+	// process's main() function needs to import our pkg/reexec package and
+	// should begin with something like this in order to allow us to
+	// properly start that child process:
+	//   if reexec.Init() {
+	//       return
+	//   }
+	Mount(id, mountLabel string) (string, error)
+
+	// Unmount attempts to unmount a layer, image, or container, given an ID, a
+	// name, or a mount path. Returns whether or not the layer is still mounted.
+	// WARNING: The return value may already be obsolete by the time it is available
+	// to the caller, so it can be used for heuristic sanity checks at best. It should almost always be ignored.
+	Unmount(id string, force bool) (bool, error)
+
+	// Mounted returns the number of times the layer has been mounted.
+	//
+	// WARNING: This value might already be obsolete by the time it is returned;
+	// in situations where concurrent mount/unmount attempts can happen, this field
+	// should not be used for any decisions, maybe apart from heuristic user warnings.
+	Mounted(id string) (int, error)
+
+	// Changes returns a summary of the changes which would need to be made
+	// to one layer to make its contents the same as a second layer. If
+	// the first layer is not specified, the second layer's parent is
+	// assumed. Each Change structure contains a Path relative to the
+	// layer's root directory, and a Kind which is either ChangeAdd,
+	// ChangeModify, or ChangeDelete.
+	Changes(from, to string) ([]archive.Change, error)
+
+	// DiffSize returns a count of the size of the tarstream which would
+	// specify the changes returned by Changes.
+	DiffSize(from, to string) (int64, error)
+
+	// Diff returns the tarstream which would specify the changes returned
+	// by Changes. If options are passed in, they can override default
+	// behaviors.
+	Diff(from, to string, options *DiffOptions) (io.ReadCloser, error)
+
+	// ApplyDiff applies a tarstream to a layer. Information about the
+	// tarstream is cached with the layer. Typically, a layer which is
+	// populated using a tarstream will be expected to not be modified in
+	// any other way, either before or after the diff is applied.
+	//
+	// Note that we do some of this work in a child process. The calling
+	// process's main() function needs to import our pkg/reexec package and
+	// should begin with something like this in order to allow us to
+	// properly start that child process:
+	//   if reexec.Init() {
+	//       return
+	//   }
+	ApplyDiff(to string, diff io.Reader) (int64, error)
+
+	// ApplyDiffWithDiffer applies a diff to a layer.
+	// It is the caller's responsibility to clean up the staging directory if the diff is not
+	// successfully applied with ApplyDiffFromStagingDirectory.
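+	// A typical sequence (editor's sketch; assumes, as in containers/storage,
+	// that DriverWithDifferOutput's Target field holds the staging directory
+	// and that an empty "to" stages the diff without applying it):
+	//
+	//	out, err := store.ApplyDiffWithDiffer("", opts, differ) // stage the diff
+	//	if err == nil {
+	//		err = store.ApplyDiffFromStagingDirectory(layerID, out.Target, out, opts)
+	//	}
+	//	if err != nil && out != nil {
+	//		_ = store.CleanupStagingDirectory(out.Target)
+	//	}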
+	ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
+
+	// ApplyDiffFromStagingDirectory uses stagingDirectory to create the diff.
+	ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error
+
+	// CleanupStagingDirectory cleans up the staging directory. It can be used to clean up the staging directory on errors.
+	CleanupStagingDirectory(stagingDirectory string) error
+
+	// DifferTarget gets the path to the differ target.
+	DifferTarget(id string) (string, error)
+
+	// LayersByCompressedDigest returns a slice of the layers with the
+	// specified compressed digest value recorded for them.
+	LayersByCompressedDigest(d digest.Digest) ([]Layer, error)
+
+	// LayersByUncompressedDigest returns a slice of the layers with the
+	// specified uncompressed digest value recorded for them.
+	LayersByUncompressedDigest(d digest.Digest) ([]Layer, error)
+
+	// LayerSize returns a cached approximation of the layer's size, or -1
+	// if we don't have a value on hand.
+	LayerSize(id string) (int64, error)
+
+	// LayerParentOwners returns the UIDs and GIDs of owners of parents of
+	// the layer's mountpoint for which the layer's UID and GID maps (if
+	// any are defined) don't contain corresponding IDs.
+	LayerParentOwners(id string) ([]int, []int, error)
+
+	// Layers returns a list of the currently known layers.
+	Layers() ([]Layer, error)
+
+	// Images returns a list of the currently known images.
+	Images() ([]Image, error)
+
+	// Containers returns a list of the currently known containers.
+	Containers() ([]Container, error)
+
+	// Names returns the list of names for a layer, image, or container.
+	Names(id string) ([]string, error)
+
+	// Free removes the store from the list of stores.
+	Free()
+
+	// SetNames changes the list of names for a layer, image, or container.
+	// Duplicate names are removed from the list automatically.
+	// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`.
+	SetNames(id string, names []string) error
+
+	// AddNames adds the list of names for a layer, image, or container.
+	// Duplicate names are removed from the list automatically.
+	AddNames(id string, names []string) error
+
+	// RemoveNames removes the list of names for a layer, image, or container.
+	// Duplicate names are removed from the list automatically.
+	RemoveNames(id string, names []string) error
+
+	// ListImageBigData retrieves a list of the (possibly large) chunks of
+	// named data associated with an image.
+	ListImageBigData(id string) ([]string, error)
+
+	// ImageBigData retrieves a (possibly large) chunk of named data
+	// associated with an image.
+	ImageBigData(id, key string) ([]byte, error)
+
+	// ImageBigDataSize retrieves the size of a (possibly large) chunk
+	// of named data associated with an image.
+	ImageBigDataSize(id, key string) (int64, error)
+
+	// ImageBigDataDigest retrieves the digest of a (possibly large) chunk
+	// of named data associated with an image.
+	ImageBigDataDigest(id, key string) (digest.Digest, error)
+
+	// SetImageBigData stores a (possibly large) chunk of named data
+	// associated with an image. Pass
+	// github.com/containers/image/manifest.Digest as digestManifest to
+	// allow ImagesByDigest to find images by their correct digests.
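+	// For example (editor's sketch; manifest is the
+	// github.com/containers/image/manifest package named above):
+	//
+	//	err := store.SetImageBigData(imageID, storage.ImageDigestBigDataKey,
+	//		manifestBytes, manifest.Digest)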
+ SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error + + // ListLayerBigData retrieves a list of the (possibly large) chunks of + // named data associated with a layer. + ListLayerBigData(id string) ([]string, error) + + // LayerBigData retrieves a (possibly large) chunk of named data + // associated with a layer. + LayerBigData(id, key string) (io.ReadCloser, error) + + // SetLayerBigData stores a (possibly large) chunk of named data + // associated with a layer. + SetLayerBigData(id, key string, data io.Reader) error + + // ImageSize computes the size of the image's layers and ancillary data. + ImageSize(id string) (int64, error) + + // ListContainerBigData retrieves a list of the (possibly large) chunks of + // named data associated with a container. + ListContainerBigData(id string) ([]string, error) + + // ContainerBigData retrieves a (possibly large) chunk of named data + // associated with a container. + ContainerBigData(id, key string) ([]byte, error) + + // ContainerBigDataSize retrieves the size of a (possibly large) + // chunk of named data associated with a container. + ContainerBigDataSize(id, key string) (int64, error) + + // ContainerBigDataDigest retrieves the digest of a (possibly large) + // chunk of named data associated with a container. + ContainerBigDataDigest(id, key string) (digest.Digest, error) + + // SetContainerBigData stores a (possibly large) chunk of named data + // associated with a container. + SetContainerBigData(id, key string, data []byte) error + + // ContainerSize computes the size of the container's layer and ancillary + // data. Warning: this is a potentially expensive operation. + ContainerSize(id string) (int64, error) + + // Layer returns a specific layer. + Layer(id string) (*Layer, error) + + // Image returns a specific image. + Image(id string) (*Image, error) + + // ImagesByTopLayer returns a list of images which reference the specified + // layer as their top layer. They will have different IDs and names + // and may have different metadata, big data items, and flags. + ImagesByTopLayer(id string) ([]*Image, error) + + // ImagesByDigest returns a list of images which contain a big data item + // named ImageDigestBigDataKey whose contents have the specified digest. + ImagesByDigest(d digest.Digest) ([]*Image, error) + + // Container returns a specific container. + Container(id string) (*Container, error) + + // ContainerByLayer returns a specific container based on its layer ID or + // name. + ContainerByLayer(id string) (*Container, error) + + // ContainerDirectory returns a path of a directory which the caller + // can use to store data, specific to the container, which the library + // does not directly manage. The directory will be deleted when the + // container is deleted. + ContainerDirectory(id string) (string, error) + + // SetContainerDirectoryFile is a convenience function which stores + // a piece of data in the specified file relative to the container's + // directory. + SetContainerDirectoryFile(id, file string, data []byte) error + + // FromContainerDirectory is a convenience function which reads + // the contents of the specified file relative to the container's + // directory. + FromContainerDirectory(id, file string) ([]byte, error) + + // ContainerRunDirectory returns a path of a directory which the + // caller can use to store data, specific to the container, which the + // library does not directly manage. 
The directory will be deleted
+	// when the host system is restarted.
+	ContainerRunDirectory(id string) (string, error)
+
+	// SetContainerRunDirectoryFile is a convenience function which stores
+	// a piece of data in the specified file relative to the container's
+	// run directory.
+	SetContainerRunDirectoryFile(id, file string, data []byte) error
+
+	// FromContainerRunDirectory is a convenience function which reads
+	// the contents of the specified file relative to the container's run
+	// directory.
+	FromContainerRunDirectory(id, file string) ([]byte, error)
+
+	// ContainerParentOwners returns the UIDs and GIDs of owners of parents
+	// of the container's layer's mountpoint for which the layer's UID and
+	// GID maps (if any are defined) don't contain corresponding IDs.
+	ContainerParentOwners(id string) ([]int, []int, error)
+
+	// Lookup returns the ID of a layer, image, or container with the specified
+	// name or ID.
+	Lookup(name string) (string, error)
+
+	// Shutdown attempts to free any kernel resources which are being used
+	// by the underlying driver. If "force" is true, any mounted (i.e., in
+	// use) layers are unmounted beforehand. If "force" is not true, then
+	// layers being in use is considered to be an error condition. A list
+	// of still-mounted layers is returned along with possible errors.
+	Shutdown(force bool) (layers []string, err error)
+
+	// Version returns version information, in the form of key-value pairs, from
+	// the storage package.
+	Version() ([][2]string, error)
+
+	// GetDigestLock returns a digest-specific Locker.
+	GetDigestLock(digest.Digest) (Locker, error)
+
+	// LookupAdditionalLayer searches the additional layer store and returns an object
+	// which can create a layer with the specified digest associated with the specified image
+	// reference. Note that this hasn't been stored to this store yet: the actual creation of
+	// a usable layer is done by calling the returned object's PutAs() method. After creating
+	// a layer, the caller must then call the object's Release() method to free any temporary
+	// resources which were allocated for the object by this method or the object's PutAs()
+	// method.
+	// This API is experimental and can be changed without bumping the major version number.
+	LookupAdditionalLayer(d digest.Digest, imageref string) (AdditionalLayer, error)
+
+	// GarbageCollect tries to clean up remainders of previous containers or layers that are not
+	// referenced in the JSON files. These can happen in the case of unclean
+	// shutdowns or regular restarts in transient store mode.
+	GarbageCollect() error
+
+	// Check returns a report of things that look wrong in the store.
+	Check(options *CheckOptions) (CheckReport, error)
+	// Repair attempts to remediate problems mentioned in the CheckReport,
+	// usually by deleting layers and images which are damaged. If the
+	// right options are set, it will remove containers as well.
+	Repair(report CheckReport, options *RepairOptions) []error
+}
+
+// AdditionalLayer represents a layer that is contained in the additional layer store.
+// This API is experimental and can be changed without bumping the major version number.
+type AdditionalLayer interface {
+	// PutAs creates a layer based on this handler, using diff contents from the additional
+	// layer store.
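+	// An intended flow (editor's sketch; error handling elided, and the
+	// empty id presumably asks the store to assign one):
+	//
+	//	al, err := store.LookupAdditionalLayer(diffDigest, imageRef)
+	//	if err == nil {
+	//		layer, err := al.PutAs("", parentID, nil)
+	//		// ... use layer ...
+	//		al.Release()
+	//	}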
+	PutAs(id, parent string, names []string) (*Layer, error)
+
+	// UncompressedDigest returns the uncompressed digest of this layer.
+	UncompressedDigest() digest.Digest
+
+	// CompressedSize returns the compressed size of this layer.
+	CompressedSize() int64
+
+	// Release tells the additional layer store that we no longer use this handler.
+	Release()
+}
+
+type AutoUserNsOptions = types.AutoUserNsOptions
+
+type IDMappingOptions = types.IDMappingOptions
+
+// LayerOptions is used for passing options to a Store's CreateLayer() and PutLayer() methods.
+type LayerOptions struct {
+	// IDMappingOptions specifies the type of ID mapping which should be
+	// used for this layer. If nothing is specified, the layer will
+	// inherit settings from its parent layer or, if it has no parent
+	// layer, the Store object.
+	types.IDMappingOptions
+	// TemplateLayer is the ID of a layer whose contents will be used to
+	// initialize this layer. If set, it should be a child of the layer
+	// which we want to use as the parent of the new layer.
+	TemplateLayer string
+	// OriginalDigest specifies a digest of the tarstream (diff), if one is
+	// provided along with these LayerOptions, and reliably known by the caller.
+	// Use the default "" if this field is not applicable or the value is not known.
+	OriginalDigest digest.Digest
+	// UncompressedDigest specifies a digest of the uncompressed version (“DiffID”)
+	// of the tarstream (diff), if one is provided along with these LayerOptions,
+	// and reliably known by the caller.
+	// Use the default "" if this field is not applicable or the value is not known.
+	UncompressedDigest digest.Digest
+	// Volatile is true if the layer can be treated as volatile.
+	Volatile bool
+	// BigData is a set of items which should be stored with the layer.
+	BigData []LayerBigDataOption
+	// Flags is a set of named flags and their values to store with the layer.
+	// Currently these can only be set when the layer record is created, but that
+	// could change in the future.
+	Flags map[string]interface{}
+}
+
+type LayerBigDataOption struct {
+	Key  string
+	Data io.Reader
+}
+
+// ImageOptions is used for passing options to a Store's CreateImage() method.
+type ImageOptions struct {
+	// CreationDate, if not zero, will override the default behavior of marking the image as having been
+	// created when CreateImage() was called, recording CreationDate instead.
+	CreationDate time.Time
+	// Digest is a hard-coded digest value that we can use to look up the image. It is optional.
+	Digest digest.Digest
+	// Digests is a list of digest values of the image's manifests, and
+	// possibly a manually-specified value, that we can use to locate the
+	// image. If Digest is set, its value is also in this list.
+	Digests []digest.Digest
+	// Metadata is caller-specified metadata associated with the image.
+	Metadata string
+	// BigData is a set of items which should be stored with the image.
+	BigData []ImageBigDataOption
+	// NamesHistory is used for guessing what this image was named when a container was created based
+	// on it, but it no longer has any names.
+	NamesHistory []string
+	// Flags is a set of named flags and their values to store with the image. Currently these can only
+	// be set when the image record is created, but that could change in the future.
+	Flags map[string]interface{}
+}
+
+type ImageBigDataOption struct {
+	Key    string
+	Data   []byte
+	Digest digest.Digest
+}
+
+// ContainerOptions is used for passing options to a Store's CreateContainer() method.
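+//
+// For instance (editor's sketch; the field values and IDs are hypothetical):
+//
+//	opts := storage.ContainerOptions{
+//		MountOpts: []string{"nodev", "nosuid"},
+//		Volatile:  true,
+//	}
+//	ctr, err := store.CreateContainer("", []string{"web"}, imageID, "", "", &opts)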
+type ContainerOptions struct {
+	// IDMappingOptions specifies the type of ID mapping which should be
+	// used for this container's layer. If nothing is specified, the
+	// container's layer will inherit settings from the image's top layer
+	// or, if it is not being created based on an image, the Store object.
+	types.IDMappingOptions
+	LabelOpts []string
+	// Flags is a set of named flags and their values to store with the container.
+	// Currently these can only be set when the container record is created, but that
+	// could change in the future.
+	Flags      map[string]interface{}
+	MountOpts  []string
+	Volatile   bool
+	StorageOpt map[string]string
+	// Metadata is caller-specified metadata associated with the container.
+	Metadata string
+	// BigData is a set of items which should be stored for the container.
+	BigData []ContainerBigDataOption
+}
+
+type ContainerBigDataOption struct {
+	Key  string
+	Data []byte
+}
+
+type store struct {
+	// # Locking hierarchy:
+	// These locks do not all need to be held simultaneously, but if some code does need to lock more than one, it MUST do so in this order:
+	// - graphLock
+	// - layerStore.start{Reading,Writing}
+	// - roLayerStores[].startReading (in the order of the items of the roLayerStores array)
+	// - imageStore.start{Reading,Writing}
+	// - roImageStores[].startReading (in the order of the items of the roImageStores array)
+	// - containerStore.start{Reading,Writing}
+
+	// The following fields are only set when constructing store, and must never be modified afterwards.
+	// They are safe to access without any other locking.
+	runRoot             string
+	graphDriverName     string // Initially set to the user-requested value, possibly ""; updated during store construction, and does not change afterwards.
+	graphDriverPriority []string
+	// graphLock:
+	// - Ensures that we always reload graphDriver, and the primary layer store, after any process does store.Shutdown. This is necessary
+	//   because (??) the Shutdown may forcibly unmount and clean up, affecting graph driver state in a way only a graph driver
+	//   and layer store reinitialization can notice.
+	// - Ensures that store.Shutdown is exclusive with mount operations. This is necessary because some
+	//   graph drivers call mount.MakePrivate() during initialization, the mount operations require that, and the driver’s Cleanup() method
+	//   may undo that. So, holding graphLock is required throughout the duration of Shutdown(), and the duration of any mount
+	//   (but not unmount) calls.
+	// - Within this store object, protects access to some related in-memory state.
+	graphLock       *lockfile.LockFile
+	usernsLock      *lockfile.LockFile
+	graphRoot       string
+	graphOptions    []string
+	imageStoreDir   string
+	pullOptions     map[string]string
+	uidMap          []idtools.IDMap
+	gidMap          []idtools.IDMap
+	autoUsernsUser  string
+	autoNsMinSize   uint32
+	autoNsMaxSize   uint32
+	imageStore      rwImageStore
+	rwImageStores   []rwImageStore
+	roImageStores   []roImageStore
+	containerStore  rwContainerStore
+	digestLockRoot  string
+	disableVolatile bool
+	transientStore  bool
+
+	// The following fields can only be accessed with graphLock held.
+	graphLockLastWrite lockfile.LastWrite
+	// FIXME: This field is only set when holding graphLock, but locking rules of the driver
+	// interface itself are not documented here. It is extensively used without holding graphLock.
+	graphDriver          drivers.Driver
+	layerStoreUseGetters rwLayerStore // Almost all users should use the provided accessors instead of accessing this field directly.
+	roLayerStoresUseGetters []roLayerStore // Almost all users should use the provided accessors instead of accessing this field directly.
+
+	// FIXME: The following fields need locking, and don’t have it.
+	additionalUIDs *idSet // Set by getAvailableIDs()
+	additionalGIDs *idSet // Set by getAvailableIDs()
+}
+
+// GetStore attempts to find an already-created Store object matching the
+// specified location and graph driver, and if it can't, it creates and
+// initializes a new Store object, and the underlying storage that it controls.
+//
+// If StoreOptions `options` haven't been fully populated, then DefaultStoreOptions are used.
+//
+// These defaults observe environment variables:
+//   - `STORAGE_DRIVER` for the name of the storage driver to attempt to use
+//   - `STORAGE_OPTS` for the string of options to pass to the driver
+//
+// Note that we do some of this work in a child process. The calling process's
+// main() function needs to import our pkg/reexec package and should begin with
+// something like this in order to allow us to properly start that child
+// process:
+//
+//	if reexec.Init() {
+//		return
+//	}
+func GetStore(options types.StoreOptions) (Store, error) {
+	defaultOpts, err := types.Options()
+	if err != nil {
+		return nil, err
+	}
+	if options.RunRoot == "" && options.GraphRoot == "" && options.GraphDriverName == "" && len(options.GraphDriverOptions) == 0 {
+		options = defaultOpts
+	}
+
+	if options.GraphRoot != "" {
+		dir, err := filepath.Abs(options.GraphRoot)
+		if err != nil {
+			return nil, err
+		}
+		options.GraphRoot = dir
+	}
+	if options.RunRoot != "" {
+		dir, err := filepath.Abs(options.RunRoot)
+		if err != nil {
+			return nil, err
+		}
+		options.RunRoot = dir
+	}
+
+	storesLock.Lock()
+	defer storesLock.Unlock()
+
+	// Return an existing store only if BOTH the run root and the graph root match;
+	// otherwise our run root could be overridden if the graph root is found first.
+	for _, s := range stores {
+		if (s.graphRoot == options.GraphRoot) && (s.runRoot == options.RunRoot) && (options.GraphDriverName == "" || s.graphDriverName == options.GraphDriverName) {
+			return s, nil
+		}
+	}
+
+	// If passed a run root or graph root alone, the other should be defaulted; only error if we have neither.
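+	// Editor's note (sketch of a typical call site; the paths are hypothetical):
+	//
+	//	store, err := storage.GetStore(types.StoreOptions{
+	//		GraphRoot: "/var/lib/containers/storage",
+	//		RunRoot:   "/run/containers/storage",
+	//	})
+	//	...
+	//	_, _ = store.Shutdown(false) // when done with the store
+	//
+	// Passing only one of the two roots is also valid; the missing one is
+	// filled in from the defaults by the switch below.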
+ switch { + case options.RunRoot == "" && options.GraphRoot == "": + return nil, fmt.Errorf("no storage runroot or graphroot specified: %w", ErrIncompleteOptions) + case options.GraphRoot == "": + options.GraphRoot = defaultOpts.GraphRoot + case options.RunRoot == "": + options.RunRoot = defaultOpts.RunRoot + } + + if err := os.MkdirAll(options.RunRoot, 0o700); err != nil { + return nil, err + } + if err := os.MkdirAll(options.GraphRoot, 0o700); err != nil { + return nil, err + } + if options.ImageStore != "" { + if err := os.MkdirAll(options.ImageStore, 0o700); err != nil { + return nil, err + } + } + if err := os.MkdirAll(filepath.Join(options.GraphRoot, options.GraphDriverName), 0o700); err != nil { + return nil, err + } + if options.ImageStore != "" { + if err := os.MkdirAll(filepath.Join(options.ImageStore, options.GraphDriverName), 0o700); err != nil { + return nil, err + } + } + + graphLock, err := lockfile.GetLockFile(filepath.Join(options.GraphRoot, "storage.lock")) + if err != nil { + return nil, err + } + + usernsLock, err := lockfile.GetLockFile(filepath.Join(options.GraphRoot, "userns.lock")) + if err != nil { + return nil, err + } + + autoNsMinSize := options.AutoNsMinSize + autoNsMaxSize := options.AutoNsMaxSize + if autoNsMinSize == 0 { + autoNsMinSize = AutoUserNsMinSize + } + if autoNsMaxSize == 0 { + autoNsMaxSize = AutoUserNsMaxSize + } + s := &store{ + runRoot: options.RunRoot, + graphDriverName: options.GraphDriverName, + graphDriverPriority: options.GraphDriverPriority, + graphLock: graphLock, + usernsLock: usernsLock, + graphRoot: options.GraphRoot, + graphOptions: options.GraphDriverOptions, + imageStoreDir: options.ImageStore, + pullOptions: options.PullOptions, + uidMap: copyIDMap(options.UIDMap), + gidMap: copyIDMap(options.GIDMap), + autoUsernsUser: options.RootAutoNsUser, + autoNsMinSize: autoNsMinSize, + autoNsMaxSize: autoNsMaxSize, + disableVolatile: options.DisableVolatile, + transientStore: options.TransientStore, + + additionalUIDs: nil, + additionalGIDs: nil, + } + if err := s.load(); err != nil { + return nil, err + } + + stores = append(stores, s) + + return s, nil +} + +func copyUint32Slice(slice []uint32) []uint32 { + m := []uint32{} + if slice != nil { + m = make([]uint32, len(slice)) + copy(m, slice) + } + if len(m) > 0 { + return m[:] + } + return nil +} + +func copyIDMap(idmap []idtools.IDMap) []idtools.IDMap { + m := []idtools.IDMap{} + if idmap != nil { + m = make([]idtools.IDMap, len(idmap)) + copy(m, idmap) + } + if len(m) > 0 { + return m[:] + } + return nil +} + +func (s *store) RunRoot() string { + return s.runRoot +} + +func (s *store) GraphDriverName() string { + return s.graphDriverName +} + +func (s *store) GraphRoot() string { + return s.graphRoot +} + +func (s *store) ImageStore() string { + return s.imageStoreDir +} + +func (s *store) TransientStore() bool { + return s.transientStore +} + +func (s *store) GraphOptions() []string { + return s.graphOptions +} + +func (s *store) PullOptions() map[string]string { + cp := make(map[string]string, len(s.pullOptions)) + for k, v := range s.pullOptions { + cp[k] = v + } + return cp +} + +func (s *store) UIDMap() []idtools.IDMap { + return copyIDMap(s.uidMap) +} + +func (s *store) GIDMap() []idtools.IDMap { + return copyIDMap(s.gidMap) +} + +// This must only be called when constructing store; it writes to fields that are assumed to be constant after construction. 
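+// An editor's summary of the on-disk layout load creates (the driver prefix
+// is e.g. "overlay-"):
+//
+//	<imageStoreDir or graphRoot>/<driver>-images/  image metadata
+//	<graphRoot>/<driver>-containers/               container metadata
+//	<runRoot>/<driver>-containers/                 volatile container state
+//	<runRoot>/<driver>-locks/                      per-digest lock files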
+func (s *store) load() error {
+ var driver drivers.Driver
+ if err := func() error { // A scope for defer
+ s.graphLock.Lock()
+ defer s.graphLock.Unlock()
+ lastWrite, err := s.graphLock.GetLastWrite()
+ if err != nil {
+ return err
+ }
+ s.graphLockLastWrite = lastWrite
+ driver, err = s.createGraphDriverLocked()
+ if err != nil {
+ return err
+ }
+ s.graphDriver = driver
+ s.graphDriverName = driver.String()
+ return nil
+ }(); err != nil {
+ return err
+ }
+ driverPrefix := s.graphDriverName + "-"
+
+ imgStoreRoot := s.imageStoreDir
+ if imgStoreRoot == "" {
+ imgStoreRoot = s.graphRoot
+ }
+ gipath := filepath.Join(imgStoreRoot, driverPrefix+"images")
+ if err := os.MkdirAll(gipath, 0o700); err != nil {
+ return err
+ }
+ ris, err := newImageStore(gipath)
+ if err != nil {
+ return err
+ }
+ s.imageStore = ris
+
+ gcpath := filepath.Join(s.graphRoot, driverPrefix+"containers")
+ if err := os.MkdirAll(gcpath, 0o700); err != nil {
+ return err
+ }
+ rcpath := filepath.Join(s.runRoot, driverPrefix+"containers")
+ if err := os.MkdirAll(rcpath, 0o700); err != nil {
+ return err
+ }
+
+ rcs, err := newContainerStore(gcpath, rcpath, s.transientStore)
+ if err != nil {
+ return err
+ }
+
+ s.containerStore = rcs
+
+ for _, store := range driver.AdditionalImageStores() {
+ gipath := filepath.Join(store, driverPrefix+"images")
+ var ris roImageStore
+ if s.imageStoreDir != "" && store == s.graphRoot {
+ // If --imagestore was set and the current store is `graphRoot`,
+ // then mount it as a `rw` additional store instead of a
+ // `readonly` additional store.
+ imageStore, err := newImageStore(gipath)
+ if err != nil {
+ return err
+ }
+ s.rwImageStores = append(s.rwImageStores, imageStore)
+ ris = imageStore
+ } else {
+ ris, err = newROImageStore(gipath)
+ if err != nil {
+ return err
+ }
+ }
+ s.roImageStores = append(s.roImageStores, ris)
+ }
+
+ s.digestLockRoot = filepath.Join(s.runRoot, driverPrefix+"locks")
+ if err := os.MkdirAll(s.digestLockRoot, 0o700); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// GetDigestLock returns a digest-specific Locker.
+func (s *store) GetDigestLock(d digest.Digest) (Locker, error) {
+ return lockfile.GetLockFile(filepath.Join(s.digestLockRoot, d.String()))
+}
+
+// startUsingGraphDriver obtains s.graphLock and ensures that s.graphDriver is set and fresh.
+// It is only intended to be used on a fully-constructed store.
+// If this succeeds, the caller MUST call stopUsingGraphDriver().
+func (s *store) startUsingGraphDriver() error {
+ s.graphLock.Lock()
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ s.graphLock.Unlock()
+ }
+ }()
+
+ lastWrite, modified, err := s.graphLock.ModifiedSince(s.graphLockLastWrite)
+ if err != nil {
+ return err
+ }
+ if modified {
+ driver, err := s.createGraphDriverLocked()
+ if err != nil {
+ return err
+ }
+ // Our concurrency design requires s.graphDriverName not to be modified after
+ // store is constructed.
+ // It’s fine for driver.String() not to match the requested graph driver name
+ // (e.g. 
if the user asks for overlay2 and gets overlay), but it must be an idempotent
+ // mapping:
+ // driver1 := drivers.New(userInput, config)
+ // name1 := driver1.String()
+ // name2 := drivers.New(name1, config).String()
+ // assert(name1 == name2)
+ if s.graphDriverName != driver.String() {
+ return fmt.Errorf("graph driver name changed from %q to %q during reload",
+ s.graphDriverName, driver.String())
+ }
+ s.graphDriver = driver
+ s.layerStoreUseGetters = nil
+ s.graphLockLastWrite = lastWrite
+ }
+
+ succeeded = true
+ return nil
+}
+
+// stopUsingGraphDriver releases graphLock obtained by startUsingGraphDriver.
+func (s *store) stopUsingGraphDriver() {
+ s.graphLock.Unlock()
+}
+
+// createGraphDriverLocked creates a new instance of graph driver for s, and returns it.
+// Almost all users should use startUsingGraphDriver instead.
+// The caller must hold s.graphLock.
+func (s *store) createGraphDriverLocked() (drivers.Driver, error) {
+ driverRoot := s.imageStoreDir
+ imageStoreBase := s.graphRoot
+ if driverRoot == "" {
+ driverRoot = s.graphRoot
+ imageStoreBase = ""
+ }
+ config := drivers.Options{
+ Root: driverRoot,
+ ImageStore: imageStoreBase,
+ RunRoot: s.runRoot,
+ DriverPriority: s.graphDriverPriority,
+ DriverOptions: s.graphOptions,
+ UIDMaps: s.uidMap,
+ GIDMaps: s.gidMap,
+ }
+ return drivers.New(s.graphDriverName, config)
+}
+
+func (s *store) GraphDriver() (drivers.Driver, error) {
+ if err := s.startUsingGraphDriver(); err != nil {
+ return nil, err
+ }
+ defer s.stopUsingGraphDriver()
+ return s.graphDriver, nil
+}
+
+// getLayerStoreLocked obtains and returns a handle to the writeable layer store object
+// used by the Store.
+// It must be called with s.graphLock held.
+func (s *store) getLayerStoreLocked() (rwLayerStore, error) {
+ if s.layerStoreUseGetters != nil {
+ return s.layerStoreUseGetters, nil
+ }
+ driverPrefix := s.graphDriverName + "-"
+ rlpath := filepath.Join(s.runRoot, driverPrefix+"layers")
+ if err := os.MkdirAll(rlpath, 0o700); err != nil {
+ return nil, err
+ }
+ imgStoreRoot := s.imageStoreDir
+ if imgStoreRoot == "" {
+ imgStoreRoot = s.graphRoot
+ }
+ glpath := filepath.Join(imgStoreRoot, driverPrefix+"layers")
+ if err := os.MkdirAll(glpath, 0o700); err != nil {
+ return nil, err
+ }
+ rls, err := s.newLayerStore(rlpath, glpath, s.graphDriver, s.transientStore)
+ if err != nil {
+ return nil, err
+ }
+ s.layerStoreUseGetters = rls
+ return s.layerStoreUseGetters, nil
+}
+
+// getLayerStore obtains and returns a handle to the writeable layer store object
+// used by the store.
+// It must be called WITHOUT s.graphLock held.
+func (s *store) getLayerStore() (rwLayerStore, error) {
+ if err := s.startUsingGraphDriver(); err != nil {
+ return nil, err
+ }
+ defer s.stopUsingGraphDriver()
+ return s.getLayerStoreLocked()
+}
+
+// getROLayerStoresLocked obtains additional read-only layer store objects used by the
+// Store.
+// It must be called with s.graphLock held.
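+// A minimal usage sketch of the Locked/unlocked accessor pairing (illustrative
+// only; it mirrors what bothLayerStoreKinds below does):
+//
+// if err := s.startUsingGraphDriver(); err != nil {
+// return err
+// }
+// defer s.stopUsingGraphDriver()
+// stores, err := s.getROLayerStoresLocked()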
+func (s *store) getROLayerStoresLocked() ([]roLayerStore, error) {
+ if s.roLayerStoresUseGetters != nil {
+ return s.roLayerStoresUseGetters, nil
+ }
+ driverPrefix := s.graphDriverName + "-"
+ rlpath := filepath.Join(s.runRoot, driverPrefix+"layers")
+ if err := os.MkdirAll(rlpath, 0o700); err != nil {
+ return nil, err
+ }
+ for _, store := range s.graphDriver.AdditionalImageStores() {
+ glpath := filepath.Join(store, driverPrefix+"layers")
+ rls, err := newROLayerStore(rlpath, glpath, s.graphDriver)
+ if err != nil {
+ return nil, err
+ }
+ s.roLayerStoresUseGetters = append(s.roLayerStoresUseGetters, rls)
+ }
+ return s.roLayerStoresUseGetters, nil
+}
+
+// bothLayerStoreKindsLocked returns the primary, and additional read-only, layer store objects used by the store.
+// It must be called with s.graphLock held.
+func (s *store) bothLayerStoreKindsLocked() (rwLayerStore, []roLayerStore, error) {
+ primary, err := s.getLayerStoreLocked()
+ if err != nil {
+ return nil, nil, fmt.Errorf("loading primary layer store data: %w", err)
+ }
+ additional, err := s.getROLayerStoresLocked()
+ if err != nil {
+ return nil, nil, fmt.Errorf("loading additional layer stores: %w", err)
+ }
+ return primary, additional, nil
+}
+
+// bothLayerStoreKinds returns the primary, and additional read-only, layer store objects used by the store.
+// It must be called WITHOUT s.graphLock held.
+func (s *store) bothLayerStoreKinds() (rwLayerStore, []roLayerStore, error) {
+ if err := s.startUsingGraphDriver(); err != nil {
+ return nil, nil, err
+ }
+ defer s.stopUsingGraphDriver()
+ return s.bothLayerStoreKindsLocked()
+}
+
+// allLayerStoresLocked returns a list of all layer store objects used by the Store.
+// This is a convenience method for read-only users of the Store.
+// It must be called with s.graphLock held.
+func (s *store) allLayerStoresLocked() ([]roLayerStore, error) {
+ primary, additional, err := s.bothLayerStoreKindsLocked()
+ if err != nil {
+ return nil, err
+ }
+ return append([]roLayerStore{primary}, additional...), nil
+}
+
+// allLayerStores returns a list of all layer store objects used by the Store.
+// This is a convenience method for read-only users of the Store.
+// It must be called WITHOUT s.graphLock held.
+func (s *store) allLayerStores() ([]roLayerStore, error) {
+ if err := s.startUsingGraphDriver(); err != nil {
+ return nil, err
+ }
+ defer s.stopUsingGraphDriver()
+ return s.allLayerStoresLocked()
+}
+
+// readAllLayerStores processes allLayerStores() in order:
+// It locks the store for reading, checks for updates, and calls
+//
+// (data, done, err) := fn(store)
+//
+// until the callback returns done == true, and returns the data from the callback.
+//
+// If reading any layer store fails, it immediately returns ({}, true, err).
+//
+// If all layer stores are processed without setting done == true, it returns ({}, false, nil).
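+//
+// Note that the read locks taken on already-visited stores are released, via
+// defer, only when readAllLayerStores itself returns.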
+//
+// Typical usage:
+//
+// if res, done, err := readAllLayerStores(s, func(…) {
+// …
+// }); done {
+// return res, err
+// }
+func readAllLayerStores[T any](s *store, fn func(store roLayerStore) (T, bool, error)) (T, bool, error) {
+ var zeroRes T // A zero value of T
+
+ layerStores, err := s.allLayerStores()
+ if err != nil {
+ return zeroRes, true, err
+ }
+ for _, s := range layerStores {
+ store := s
+ if err := store.startReading(); err != nil {
+ return zeroRes, true, err
+ }
+ defer store.stopReading()
+ if res, done, err := fn(store); done {
+ return res, true, err
+ }
+ }
+ return zeroRes, false, nil
+}
+
+// writeToLayerStore is a helper for working with store.getLayerStore():
+// It locks the store for writing, checks for updates, and calls fn().
+// It returns the return value of fn, or its own error initializing the store.
+func writeToLayerStore[T any](s *store, fn func(store rwLayerStore) (T, error)) (T, error) {
+ var zeroRes T // A zero value of T
+
+ store, err := s.getLayerStore()
+ if err != nil {
+ return zeroRes, err
+ }
+
+ if err := store.startWriting(); err != nil {
+ return zeroRes, err
+ }
+ defer store.stopWriting()
+ return fn(store)
+}
+
+// readOrWriteAllLayerStores processes allLayerStores() in order:
+// It locks the writeable store for writing and all others for reading, checks
+// for updates, and calls
+//
+// (data, done, err) := fn(store)
+//
+// until the callback returns done == true, and returns the data from the callback.
+//
+// If reading or writing any layer store fails, it immediately returns ({}, true, err).
+//
+// If all layer stores are processed without setting done == true, it returns ({}, false, nil).
+//
+// Typical usage:
+//
+// if res, done, err := readOrWriteAllLayerStores(s, func(…) {
+// …
+// }); done {
+// return res, err
+// }
+func readOrWriteAllLayerStores[T any](s *store, fn func(store roLayerStore) (T, bool, error)) (T, bool, error) {
+ var zeroRes T // A zero value of T
+
+ rwLayerStore, roLayerStores, err := s.bothLayerStoreKinds()
+ if err != nil {
+ return zeroRes, true, err
+ }
+
+ if err := rwLayerStore.startWriting(); err != nil {
+ return zeroRes, true, err
+ }
+ defer rwLayerStore.stopWriting()
+ if res, done, err := fn(rwLayerStore); done {
+ return res, true, err
+ }
+
+ for _, s := range roLayerStores {
+ store := s
+ if err := store.startReading(); err != nil {
+ return zeroRes, true, err
+ }
+ defer store.stopReading()
+ if res, done, err := fn(store); done {
+ return res, true, err
+ }
+ }
+ return zeroRes, false, nil
+}
+
+// allImageStores returns a list of all image store objects used by the Store.
+// This is a convenience method for read-only users of the Store.
+func (s *store) allImageStores() []roImageStore {
+ return append([]roImageStore{s.imageStore}, s.roImageStores...)
+}
+
+// readAllImageStores processes allImageStores() in order:
+// It locks the store for reading, checks for updates, and calls
+//
+// (data, done, err) := fn(store)
+//
+// until the callback returns done == true, and returns the data from the callback.
+//
+// If reading any image store fails, it immediately returns ({}, true, err).
+//
+// If all image stores are processed without setting done == true, it returns ({}, false, nil).
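+//
+// (Unlike the layer-store helpers above, the image-store helpers do not need
+// s.graphLock: allImageStores only concatenates already-initialized handles.)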
+//
+// Typical usage:
+//
+// if res, done, err := readAllImageStores(s, func(…) {
+// …
+// }); done {
+// return res, err
+// }
+func readAllImageStores[T any](s *store, fn func(store roImageStore) (T, bool, error)) (T, bool, error) {
+ var zeroRes T // A zero value of T
+
+ for _, s := range s.allImageStores() {
+ store := s
+ if err := store.startReading(); err != nil {
+ return zeroRes, true, err
+ }
+ defer store.stopReading()
+ if res, done, err := fn(store); done {
+ return res, true, err
+ }
+ }
+ return zeroRes, false, nil
+}
+
+// writeToImageStore is a convenience helper for working with store.imageStore:
+// It locks the store for writing, checks for updates, and calls fn(), which can then access store.imageStore.
+// It returns the return value of fn, or its own error initializing the store.
+func writeToImageStore[T any](s *store, fn func() (T, error)) (T, error) {
+ if err := s.imageStore.startWriting(); err != nil {
+ var zeroRes T // A zero value of T
+ return zeroRes, err
+ }
+ defer s.imageStore.stopWriting()
+ return fn()
+}
+
+// readContainerStore is a convenience helper for working with store.containerStore:
+// It locks the store for reading, checks for updates, and calls fn(), which can then access store.containerStore.
+// If reading the container store fails, it returns ({}, true, err).
+// Returns the return value of fn on success.
+func readContainerStore[T any](s *store, fn func() (T, bool, error)) (T, bool, error) {
+ if err := s.containerStore.startReading(); err != nil {
+ var zeroRes T // A zero value of T
+ return zeroRes, true, err
+ }
+ defer s.containerStore.stopReading()
+ return fn()
+}
+
+// writeToContainerStore is a convenience helper for working with store.containerStore:
+// It locks the store for writing, checks for updates, and calls fn(), which can then access store.containerStore.
+// It returns the return value of fn, or its own error initializing the store.
+func writeToContainerStore[T any](s *store, fn func() (T, error)) (T, error) {
+ if err := s.containerStore.startWriting(); err != nil {
+ var zeroRes T // A zero value of T
+ return zeroRes, err
+ }
+ defer s.containerStore.stopWriting()
+ return fn()
+}
+
+// writeToAllStores is a convenience helper for writing to all three stores:
+// It locks the stores for writing, checks for updates, and calls fn(), which can then access the provided layer store,
+// s.imageStore and s.containerStore.
+// It returns the return value of fn, or its own error initializing the stores.
+func (s *store) writeToAllStores(fn func(rlstore rwLayerStore) error) error {
+ rlstore, err := s.getLayerStore()
+ if err != nil {
+ return err
+ }
+
+ if err := rlstore.startWriting(); err != nil {
+ return err
+ }
+ defer rlstore.stopWriting()
+ if err := s.imageStore.startWriting(); err != nil {
+ return err
+ }
+ defer s.imageStore.stopWriting()
+ if err := s.containerStore.startWriting(); err != nil {
+ return err
+ }
+ defer s.containerStore.stopWriting()
+
+ return fn(rlstore)
+}
+
+// canUseShifting returns true if we can use mount-time arguments (shifting) to
+// avoid having to create a mapped top layer for a base image when we want to
+// use it to create a container using ID mappings.
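+//
+// For intuition (illustrative values, not taken from this code): a single
+// contiguous mapping such as {ContainerID: 0, HostID: 100000, Size: 65536}
+// passes the idtools.IsContiguous checks below and can be handled by shifting
+// at mount time, while disjoint ranges force a remapped copy of the layer.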
+// On entry: +// - rlstore must be locked for writing +func (s *store) canUseShifting(uidmap, gidmap []idtools.IDMap) bool { + if !s.graphDriver.SupportsShifting() { + return false + } + if uidmap != nil && !idtools.IsContiguous(uidmap) { + return false + } + if gidmap != nil && !idtools.IsContiguous(gidmap) { + return false + } + return true +} + +func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader) (*Layer, int64, error) { + var parentLayer *Layer + rlstore, rlstores, err := s.bothLayerStoreKinds() + if err != nil { + return nil, -1, err + } + if err := rlstore.startWriting(); err != nil { + return nil, -1, err + } + defer rlstore.stopWriting() + if err := s.containerStore.startWriting(); err != nil { + return nil, -1, err + } + defer s.containerStore.stopWriting() + var options LayerOptions + if lOptions != nil { + options = *lOptions + options.BigData = copyLayerBigDataOptionSlice(lOptions.BigData) + options.Flags = copyStringInterfaceMap(lOptions.Flags) + } + if options.HostUIDMapping { + options.UIDMap = nil + } + if options.HostGIDMapping { + options.GIDMap = nil + } + uidMap := options.UIDMap + gidMap := options.GIDMap + if parent != "" { + var ilayer *Layer + for _, l := range append([]roLayerStore{rlstore}, rlstores...) { + lstore := l + if lstore != rlstore { + if err := lstore.startReading(); err != nil { + return nil, -1, err + } + defer lstore.stopReading() + } + if l, err := lstore.Get(parent); err == nil && l != nil { + ilayer = l + parent = ilayer.ID + break + } + } + if ilayer == nil { + return nil, -1, ErrLayerUnknown + } + parentLayer = ilayer + containers, err := s.containerStore.Containers() + if err != nil { + return nil, -1, err + } + for _, container := range containers { + if container.LayerID == parent { + return nil, -1, ErrParentIsContainer + } + } + if !options.HostUIDMapping && len(options.UIDMap) == 0 { + uidMap = ilayer.UIDMap + } + if !options.HostGIDMapping && len(options.GIDMap) == 0 { + gidMap = ilayer.GIDMap + } + } else { + if !options.HostUIDMapping && len(options.UIDMap) == 0 { + uidMap = s.uidMap + } + if !options.HostGIDMapping && len(options.GIDMap) == 0 { + gidMap = s.gidMap + } + } + layerOptions := LayerOptions{ + OriginalDigest: options.OriginalDigest, + UncompressedDigest: options.UncompressedDigest, + Flags: options.Flags, + } + if s.canUseShifting(uidMap, gidMap) { + layerOptions.IDMappingOptions = types.IDMappingOptions{HostUIDMapping: true, HostGIDMapping: true, UIDMap: nil, GIDMap: nil} + } else { + layerOptions.IDMappingOptions = types.IDMappingOptions{ + HostUIDMapping: options.HostUIDMapping, + HostGIDMapping: options.HostGIDMapping, + UIDMap: copyIDMap(uidMap), + GIDMap: copyIDMap(gidMap), + } + } + return rlstore.create(id, parentLayer, names, mountLabel, nil, &layerOptions, writeable, diff) +} + +func (s *store) CreateLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions) (*Layer, error) { + layer, _, err := s.PutLayer(id, parent, names, mountLabel, writeable, options, nil) + return layer, err +} + +func (s *store) CreateImage(id string, names []string, layer, metadata string, iOptions *ImageOptions) (*Image, error) { + if layer != "" { + layerStores, err := s.allLayerStores() + if err != nil { + return nil, err + } + var ilayer *Layer + for _, s := range layerStores { + store := s + if err := store.startReading(); err != nil { + return nil, err + } + defer store.stopReading() + ilayer, err = 
store.Get(layer)
+ if err == nil {
+ break
+ }
+ }
+ if ilayer == nil {
+ return nil, ErrLayerUnknown
+ }
+ layer = ilayer.ID
+ }
+
+ return writeToImageStore(s, func() (*Image, error) {
+ var options ImageOptions
+ var namesToAddAfterCreating []string
+
+ // Check if the ID refers to an image in a read-only store -- we want
+ // to allow images in read-only stores to have their names changed, so
+ // if we find one, merge the new values in with what we know about the
+ // image that's already there.
+ if id != "" {
+ for _, is := range s.roImageStores {
+ store := is
+ if err := store.startReading(); err != nil {
+ return nil, err
+ }
+ defer store.stopReading()
+ if i, err := store.Get(id); err == nil {
+ // set information about this image in "options"
+ options = ImageOptions{
+ Metadata: i.Metadata,
+ CreationDate: i.Created,
+ Digest: i.Digest,
+ Digests: copyDigestSlice(i.Digests),
+ NamesHistory: copyStringSlice(i.NamesHistory),
+ }
+ for _, key := range i.BigDataNames {
+ data, err := store.BigData(id, key)
+ if err != nil {
+ return nil, err
+ }
+ dataDigest, err := store.BigDataDigest(id, key)
+ if err != nil {
+ return nil, err
+ }
+ options.BigData = append(options.BigData, ImageBigDataOption{
+ Key: key,
+ Data: data,
+ Digest: dataDigest,
+ })
+ }
+ namesToAddAfterCreating = dedupeStrings(append(append([]string{}, i.Names...), names...))
+ break
+ }
+ }
+ }
+
+ // merge any passed-in options into "options" as best we can
+ if iOptions != nil {
+ if !iOptions.CreationDate.IsZero() {
+ options.CreationDate = iOptions.CreationDate
+ }
+ if iOptions.Digest != "" {
+ options.Digest = iOptions.Digest
+ }
+ options.Digests = append(options.Digests, copyDigestSlice(iOptions.Digests)...)
+ if iOptions.Metadata != "" {
+ options.Metadata = iOptions.Metadata
+ }
+ options.BigData = append(options.BigData, copyImageBigDataOptionSlice(iOptions.BigData)...)
+ options.NamesHistory = append(options.NamesHistory, copyStringSlice(iOptions.NamesHistory)...)
+ if options.Flags == nil {
+ options.Flags = make(map[string]interface{})
+ }
+ for k, v := range iOptions.Flags {
+ options.Flags[k] = v
+ }
+ }
+
+ if options.CreationDate.IsZero() {
+ options.CreationDate = time.Now().UTC()
+ }
+ if metadata != "" {
+ options.Metadata = metadata
+ }
+
+ res, err := s.imageStore.create(id, names, layer, options)
+ if err == nil && len(namesToAddAfterCreating) > 0 {
+ // set any names we pulled up from an additional image store, now that we won't be
+ // triggering a duplicate names error
+ err = s.imageStore.updateNames(res.ID, namesToAddAfterCreating, addNames)
+ }
+ return res, err
+ })
+}
+
+// imageTopLayerForMapping locates the layer that can take the place of the
+// image's top layer as the shared parent layer for one or more containers
+// which are using ID mappings.
+// On entry:
+// - ristore must be locked EITHER for reading or writing
+// - s.imageStore must be locked for writing; it might be identical to ristore.
+// - rlstore must be locked for writing
+// - lstores must all be locked for reading
+func (s *store) imageTopLayerForMapping(image *Image, ristore roImageStore, rlstore rwLayerStore, lstores []roLayerStore, options types.IDMappingOptions) (*Layer, error) {
+ layerMatchesMappingOptions := func(layer *Layer, options types.IDMappingOptions) bool {
+ // If the driver supports shifting and the layer has no mappings, we can use it.
+ if s.canUseShifting(options.UIDMap, options.GIDMap) && len(layer.UIDMap) == 0 && len(layer.GIDMap) == 0 {
+ return true
+ }
+ // If we want host mapping, and the layer uses mappings, it's not the best match.
+ if options.HostUIDMapping && len(layer.UIDMap) != 0 {
+ return false
+ }
+ if options.HostGIDMapping && len(layer.GIDMap) != 0 {
+ return false
+ }
+ // Compare the maps.
+ return reflect.DeepEqual(layer.UIDMap, options.UIDMap) && reflect.DeepEqual(layer.GIDMap, options.GIDMap)
+ }
+ var layer, parentLayer *Layer
+ allStores := append([]roLayerStore{rlstore}, lstores...)
+ // Locate the image's top layer and its parent, if it has one.
+ createMappedLayer := ristore == s.imageStore
+ for _, s := range allStores {
+ store := s
+ // Walk the top layer list.
+ for _, candidate := range append([]string{image.TopLayer}, image.MappedTopLayers...) {
+ if cLayer, err := store.Get(candidate); err == nil {
+ // We want the layer's parent, too, if it has one.
+ var cParentLayer *Layer
+ if cLayer.Parent != "" {
+ // Its parent should be in one of the stores, somewhere.
+ for _, ps := range allStores {
+ if cParentLayer, err = ps.Get(cLayer.Parent); err == nil {
+ break
+ }
+ }
+ if cParentLayer == nil {
+ continue
+ }
+ }
+ // If the layer matches the desired mappings, it's a perfect match,
+ // so we're actually done here.
+ if layerMatchesMappingOptions(cLayer, options) {
+ return cLayer, nil
+ }
+ // Record the first one that we found, even if it's not ideal, so that
+ // we have a starting point.
+ if layer == nil {
+ layer = cLayer
+ parentLayer = cParentLayer
+ if store != rlstore {
+ // The layer is in another store, so we cannot
+ // create a mapped copy of it and attach that
+ // to the image.
+ createMappedLayer = false
+ }
+ }
+ }
+ }
+ }
+ if layer == nil {
+ return nil, ErrLayerUnknown
+ }
+ // The top layer's mappings don't match the ones we want, but it's in a read-only
+ // image store, so we can't create and add a mapped copy of the layer to the image.
+ // We'll have to do the mapping for the container itself, elsewhere.
+ if !createMappedLayer {
+ return layer, nil
+ }
+ // The top layer's mappings don't match the ones we want, and it's in an image store
+ // that lets us edit image metadata, so create a duplicate of the layer with the desired
+ // mappings, and register it as an alternate top layer in the image.
+ var layerOptions LayerOptions
+ if s.canUseShifting(options.UIDMap, options.GIDMap) {
+ layerOptions.IDMappingOptions = types.IDMappingOptions{
+ HostUIDMapping: true,
+ HostGIDMapping: true,
+ UIDMap: nil,
+ GIDMap: nil,
+ }
+ } else {
+ layerOptions.IDMappingOptions = types.IDMappingOptions{
+ HostUIDMapping: options.HostUIDMapping,
+ HostGIDMapping: options.HostGIDMapping,
+ UIDMap: copyIDMap(options.UIDMap),
+ GIDMap: copyIDMap(options.GIDMap),
+ }
+ }
+ layerOptions.TemplateLayer = layer.ID
+ mappedLayer, _, err := rlstore.create("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil)
+ if err != nil {
+ return nil, fmt.Errorf("creating an ID-mapped copy of layer %q: %w", layer.ID, err)
+ }
+ // By construction, createMappedLayer can only be true if ristore == s.imageStore.
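+ // If registering the mapped layer with the image fails below, the freshly
+ // created layer is deleted again so that it is not leaked.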
+ if err = s.imageStore.addMappedTopLayer(image.ID, mappedLayer.ID); err != nil {
+ if err2 := rlstore.Delete(mappedLayer.ID); err2 != nil {
+ err = fmt.Errorf("deleting layer %q: %v: %w", mappedLayer.ID, err2, err)
+ }
+ return nil, fmt.Errorf("registering ID-mapped layer with image %q: %w", image.ID, err)
+ }
+ return mappedLayer, nil
+}
+
+func (s *store) CreateContainer(id string, names []string, image, layer, metadata string, cOptions *ContainerOptions) (*Container, error) {
+ var options ContainerOptions
+ if cOptions != nil {
+ options = *cOptions
+ options.IDMappingOptions.UIDMap = copyIDMap(cOptions.IDMappingOptions.UIDMap)
+ options.IDMappingOptions.GIDMap = copyIDMap(cOptions.IDMappingOptions.GIDMap)
+ options.LabelOpts = copyStringSlice(cOptions.LabelOpts)
+ options.Flags = copyStringInterfaceMap(cOptions.Flags)
+ options.MountOpts = copyStringSlice(cOptions.MountOpts)
+ options.StorageOpt = copyStringStringMap(cOptions.StorageOpt)
+ options.BigData = copyContainerBigDataOptionSlice(cOptions.BigData)
+ }
+ if options.HostUIDMapping {
+ options.UIDMap = nil
+ }
+ if options.HostGIDMapping {
+ options.GIDMap = nil
+ }
+ options.Metadata = metadata
+ rlstore, lstores, err := s.bothLayerStoreKinds() // lstores will be locked read-only if image != ""
+ if err != nil {
+ return nil, err
+ }
+
+ var imageTopLayer *Layer
+ imageID := ""
+
+ if options.AutoUserNs || options.UIDMap != nil || options.GIDMap != nil {
+ // Prevent multiple instances from retrieving the same range when AutoUserNs
+ // is used.
+ // It doesn't prevent containers that specify an explicit mapping from overlapping
+ // with AutoUserNs.
+ s.usernsLock.Lock()
+ defer s.usernsLock.Unlock()
+ }
+
+ var imageHomeStore roImageStore // Set if image != ""
+ // s.imageStore is locked read-write if image != ""
+ // s.roImageStores are NOT NECESSARILY ALL locked read-only if image != ""
+ var cimage *Image // Set if image != ""
+ if image != "" {
+ if err := rlstore.startWriting(); err != nil {
+ return nil, err
+ }
+ defer rlstore.stopWriting()
+ for _, s := range lstores {
+ store := s
+ if err := store.startReading(); err != nil {
+ return nil, err
+ }
+ defer store.stopReading()
+ }
+ if err := s.imageStore.startWriting(); err != nil {
+ return nil, err
+ }
+ defer s.imageStore.stopWriting()
+ cimage, err = s.imageStore.Get(image)
+ if err == nil {
+ imageHomeStore = s.imageStore
+ } else {
+ for _, s := range s.roImageStores {
+ store := s
+ if err := store.startReading(); err != nil {
+ return nil, err
+ }
+ defer store.stopReading()
+ cimage, err = store.Get(image)
+ if err == nil {
+ imageHomeStore = store
+ break
+ }
+ }
+ }
+ if cimage == nil {
+ return nil, fmt.Errorf("locating image with ID %q: %w", image, ErrImageUnknown)
+ }
+ imageID = cimage.ID
+ }
+
+ if options.AutoUserNs {
+ var err error
+ options.UIDMap, options.GIDMap, err = s.getAutoUserNS(&options.AutoUserNsOpts, cimage, rlstore, lstores)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ uidMap := options.UIDMap
+ gidMap := options.GIDMap
+
+ idMappingsOptions := options.IDMappingOptions
+ if image != "" {
+ if cimage.TopLayer != "" {
+ ilayer, err := s.imageTopLayerForMapping(cimage, imageHomeStore, rlstore, lstores, idMappingsOptions)
+ if err != nil {
+ return nil, err
+ }
+ imageTopLayer = ilayer
+
+ if !options.HostUIDMapping && len(options.UIDMap) == 0 {
+ uidMap = ilayer.UIDMap
+ }
+ if !options.HostGIDMapping && len(options.GIDMap) == 0 {
+ gidMap = ilayer.GIDMap
+ }
+ }
+ } else {
+ if err := rlstore.startWriting(); err != nil {
+ return nil, err
+ }
+ defer rlstore.stopWriting()
+ if !options.HostUIDMapping && len(options.UIDMap) == 0 {
+ uidMap = s.uidMap
+ }
+ if !options.HostGIDMapping && len(options.GIDMap) == 0 {
+ gidMap = s.gidMap
+ }
+ }
+ layerOptions := &LayerOptions{
+ // Normally layers for containers are volatile only if the container is.
+ // But in transient store mode, all container layers are volatile.
+ Volatile: options.Volatile || s.transientStore,
+ }
+ if s.canUseShifting(uidMap, gidMap) {
+ layerOptions.IDMappingOptions = types.IDMappingOptions{
+ HostUIDMapping: true,
+ HostGIDMapping: true,
+ UIDMap: nil,
+ GIDMap: nil,
+ }
+ } else {
+ layerOptions.IDMappingOptions = types.IDMappingOptions{
+ HostUIDMapping: idMappingsOptions.HostUIDMapping,
+ HostGIDMapping: idMappingsOptions.HostGIDMapping,
+ UIDMap: copyIDMap(uidMap),
+ GIDMap: copyIDMap(gidMap),
+ }
+ }
+ if options.Flags == nil {
+ options.Flags = make(map[string]interface{})
+ }
+ plabel, _ := options.Flags[processLabelFlag].(string)
+ mlabel, _ := options.Flags[mountLabelFlag].(string)
+ if (plabel == "" && mlabel != "") || (plabel != "" && mlabel == "") {
+ return nil, errors.New("ProcessLabel and MountLabel must either both be specified or both be unspecified")
+ }
+
+ if plabel == "" {
+ processLabel, mountLabel, err := label.InitLabels(options.LabelOpts)
+ if err != nil {
+ return nil, err
+ }
+ mlabel = mountLabel
+ options.Flags[processLabelFlag] = processLabel
+ options.Flags[mountLabelFlag] = mountLabel
+ }
+
+ clayer, _, err := rlstore.create(layer, imageTopLayer, nil, mlabel, options.StorageOpt, layerOptions, true, nil)
+ if err != nil {
+ return nil, err
+ }
+ layer = clayer.ID
+
+ // Normally only `--rm` containers are volatile, but in transient store mode all containers are volatile
+ if s.transientStore {
+ options.Volatile = true
+ }
+
+ return writeToContainerStore(s, func() (*Container, error) {
+ options.IDMappingOptions = types.IDMappingOptions{
+ HostUIDMapping: len(options.UIDMap) == 0,
+ HostGIDMapping: len(options.GIDMap) == 0,
+ UIDMap: copyIDMap(options.UIDMap),
+ GIDMap: copyIDMap(options.GIDMap),
+ }
+ container, err := s.containerStore.create(id, names, imageID, layer, &options)
+ if err != nil || container == nil {
+ if err2 := rlstore.Delete(layer); err2 != nil {
+ if err == nil {
+ err = fmt.Errorf("deleting layer %#v: %w", layer, err2)
+ } else {
+ logrus.Errorf("While recovering from a failure to create a container, error deleting layer %#v: %v", layer, err2)
+ }
+ }
+ }
+ return container, err
+ })
+}
+
+func (s *store) SetMetadata(id, metadata string) error {
+ return s.writeToAllStores(func(rlstore rwLayerStore) error {
+ if rlstore.Exists(id) {
+ return rlstore.SetMetadata(id, metadata)
+ }
+ if s.imageStore.Exists(id) {
+ return s.imageStore.SetMetadata(id, metadata)
+ }
+ if s.containerStore.Exists(id) {
+ return s.containerStore.SetMetadata(id, metadata)
+ }
+ return ErrNotAnID
+ })
+}
+
+func (s *store) Metadata(id string) (string, error) {
+ if res, done, err := readAllLayerStores(s, func(store roLayerStore) (string, bool, error) {
+ if store.Exists(id) {
+ res, err := store.Metadata(id)
+ return res, true, err
+ }
+ return "", false, nil
+ }); done {
+ return res, err
+ }
+
+ if res, done, err := readAllImageStores(s, func(store roImageStore) (string, bool, error) {
+ if store.Exists(id) {
+ res, err := store.Metadata(id)
+ return res, true, err
+ }
+ return "", false, nil
+ }); done {
+ return res, err
+ }
+
+ if res, done, err := readContainerStore(s, func() (string, bool, error) {
+ if s.containerStore.Exists(id) 
{
+ res, err := s.containerStore.Metadata(id)
+ return res, true, err
+ }
+ return "", false, nil
+ }); done {
+ return res, err
+ }
+
+ return "", ErrNotAnID
+}
+
+func (s *store) ListImageBigData(id string) ([]string, error) {
+ if res, done, err := readAllImageStores(s, func(store roImageStore) ([]string, bool, error) {
+ bigDataNames, err := store.BigDataNames(id)
+ if err == nil {
+ return bigDataNames, true, nil
+ }
+ return nil, false, nil
+ }); done {
+ return res, err
+ }
+ return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
+}
+
+func (s *store) ImageBigDataSize(id, key string) (int64, error) {
+ if res, done, err := readAllImageStores(s, func(store roImageStore) (int64, bool, error) {
+ size, err := store.BigDataSize(id, key)
+ if err == nil {
+ return size, true, nil
+ }
+ return -1, false, nil
+ }); done {
+ if err != nil {
+ return -1, err
+ }
+ return res, nil
+ }
+ return -1, ErrSizeUnknown
+}
+
+func (s *store) ImageBigDataDigest(id, key string) (digest.Digest, error) {
+ if res, done, err := readAllImageStores(s, func(ristore roImageStore) (digest.Digest, bool, error) {
+ d, err := ristore.BigDataDigest(id, key)
+ if err == nil && d.Validate() == nil {
+ return d, true, nil
+ }
+ return "", false, nil
+ }); done {
+ return res, err
+ }
+ return "", ErrDigestUnknown
+}
+
+func (s *store) ImageBigData(id, key string) ([]byte, error) {
+ foundImage := false
+ if res, done, err := readAllImageStores(s, func(store roImageStore) ([]byte, bool, error) {
+ data, err := store.BigData(id, key)
+ if err == nil {
+ return data, true, nil
+ }
+ if store.Exists(id) {
+ foundImage = true
+ }
+ return nil, false, nil
+ }); done {
+ return res, err
+ }
+ if foundImage {
+ return nil, fmt.Errorf("locating item named %q for image with ID %q (consider removing the image to resolve the issue): %w", key, id, os.ErrNotExist)
+ }
+ return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
+}
+
+// ListLayerBigData retrieves a list of the (possibly large) chunks of
+// named data associated with a layer.
+func (s *store) ListLayerBigData(id string) ([]string, error) {
+ foundLayer := false
+ if res, done, err := readAllLayerStores(s, func(store roLayerStore) ([]string, bool, error) {
+ data, err := store.BigDataNames(id)
+ if err == nil {
+ return data, true, nil
+ }
+ if store.Exists(id) {
+ foundLayer = true
+ }
+ return nil, false, nil
+ }); done {
+ return res, err
+ }
+ if foundLayer {
+ return nil, fmt.Errorf("locating big data for layer with ID %q: %w", id, os.ErrNotExist)
+ }
+ return nil, fmt.Errorf("locating layer with ID %q: %w", id, ErrLayerUnknown)
+}
+
+// LayerBigData retrieves a (possibly large) chunk of named data
+// associated with a layer.
+func (s *store) LayerBigData(id, key string) (io.ReadCloser, error) {
+ foundLayer := false
+ if res, done, err := readAllLayerStores(s, func(store roLayerStore) (io.ReadCloser, bool, error) {
+ data, err := store.BigData(id, key)
+ if err == nil {
+ return data, true, nil
+ }
+ if store.Exists(id) {
+ foundLayer = true
+ }
+ return nil, false, nil
+ }); done {
+ return res, err
+ }
+ if foundLayer {
+ return nil, fmt.Errorf("locating item named %q for layer with ID %q: %w", key, id, os.ErrNotExist)
+ }
+ return nil, fmt.Errorf("locating layer with ID %q: %w", id, ErrLayerUnknown)
+}
+
+// SetLayerBigData stores a (possibly large) chunk of named data
+// associated with a layer.
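+// A minimal usage sketch (hypothetical ID, key, and payload):
+//
+// err := s.SetLayerBigData("layer-id", "annotations", bytes.NewReader(blob))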
+func (s *store) SetLayerBigData(id, key string, data io.Reader) error { + _, err := writeToLayerStore(s, func(store rwLayerStore) (struct{}, error) { + return struct{}{}, store.SetBigData(id, key, data) + }) + return err +} + +func (s *store) SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error { + _, err := writeToImageStore(s, func() (struct{}, error) { + return struct{}{}, s.imageStore.SetBigData(id, key, data, digestManifest) + }) + return err +} + +func (s *store) ImageSize(id string) (int64, error) { + layerStores, err := s.allLayerStores() + if err != nil { + return -1, err + } + for _, s := range layerStores { + store := s + if err := store.startReading(); err != nil { + return -1, err + } + defer store.stopReading() + } + + // Look for the image's record. + var imageStore roBigDataStore + var image *Image + for _, s := range s.allImageStores() { + store := s + if err := store.startReading(); err != nil { + return -1, err + } + defer store.stopReading() + if image, err = store.Get(id); err == nil { + imageStore = store + break + } + } + if image == nil { + return -1, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) + } + + // Start with a list of the image's top layers, if it has any. + queue := make(map[string]struct{}) + for _, layerID := range append([]string{image.TopLayer}, image.MappedTopLayers...) { + if layerID != "" { + queue[layerID] = struct{}{} + } + } + visited := make(map[string]struct{}) + // Walk all of the layers. + var size int64 + for len(visited) < len(queue) { + for layerID := range queue { + // Visit each layer only once. + if _, ok := visited[layerID]; ok { + continue + } + visited[layerID] = struct{}{} + // Look for the layer and the store that knows about it. + var layerStore roLayerStore + var layer *Layer + for _, store := range layerStores { + if layer, err = store.Get(layerID); err == nil { + layerStore = store + break + } + } + if layer == nil { + return -1, fmt.Errorf("locating layer with ID %q: %w", layerID, ErrLayerUnknown) + } + // The UncompressedSize is only valid if there's a digest to go with it. + n := layer.UncompressedSize + if layer.UncompressedDigest == "" { + // Compute the size. + n, err = layerStore.DiffSize("", layer.ID) + if err != nil { + return -1, fmt.Errorf("size/digest of layer with ID %q could not be calculated: %w", layerID, err) + } + } + // Count this layer. + size += n + // Make a note to visit the layer's parent if we haven't already. + if layer.Parent != "" { + queue[layer.Parent] = struct{}{} + } + } + } + + // Count big data items. + names, err := imageStore.BigDataNames(id) + if err != nil { + return -1, fmt.Errorf("reading list of big data items for image %q: %w", id, err) + } + for _, name := range names { + n, err := imageStore.BigDataSize(id, name) + if err != nil { + return -1, fmt.Errorf("reading size of big data item %q for image %q: %w", name, id, err) + } + size += n + } + + return size, nil +} + +func (s *store) ContainerSize(id string) (int64, error) { + layerStores, err := s.allLayerStores() + if err != nil { + return -1, err + } + for _, s := range layerStores { + store := s + if err := store.startReading(); err != nil { + return -1, err + } + defer store.stopReading() + } + + // Get the location of the container directory and container run directory. + // Do it before we lock the container store because they do, too. 
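+ // (ContainerDirectory and ContainerRunDirectory take the container store
+ // lock themselves, which is why they run before writeToContainerStore
+ // below acquires it.)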
+ cdir, err := s.ContainerDirectory(id) + if err != nil { + return -1, err + } + rdir, err := s.ContainerRunDirectory(id) + if err != nil { + return -1, err + } + + return writeToContainerStore(s, func() (int64, error) { // Yes, s.containerStore.BigDataSize requires a write lock. + // Read the container record. + container, err := s.containerStore.Get(id) + if err != nil { + return -1, err + } + + // Read the container's layer's size. + var layer *Layer + var size int64 + for _, store := range layerStores { + if layer, err = store.Get(container.LayerID); err == nil { + size, err = store.DiffSize("", layer.ID) + if err != nil { + return -1, fmt.Errorf("determining size of layer with ID %q: %w", layer.ID, err) + } + break + } + } + if layer == nil { + return -1, fmt.Errorf("locating layer with ID %q: %w", container.LayerID, ErrLayerUnknown) + } + + // Count big data items. + names, err := s.containerStore.BigDataNames(id) + if err != nil { + return -1, fmt.Errorf("reading list of big data items for container %q: %w", container.ID, err) + } + for _, name := range names { + n, err := s.containerStore.BigDataSize(id, name) + if err != nil { + return -1, fmt.Errorf("reading size of big data item %q for container %q: %w", name, id, err) + } + size += n + } + + // Count the size of our container directory and container run directory. + n, err := directory.Size(cdir) + if err != nil { + return -1, err + } + size += n + n, err = directory.Size(rdir) + if err != nil { + return -1, err + } + size += n + + return size, nil + }) +} + +func (s *store) ListContainerBigData(id string) ([]string, error) { + res, _, err := readContainerStore(s, func() ([]string, bool, error) { + res, err := s.containerStore.BigDataNames(id) + return res, true, err + }) + return res, err +} + +func (s *store) ContainerBigDataSize(id, key string) (int64, error) { + return writeToContainerStore(s, func() (int64, error) { // Yes, BigDataSize requires a write lock. + return s.containerStore.BigDataSize(id, key) + }) +} + +func (s *store) ContainerBigDataDigest(id, key string) (digest.Digest, error) { + return writeToContainerStore(s, func() (digest.Digest, error) { // Yes, BigDataDigest requires a write lock. 
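+ // (Presumably a write lock because a missing digest may be computed and
+ // cached in the container record on first access -- an assumption, not
+ // verified here.)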
+ return s.containerStore.BigDataDigest(id, key) + }) +} + +func (s *store) ContainerBigData(id, key string) ([]byte, error) { + res, _, err := readContainerStore(s, func() ([]byte, bool, error) { + res, err := s.containerStore.BigData(id, key) + return res, true, err + }) + return res, err +} + +func (s *store) SetContainerBigData(id, key string, data []byte) error { + _, err := writeToContainerStore(s, func() (struct{}, error) { + return struct{}{}, s.containerStore.SetBigData(id, key, data) + }) + return err +} + +func (s *store) Exists(id string) bool { + found, _, err := readAllLayerStores(s, func(store roLayerStore) (bool, bool, error) { + if store.Exists(id) { + return true, true, nil + } + return false, false, nil + }) + if err != nil { + return false + } + if found { + return true + } + + found, _, err = readAllImageStores(s, func(store roImageStore) (bool, bool, error) { + if store.Exists(id) { + return true, true, nil + } + return false, false, nil + }) + if err != nil { + return false + } + if found { + return true + } + + found, _, err = readContainerStore(s, func() (bool, bool, error) { + return s.containerStore.Exists(id), true, nil + }) + if err != nil { + return false + } + return found +} + +func dedupeStrings(names []string) []string { + seen := make(map[string]struct{}) + deduped := make([]string, 0, len(names)) + for _, name := range names { + if _, wasSeen := seen[name]; !wasSeen { + seen[name] = struct{}{} + deduped = append(deduped, name) + } + } + return deduped +} + +func dedupeDigests(digests []digest.Digest) []digest.Digest { + seen := make(map[digest.Digest]struct{}) + deduped := make([]digest.Digest, 0, len(digests)) + for _, d := range digests { + if _, wasSeen := seen[d]; !wasSeen { + seen[d] = struct{}{} + deduped = append(deduped, d) + } + } + return deduped +} + +// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. +func (s *store) SetNames(id string, names []string) error { + return s.updateNames(id, names, setNames) +} + +func (s *store) AddNames(id string, names []string) error { + return s.updateNames(id, names, addNames) +} + +func (s *store) RemoveNames(id string, names []string) error { + return s.updateNames(id, names, removeNames) +} + +func (s *store) updateNames(id string, names []string, op updateNameOperation) error { + deduped := dedupeStrings(names) + + if found, err := writeToLayerStore(s, func(rlstore rwLayerStore) (bool, error) { + if !rlstore.Exists(id) { + return false, nil + } + return true, rlstore.updateNames(id, deduped, op) + }); err != nil || found { + return err + } + + if err := s.imageStore.startWriting(); err != nil { + return err + } + defer s.imageStore.stopWriting() + if s.imageStore.Exists(id) { + return s.imageStore.updateNames(id, deduped, op) + } + + // Check if the id refers to a read-only image store -- we want to allow images in + // read-only stores to have their names changed. 
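+ // (The loop below "pulls up" such an image: it re-creates the image, along
+ // with its big data items, in the writeable image store before renaming.)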
+ for _, is := range s.roImageStores {
+ store := is
+ if err := store.startReading(); err != nil {
+ return err
+ }
+ defer store.stopReading()
+ if i, err := store.Get(id); err == nil {
+ // "pull up" the image so that we can change its names list
+ options := ImageOptions{
+ CreationDate: i.Created,
+ Digest: i.Digest,
+ Digests: copyDigestSlice(i.Digests),
+ Metadata: i.Metadata,
+ NamesHistory: copyStringSlice(i.NamesHistory),
+ Flags: copyStringInterfaceMap(i.Flags),
+ }
+ for _, key := range i.BigDataNames {
+ data, err := store.BigData(id, key)
+ if err != nil {
+ return err
+ }
+ dataDigest, err := store.BigDataDigest(id, key)
+ if err != nil {
+ return err
+ }
+ options.BigData = append(options.BigData, ImageBigDataOption{
+ Key: key,
+ Data: data,
+ Digest: dataDigest,
+ })
+ }
+ _, err = s.imageStore.create(id, i.Names, i.TopLayer, options)
+ if err != nil {
+ return err
+ }
+ // now make the changes to the writeable image record's names list
+ return s.imageStore.updateNames(id, deduped, op)
+ }
+ }
+
+ if found, err := writeToContainerStore(s, func() (bool, error) {
+ if !s.containerStore.Exists(id) {
+ return false, nil
+ }
+ return true, s.containerStore.updateNames(id, deduped, op)
+ }); err != nil || found {
+ return err
+ }
+
+ return ErrLayerUnknown
+}
+
+func (s *store) Names(id string) ([]string, error) {
+ if res, done, err := readAllLayerStores(s, func(store roLayerStore) ([]string, bool, error) {
+ if l, err := store.Get(id); l != nil && err == nil {
+ return l.Names, true, nil
+ }
+ return nil, false, nil
+ }); done {
+ return res, err
+ }
+
+ if res, done, err := readAllImageStores(s, func(store roImageStore) ([]string, bool, error) {
+ if i, err := store.Get(id); i != nil && err == nil {
+ return i.Names, true, nil
+ }
+ return nil, false, nil
+ }); done {
+ return res, err
+ }
+
+ if res, done, err := readContainerStore(s, func() ([]string, bool, error) {
+ if c, err := s.containerStore.Get(id); c != nil && err == nil {
+ return c.Names, true, nil
+ }
+ return nil, false, nil
+ }); done {
+ return res, err
+ }
+
+ return nil, ErrLayerUnknown
+}
+
+func (s *store) Lookup(name string) (string, error) {
+ if res, done, err := readAllLayerStores(s, func(store roLayerStore) (string, bool, error) {
+ if l, err := store.Get(name); l != nil && err == nil {
+ return l.ID, true, nil
+ }
+ return "", false, nil
+ }); done {
+ return res, err
+ }
+
+ if res, done, err := readAllImageStores(s, func(store roImageStore) (string, bool, error) {
+ if i, err := store.Get(name); i != nil && err == nil {
+ return i.ID, true, nil
+ }
+ return "", false, nil
+ }); done {
+ return res, err
+ }
+
+ if res, done, err := readContainerStore(s, func() (string, bool, error) {
+ if c, err := s.containerStore.Get(name); c != nil && err == nil {
+ return c.ID, true, nil
+ }
+ return "", false, nil
+ }); done {
+ return res, err
+ }
+
+ return "", ErrLayerUnknown
+}
+
+func (s *store) DeleteLayer(id string) error {
+ return s.writeToAllStores(func(rlstore rwLayerStore) error {
+ if rlstore.Exists(id) {
+ // Normalize id to the layer's canonical ID.
+ if l, err := rlstore.Get(id); err == nil {
+ id = l.ID
+ }
+ layers, err := rlstore.Layers()
+ if err != nil {
+ return err
+ }
+ for _, layer := range layers {
+ if layer.Parent == id {
+ return fmt.Errorf("used by layer %v: %w", layer.ID, ErrLayerHasChildren)
+ }
+ }
+ images, err := s.imageStore.Images()
+ if err != nil {
+ return err
+ }
+
+ for _, image := range images {
+ if image.TopLayer == id {
+ return fmt.Errorf("layer %v used by image %v: %w", id, image.ID, ErrLayerUsedByImage)
+ }
+ }
+ containers, err := s.containerStore.Containers()
+ if err != nil {
+ return err
+ }
+ for _, container := range containers {
+ if container.LayerID == id {
+ return fmt.Errorf("layer %v used by container %v: %w", id, container.ID, ErrLayerUsedByContainer)
+ }
+ }
+ if err := rlstore.Delete(id); err != nil {
+ return fmt.Errorf("delete layer %v: %w", id, err)
+ }
+
+ for _, image := range images {
+ if stringutils.InSlice(image.MappedTopLayers, id) {
+ if err = s.imageStore.removeMappedTopLayer(image.ID, id); err != nil {
+ return fmt.Errorf("remove mapped top layer %v from image %v: %w", id, image.ID, err)
+ }
+ }
+ }
+ return nil
+ }
+ return ErrNotALayer
+ })
+}
+
+func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) {
+ layersToRemove := []string{}
+ if err := s.writeToAllStores(func(rlstore rwLayerStore) error {
+ // Delete the image from all available image stores configured to be used.
+ imageFound := false
+ for _, is := range append([]rwImageStore{s.imageStore}, s.rwImageStores...) {
+ if is != s.imageStore {
+ // This is an additional writeable image store,
+ // so we must lock it ourselves.
+ if err := is.startWriting(); err != nil {
+ return err
+ }
+ defer is.stopWriting()
+ }
+ if !is.Exists(id) {
+ continue
+ }
+ imageFound = true
+ image, err := is.Get(id)
+ if err != nil {
+ return err
+ }
+ id = image.ID
+ containers, err := s.containerStore.Containers()
+ if err != nil {
+ return err
+ }
+ aContainerByImage := make(map[string]string)
+ for _, container := range containers {
+ aContainerByImage[container.ImageID] = container.ID
+ }
+ if container, ok := aContainerByImage[id]; ok {
+ return fmt.Errorf("image used by %v: %w", container, ErrImageUsedByContainer)
+ }
+ images, err := is.Images()
+ if err != nil {
+ return err
+ }
+ layers, err := rlstore.Layers()
+ if err != nil {
+ return err
+ }
+ childrenByParent := make(map[string][]string)
+ for _, layer := range layers {
+ childrenByParent[layer.Parent] = append(childrenByParent[layer.Parent], layer.ID)
+ }
+ otherImagesTopLayers := make(map[string]struct{})
+ for _, img := range images {
+ if img.ID != id {
+ otherImagesTopLayers[img.TopLayer] = struct{}{}
+ for _, layerID := range img.MappedTopLayers {
+ otherImagesTopLayers[layerID] = struct{}{}
+ }
+ }
+ }
+ if commit {
+ if err = is.Delete(id); err != nil {
+ return err
+ }
+ }
+ layer := image.TopLayer
+ layersToRemoveMap := make(map[string]struct{})
+ layersToRemove = append(layersToRemove, image.MappedTopLayers...)
+ for _, mappedTopLayer := range image.MappedTopLayers {
+ layersToRemoveMap[mappedTopLayer] = struct{}{}
+ }
+ for layer != "" {
+ if s.containerStore.Exists(layer) {
+ break
+ }
+ if _, used := otherImagesTopLayers[layer]; used {
+ break
+ }
+ parent := ""
+ if l, err := rlstore.Get(layer); err == nil {
+ parent = l.Parent
+ }
+ hasChildrenNotBeingRemoved := func() bool {
+ layersToCheck := []string{layer}
+ if layer == image.TopLayer {
+ layersToCheck = append(layersToCheck, image.MappedTopLayers...)
+ } + for _, layer := range layersToCheck { + if childList := childrenByParent[layer]; len(childList) > 0 { + for _, child := range childList { + if _, childIsSlatedForRemoval := layersToRemoveMap[child]; childIsSlatedForRemoval { + continue + } + return true + } + } + } + return false + } + if hasChildrenNotBeingRemoved() { + break + } + layersToRemove = append(layersToRemove, layer) + layersToRemoveMap[layer] = struct{}{} + layer = parent + } + } + if !imageFound { + return ErrNotAnImage + } + if commit { + for _, layer := range layersToRemove { + if err = rlstore.Delete(layer); err != nil { + return err + } + } + } + return nil + }); err != nil { + return nil, err + } + return layersToRemove, nil +} + +func (s *store) DeleteContainer(id string) error { + return s.writeToAllStores(func(rlstore rwLayerStore) error { + if !s.containerStore.Exists(id) { + return ErrNotAContainer + } + + container, err := s.containerStore.Get(id) + if err != nil { + return ErrNotAContainer + } + + // delete the layer first, separately, so that if we get an + // error while trying to do so, we don't go ahead and delete + // the container record that refers to it, effectively losing + // track of it + if rlstore.Exists(container.LayerID) { + if err := rlstore.Delete(container.LayerID); err != nil { + return err + } + } + + var wg multierror.Group + + middleDir := s.graphDriverName + "-containers" + + wg.Go(func() error { + gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID) + return system.EnsureRemoveAll(gcpath) + }) + + wg.Go(func() error { + rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID) + return system.EnsureRemoveAll(rcpath) + }) + + if multierr := wg.Wait(); multierr != nil { + return multierr.ErrorOrNil() + } + return s.containerStore.Delete(id) + }) +} + +func (s *store) Delete(id string) error { + return s.writeToAllStores(func(rlstore rwLayerStore) error { + if s.containerStore.Exists(id) { + if container, err := s.containerStore.Get(id); err == nil { + if rlstore.Exists(container.LayerID) { + if err = rlstore.Delete(container.LayerID); err != nil { + return err + } + if err = s.containerStore.Delete(id); err != nil { + return err + } + middleDir := s.graphDriverName + "-containers" + gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID, "userdata") + if err = os.RemoveAll(gcpath); err != nil { + return err + } + rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID, "userdata") + if err = os.RemoveAll(rcpath); err != nil { + return err + } + return nil + } + return ErrNotALayer + } + } + if s.imageStore.Exists(id) { + return s.imageStore.Delete(id) + } + if rlstore.Exists(id) { + return rlstore.Delete(id) + } + return ErrLayerUnknown + }) +} + +func (s *store) Wipe() error { + return s.writeToAllStores(func(rlstore rwLayerStore) error { + if err := s.containerStore.Wipe(); err != nil { + return err + } + if err := s.imageStore.Wipe(); err != nil { + return err + } + return rlstore.Wipe() + }) +} + +func (s *store) Status() ([][2]string, error) { + rlstore, err := s.getLayerStore() + if err != nil { + return nil, err + } + return rlstore.Status() +} + +func (s *store) Version() ([][2]string, error) { + return [][2]string{}, nil +} + +func (s *store) mount(id string, options drivers.MountOpts) (string, error) { + // We need to make sure the home mount is present when the Mount is done, which happens by possibly reinitializing the graph driver + // in startUsingGraphDriver(). 
+ if err := s.startUsingGraphDriver(); err != nil {
+ return "", err
+ }
+ defer s.stopUsingGraphDriver()
+
+ rlstore, err := s.getLayerStoreLocked()
+ if err != nil {
+ return "", err
+ }
+ if err := rlstore.startWriting(); err != nil {
+ return "", err
+ }
+ defer rlstore.stopWriting()
+
+ if options.UidMaps != nil || options.GidMaps != nil {
+ options.DisableShifting = !s.canUseShifting(options.UidMaps, options.GidMaps)
+ }
+
+ if rlstore.Exists(id) {
+ return rlstore.Mount(id, options)
+ }
+ return "", ErrLayerUnknown
+}
+
+func (s *store) MountImage(id string, mountOpts []string, mountLabel string) (string, error) {
+ // Append ReadOnly option to mountOptions
+ img, err := s.Image(id)
+ if err != nil {
+ return "", err
+ }
+
+ if err := validateMountOptions(mountOpts); err != nil {
+ return "", err
+ }
+ options := drivers.MountOpts{
+ MountLabel: mountLabel,
+ Options: append(mountOpts, "ro"),
+ }
+
+ return s.mount(img.TopLayer, options)
+}
+
+func (s *store) Mount(id, mountLabel string) (string, error) {
+ options := drivers.MountOpts{
+ MountLabel: mountLabel,
+ }
+ // Check if `id` is a container: if so, use its LayerID, uidmap and gidmap, along with
+ // its mount options; otherwise we assume the id is a LayerID and attempt to mount it.
+ if container, err := s.Container(id); err == nil {
+ id = container.LayerID
+ options.UidMaps = container.UIDMap
+ options.GidMaps = container.GIDMap
+ options.Options = container.MountOpts()
+ if !s.disableVolatile {
+ if v, found := container.Flags[volatileFlag]; found {
+ if b, ok := v.(bool); ok {
+ options.Volatile = b
+ }
+ }
+ }
+ }
+ return s.mount(id, options)
+}
+
+func (s *store) Mounted(id string) (int, error) {
+ if layerID, err := s.ContainerLayerID(id); err == nil {
+ id = layerID
+ }
+ rlstore, err := s.getLayerStore()
+ if err != nil {
+ return 0, err
+ }
+ if err := rlstore.startReading(); err != nil {
+ return 0, err
+ }
+ defer rlstore.stopReading()
+
+ return rlstore.Mounted(id)
+}
+
+func (s *store) UnmountImage(id string, force bool) (bool, error) {
+ img, err := s.Image(id)
+ if err != nil {
+ return false, err
+ }
+ return s.Unmount(img.TopLayer, force)
+}
+
+func (s *store) Unmount(id string, force bool) (bool, error) {
+ if layerID, err := s.ContainerLayerID(id); err == nil {
+ id = layerID
+ }
+ return writeToLayerStore(s, func(rlstore rwLayerStore) (bool, error) {
+ if rlstore.Exists(id) {
+ return rlstore.unmount(id, force, false)
+ }
+ return false, ErrLayerUnknown
+ })
+}
+
+func (s *store) Changes(from, to string) ([]archive.Change, error) {
+ if res, done, err := readAllLayerStores(s, func(store roLayerStore) ([]archive.Change, bool, error) {
+ if store.Exists(to) {
+ res, err := store.Changes(from, to)
+ return res, true, err
+ }
+ return nil, false, nil
+ }); done {
+ return res, err
+ }
+ return nil, ErrLayerUnknown
+}
+
+func (s *store) DiffSize(from, to string) (int64, error) {
+ if res, done, err := readAllLayerStores(s, func(store roLayerStore) (int64, bool, error) {
+ if store.Exists(to) {
+ res, err := store.DiffSize(from, to)
+ return res, true, err
+ }
+ return -1, false, nil
+ }); done {
+ if err != nil {
+ return -1, err
+ }
+ return res, nil
+ }
+ return -1, ErrLayerUnknown
+}
+
+func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) {
+ // NaiveDiff could cause mounts to happen without a lock, so be safe
+ // and treat the .Diff operation as a Mount.
+ // We need to make sure the home mount is present when the Mount is done, which happens by possibly reinitializing the graph driver + // in startUsingGraphDriver(). + if err := s.startUsingGraphDriver(); err != nil { + return nil, err + } + defer s.stopUsingGraphDriver() + + layerStores, err := s.allLayerStoresLocked() + if err != nil { + return nil, err + } + + for _, s := range layerStores { + store := s + if err := store.startReading(); err != nil { + return nil, err + } + if store.Exists(to) { + rc, err := store.Diff(from, to, options) + if rc != nil && err == nil { + wrapped := ioutils.NewReadCloserWrapper(rc, func() error { + err := rc.Close() + store.stopReading() + return err + }) + return wrapped, nil + } + store.stopReading() + return rc, err + } + store.stopReading() + } + return nil, ErrLayerUnknown +} + +func (s *store) ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error { + _, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) { + if !rlstore.Exists(to) { + return struct{}{}, ErrLayerUnknown + } + return struct{}{}, rlstore.ApplyDiffFromStagingDirectory(to, stagingDirectory, diffOutput, options) + }) + return err +} + +func (s *store) CleanupStagingDirectory(stagingDirectory string) error { + _, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) { + return struct{}{}, rlstore.CleanupStagingDirectory(stagingDirectory) + }) + return err +} + +func (s *store) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) { + return writeToLayerStore(s, func(rlstore rwLayerStore) (*drivers.DriverWithDifferOutput, error) { + if to != "" && !rlstore.Exists(to) { + return nil, ErrLayerUnknown + } + return rlstore.ApplyDiffWithDiffer(to, options, differ) + }) +} + +func (s *store) DifferTarget(id string) (string, error) { + return writeToLayerStore(s, func(rlstore rwLayerStore) (string, error) { + if rlstore.Exists(id) { + return rlstore.DifferTarget(id) + } + return "", ErrLayerUnknown + }) +} + +func (s *store) ApplyDiff(to string, diff io.Reader) (int64, error) { + return writeToLayerStore(s, func(rlstore rwLayerStore) (int64, error) { + if rlstore.Exists(to) { + return rlstore.ApplyDiff(to, diff) + } + return -1, ErrLayerUnknown + }) +} + +func (s *store) layersByMappedDigest(m func(roLayerStore, digest.Digest) ([]Layer, error), d digest.Digest) ([]Layer, error) { + var layers []Layer + if _, _, err := readAllLayerStores(s, func(store roLayerStore) (struct{}, bool, error) { + storeLayers, err := m(store, d) + if err != nil { + if !errors.Is(err, ErrLayerUnknown) { + return struct{}{}, true, err + } + return struct{}{}, false, nil + } + layers = append(layers, storeLayers...) 
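+		// Returning false keeps readAllLayerStores scanning: the same digest
+		// may be attached to layers in more than one store.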
+ return struct{}{}, false, nil + }); err != nil { + return nil, err + } + if len(layers) == 0 { + return nil, ErrLayerUnknown + } + return layers, nil +} + +func (s *store) LayersByCompressedDigest(d digest.Digest) ([]Layer, error) { + if err := d.Validate(); err != nil { + return nil, fmt.Errorf("looking for compressed layers matching digest %q: %w", d, err) + } + return s.layersByMappedDigest(func(r roLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByCompressedDigest(d) }, d) +} + +func (s *store) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) { + if err := d.Validate(); err != nil { + return nil, fmt.Errorf("looking for layers matching digest %q: %w", d, err) + } + return s.layersByMappedDigest(func(r roLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByUncompressedDigest(d) }, d) +} + +func (s *store) LayerSize(id string) (int64, error) { + if res, done, err := readAllLayerStores(s, func(store roLayerStore) (int64, bool, error) { + if store.Exists(id) { + res, err := store.Size(id) + return res, true, err + } + return -1, false, nil + }); done { + if err != nil { + return -1, err + } + return res, nil + } + return -1, ErrLayerUnknown +} + +func (s *store) LayerParentOwners(id string) ([]int, []int, error) { + rlstore, err := s.getLayerStore() + if err != nil { + return nil, nil, err + } + if err := rlstore.startReading(); err != nil { + return nil, nil, err + } + defer rlstore.stopReading() + if rlstore.Exists(id) { + return rlstore.ParentOwners(id) + } + return nil, nil, ErrLayerUnknown +} + +func (s *store) ContainerParentOwners(id string) ([]int, []int, error) { + rlstore, err := s.getLayerStore() + if err != nil { + return nil, nil, err + } + if err := rlstore.startReading(); err != nil { + return nil, nil, err + } + defer rlstore.stopReading() + if err := s.containerStore.startReading(); err != nil { + return nil, nil, err + } + defer s.containerStore.stopReading() + container, err := s.containerStore.Get(id) + if err != nil { + return nil, nil, err + } + if rlstore.Exists(container.LayerID) { + return rlstore.ParentOwners(container.LayerID) + } + return nil, nil, ErrLayerUnknown +} + +func (s *store) Layers() ([]Layer, error) { + var layers []Layer + if _, done, err := readAllLayerStores(s, func(store roLayerStore) (struct{}, bool, error) { + storeLayers, err := store.Layers() + if err != nil { + return struct{}{}, true, err + } + layers = append(layers, storeLayers...) + return struct{}{}, false, nil + }); done { + return nil, err + } + return layers, nil +} + +func (s *store) Images() ([]Image, error) { + var images []Image + if _, _, err := readAllImageStores(s, func(store roImageStore) (struct{}, bool, error) { + storeImages, err := store.Images() + if err != nil { + return struct{}{}, true, err + } + images = append(images, storeImages...) 
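+		// Keep iterating so that images in additional read-only stores are
+		// included as well.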
+ return struct{}{}, false, nil + }); err != nil { + return nil, err + } + return images, nil +} + +func (s *store) Containers() ([]Container, error) { + res, _, err := readContainerStore(s, func() ([]Container, bool, error) { + res, err := s.containerStore.Containers() + return res, true, err + }) + return res, err +} + +func (s *store) Layer(id string) (*Layer, error) { + if res, done, err := readAllLayerStores(s, func(store roLayerStore) (*Layer, bool, error) { + layer, err := store.Get(id) + if err == nil { + return layer, true, nil + } + return nil, false, nil + }); done { + return res, err + } + return nil, ErrLayerUnknown +} + +func (s *store) LookupAdditionalLayer(d digest.Digest, imageref string) (AdditionalLayer, error) { + var adriver drivers.AdditionalLayerStoreDriver + if err := func() error { // A scope for defer + if err := s.startUsingGraphDriver(); err != nil { + return err + } + defer s.stopUsingGraphDriver() + a, ok := s.graphDriver.(drivers.AdditionalLayerStoreDriver) + if !ok { + return ErrLayerUnknown + } + adriver = a + return nil + }(); err != nil { + return nil, err + } + + al, err := adriver.LookupAdditionalLayer(d, imageref) + if err != nil { + if errors.Is(err, drivers.ErrLayerUnknown) { + return nil, ErrLayerUnknown + } + return nil, err + } + info, err := al.Info() + if err != nil { + return nil, err + } + defer info.Close() + var layer Layer + if err := json.NewDecoder(info).Decode(&layer); err != nil { + return nil, err + } + return &additionalLayer{&layer, al, s}, nil +} + +type additionalLayer struct { + layer *Layer + handler drivers.AdditionalLayer + s *store +} + +func (al *additionalLayer) UncompressedDigest() digest.Digest { + return al.layer.UncompressedDigest +} + +func (al *additionalLayer) CompressedSize() int64 { + return al.layer.CompressedSize +} + +func (al *additionalLayer) PutAs(id, parent string, names []string) (*Layer, error) { + rlstore, rlstores, err := al.s.bothLayerStoreKinds() + if err != nil { + return nil, err + } + if err := rlstore.startWriting(); err != nil { + return nil, err + } + defer rlstore.stopWriting() + + var parentLayer *Layer + if parent != "" { + for _, lstore := range append([]roLayerStore{rlstore}, rlstores...) 
{ + if lstore != rlstore { + if err := lstore.startReading(); err != nil { + return nil, err + } + defer lstore.stopReading() + } + parentLayer, err = lstore.Get(parent) + if err == nil { + break + } + } + if parentLayer == nil { + return nil, ErrLayerUnknown + } + } + + return rlstore.PutAdditionalLayer(id, parentLayer, names, al.handler) +} + +func (al *additionalLayer) Release() { + al.handler.Release() +} + +func (s *store) Image(id string) (*Image, error) { + if res, done, err := readAllImageStores(s, func(store roImageStore) (*Image, bool, error) { + image, err := store.Get(id) + if err == nil { + if store != s.imageStore { + // found it in a read-only store - readAllImageStores() still has the writeable store locked for reading + if _, localErr := s.imageStore.Get(image.ID); localErr == nil { + // if the lookup key was a name, and we found the image in a read-only + // store, but we have an entry with the same ID in the read-write store, + // then the name was removed when we duplicated the image's + // record into writable storage, so we should ignore this entry + return nil, false, nil + } + } + return image, true, nil + } + return nil, false, nil + }); done { + return res, err + } + return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) +} + +func (s *store) ImagesByTopLayer(id string) ([]*Image, error) { + layer, err := s.Layer(id) + if err != nil { + return nil, err + } + + images := []*Image{} + if _, _, err := readAllImageStores(s, func(store roImageStore) (struct{}, bool, error) { + imageList, err := store.Images() + if err != nil { + return struct{}{}, true, err + } + for _, image := range imageList { + image := image + if image.TopLayer == layer.ID || stringutils.InSlice(image.MappedTopLayers, layer.ID) { + images = append(images, &image) + } + } + return struct{}{}, false, nil + }); err != nil { + return nil, err + } + return images, nil +} + +func (s *store) ImagesByDigest(d digest.Digest) ([]*Image, error) { + images := []*Image{} + if _, _, err := readAllImageStores(s, func(store roImageStore) (struct{}, bool, error) { + imageList, err := store.ByDigest(d) + if err != nil && !errors.Is(err, ErrImageUnknown) { + return struct{}{}, true, err + } + images = append(images, imageList...) 
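+		// A store that simply doesn't know the digest is not a failure; only
+		// unexpected errors abort the scan.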
+ return struct{}{}, false, nil + }); err != nil { + return nil, err + } + return images, nil +} + +func (s *store) Container(id string) (*Container, error) { + res, _, err := readContainerStore(s, func() (*Container, bool, error) { + res, err := s.containerStore.Get(id) + return res, true, err + }) + return res, err +} + +func (s *store) ContainerLayerID(id string) (string, error) { + container, _, err := readContainerStore(s, func() (*Container, bool, error) { + res, err := s.containerStore.Get(id) + return res, true, err + }) + if err != nil { + return "", err + } + return container.LayerID, nil +} + +func (s *store) ContainerByLayer(id string) (*Container, error) { + layer, err := s.Layer(id) + if err != nil { + return nil, err + } + containerList, _, err := readContainerStore(s, func() ([]Container, bool, error) { + res, err := s.containerStore.Containers() + return res, true, err + }) + if err != nil { + return nil, err + } + for _, container := range containerList { + if container.LayerID == layer.ID { + return &container, nil + } + } + + return nil, ErrContainerUnknown +} + +func (s *store) ContainerDirectory(id string) (string, error) { + res, _, err := readContainerStore(s, func() (string, bool, error) { + id, err := s.containerStore.Lookup(id) + if err != nil { + return "", true, err + } + + middleDir := s.graphDriverName + "-containers" + gcpath := filepath.Join(s.GraphRoot(), middleDir, id, "userdata") + if err := os.MkdirAll(gcpath, 0o700); err != nil { + return "", true, err + } + return gcpath, true, nil + }) + return res, err +} + +func (s *store) ContainerRunDirectory(id string) (string, error) { + res, _, err := readContainerStore(s, func() (string, bool, error) { + id, err := s.containerStore.Lookup(id) + if err != nil { + return "", true, err + } + + middleDir := s.graphDriverName + "-containers" + rcpath := filepath.Join(s.RunRoot(), middleDir, id, "userdata") + if err := os.MkdirAll(rcpath, 0o700); err != nil { + return "", true, err + } + return rcpath, true, nil + }) + return res, err +} + +func (s *store) SetContainerDirectoryFile(id, file string, data []byte) error { + dir, err := s.ContainerDirectory(id) + if err != nil { + return err + } + err = os.MkdirAll(filepath.Dir(filepath.Join(dir, file)), 0o700) + if err != nil { + return err + } + return ioutils.AtomicWriteFile(filepath.Join(dir, file), data, 0o600) +} + +func (s *store) FromContainerDirectory(id, file string) ([]byte, error) { + dir, err := s.ContainerDirectory(id) + if err != nil { + return nil, err + } + return os.ReadFile(filepath.Join(dir, file)) +} + +func (s *store) SetContainerRunDirectoryFile(id, file string, data []byte) error { + dir, err := s.ContainerRunDirectory(id) + if err != nil { + return err + } + err = os.MkdirAll(filepath.Dir(filepath.Join(dir, file)), 0o700) + if err != nil { + return err + } + return ioutils.AtomicWriteFile(filepath.Join(dir, file), data, 0o600) +} + +func (s *store) FromContainerRunDirectory(id, file string) ([]byte, error) { + dir, err := s.ContainerRunDirectory(id) + if err != nil { + return nil, err + } + return os.ReadFile(filepath.Join(dir, file)) +} + +func (s *store) Shutdown(force bool) ([]string, error) { + mounted := []string{} + + if err := s.startUsingGraphDriver(); err != nil { + return mounted, err + } + defer s.stopUsingGraphDriver() + + rlstore, err := s.getLayerStoreLocked() + if err != nil { + return mounted, err + } + if err := rlstore.startWriting(); err != nil { + return nil, err + } + defer rlstore.stopWriting() + + layers, err := 
+		rlstore.Layers()
+	if err != nil {
+		return mounted, err
+	}
+	for _, layer := range layers {
+		if layer.MountCount == 0 {
+			continue
+		}
+		mounted = append(mounted, layer.ID)
+		if force {
+			for {
+				_, err2 := rlstore.unmount(layer.ID, force, true)
+				if err2 == ErrLayerNotMounted {
+					break
+				}
+				if err2 != nil {
+					if err == nil {
+						err = err2
+					}
+					break
+				}
+			}
+		}
+	}
+	if len(mounted) > 0 && err == nil {
+		err = fmt.Errorf("a layer is mounted: %w", ErrLayerUsedByContainer)
+	}
+	if err == nil {
+		// We don’t retain the lastWrite value, and treat this update as if someone else did the .Cleanup(),
+		// so that we reload after a .Shutdown() the same way other processes would.
+		// Shutdown() is basically an error path, so reliability is more important than performance.
+		if _, err2 := s.graphLock.RecordWrite(); err2 != nil {
+			err = fmt.Errorf("graphLock.RecordWrite failed: %w", err2)
+		}
+		// Do the Cleanup() only after we are sure that the change was recorded with RecordWrite(), so that
+		// the next user picks it up.
+		if err == nil {
+			err = s.graphDriver.Cleanup()
+		}
+	}
+	return mounted, err
+}
+
+// Convert a BigData key name into an acceptable file name.
+func makeBigDataBaseName(key string) string {
+	for _, ch := range key {
+		// Any rune other than a lowercase ASCII letter, a digit, or a dot
+		// (including any multi-byte rune) makes the key unsafe as a file
+		// name, so fall back to a base64-encoded form of the whole key.
+		if ch != '.' && !(ch >= '0' && ch <= '9') && !(ch >= 'a' && ch <= 'z') {
+			return "=" + base64.StdEncoding.EncodeToString([]byte(key))
+		}
+	}
+	return key
+}
+
+func stringSliceWithoutValue(slice []string, value string) []string {
+	modified := make([]string, 0, len(slice))
+	for _, v := range slice {
+		if v == value {
+			continue
+		}
+		modified = append(modified, v)
+	}
+	return modified
+}
+
+func copyStringSlice(slice []string) []string {
+	if len(slice) == 0 {
+		return nil
+	}
+	ret := make([]string, len(slice))
+	copy(ret, slice)
+	return ret
+}
+
+func copyStringInt64Map(m map[string]int64) map[string]int64 {
+	ret := make(map[string]int64, len(m))
+	for k, v := range m {
+		ret[k] = v
+	}
+	return ret
+}
+
+func copyStringDigestMap(m map[string]digest.Digest) map[string]digest.Digest {
+	ret := make(map[string]digest.Digest, len(m))
+	for k, v := range m {
+		ret[k] = v
+	}
+	return ret
+}
+
+func copyStringStringMap(m map[string]string) map[string]string {
+	ret := make(map[string]string, len(m))
+	for k, v := range m {
+		ret[k] = v
+	}
+	return ret
+}
+
+func copyDigestSlice(slice []digest.Digest) []digest.Digest {
+	if len(slice) == 0 {
+		return nil
+	}
+	ret := make([]digest.Digest, len(slice))
+	copy(ret, slice)
+	return ret
+}
+
+// copyStringInterfaceMap still forces us to assume that the interface{} is
+// a non-pointer scalar value
+func copyStringInterfaceMap(m map[string]interface{}) map[string]interface{} {
+	ret := make(map[string]interface{}, len(m))
+	for k, v := range m {
+		ret[k] = v
+	}
+	return ret
+}
+
+func copyLayerBigDataOptionSlice(slice []LayerBigDataOption) []LayerBigDataOption {
+	ret := make([]LayerBigDataOption, len(slice))
+	copy(ret, slice)
+	return ret
+}
+
+func copyImageBigDataOptionSlice(slice []ImageBigDataOption) []ImageBigDataOption {
+	ret := make([]ImageBigDataOption, len(slice))
+	for i := range slice {
+		ret[i].Key = slice[i].Key
+		ret[i].Data = append([]byte{}, slice[i].Data...)
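+		// Data is deep-copied so callers can't mutate the original backing
+		// array; Key and Digest are value types and safe to assign directly.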
+ ret[i].Digest = slice[i].Digest + } + return ret +} + +func copyContainerBigDataOptionSlice(slice []ContainerBigDataOption) []ContainerBigDataOption { + ret := make([]ContainerBigDataOption, len(slice)) + for i := range slice { + ret[i].Key = slice[i].Key + ret[i].Data = append([]byte{}, slice[i].Data...) + } + return ret +} + +// AutoUserNsMinSize is the minimum size for automatically created user namespaces +const AutoUserNsMinSize = 1024 + +// AutoUserNsMaxSize is the maximum size for automatically created user namespaces +const AutoUserNsMaxSize = 65536 + +// RootAutoUserNsUser is the default user used for root containers when automatically +// creating a user namespace. +const RootAutoUserNsUser = "containers" + +// SetDefaultConfigFilePath sets the default configuration to the specified path, and loads the file. +// Deprecated: Use types.SetDefaultConfigFilePath, which can return an error. +func SetDefaultConfigFilePath(path string) { + _ = types.SetDefaultConfigFilePath(path) +} + +// DefaultConfigFile returns the path to the storage config file used +func DefaultConfigFile(rootless bool) (string, error) { + return types.DefaultConfigFile(rootless) +} + +// ReloadConfigurationFile parses the specified configuration file and overrides +// the configuration in storeOptions. +// Deprecated: Use types.ReloadConfigurationFile, which can return an error. +func ReloadConfigurationFile(configFile string, storeOptions *types.StoreOptions) { + _ = types.ReloadConfigurationFile(configFile, storeOptions) +} + +// GetDefaultMountOptions returns the default mountoptions defined in container/storage +func GetDefaultMountOptions() ([]string, error) { + defaultStoreOptions, err := types.Options() + if err != nil { + return nil, err + } + return GetMountOptions(defaultStoreOptions.GraphDriverName, defaultStoreOptions.GraphDriverOptions) +} + +// GetMountOptions returns the mountoptions for the specified driver and graphDriverOptions +func GetMountOptions(driver string, graphDriverOptions []string) ([]string, error) { + mountOpts := []string{ + ".mountopt", + fmt.Sprintf("%s.mountopt", driver), + } + for _, option := range graphDriverOptions { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return nil, err + } + key = strings.ToLower(key) + for _, m := range mountOpts { + if m == key { + return strings.Split(val, ","), nil + } + } + } + return nil, nil +} + +// Free removes the store from the list of stores +func (s *store) Free() { + for i := 0; i < len(stores); i++ { + if stores[i] == s { + stores = append(stores[:i], stores[i+1:]...) + return + } + } +} + +// Tries to clean up old unreferenced container leftovers. 
+// It returns the first error encountered, but continues as far as it can.
+func (s *store) GarbageCollect() error {
+	_, firstErr := writeToContainerStore(s, func() (struct{}, error) {
+		return struct{}{}, s.containerStore.GarbageCollect()
+	})
+
+	_, moreErr := writeToImageStore(s, func() (struct{}, error) {
+		return struct{}{}, s.imageStore.GarbageCollect()
+	})
+	if firstErr == nil {
+		firstErr = moreErr
+	}
+
+	_, moreErr = writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) {
+		return struct{}{}, rlstore.GarbageCollect()
+	})
+	if firstErr == nil {
+		firstErr = moreErr
+	}
+
+	return firstErr
+}
diff --git a/vendor/github.com/containers/storage/types/default_override_test.conf b/vendor/github.com/containers/storage/types/default_override_test.conf
new file mode 100644
index 0000000000..caa537ba98
--- /dev/null
+++ b/vendor/github.com/containers/storage/types/default_override_test.conf
@@ -0,0 +1,11 @@
+[storage]
+
+# Default Storage Driver
+driver = ""
+
+# Primary Read/Write location of container storage
+graphroot = "environment_override_graphroot"
+
+# Storage path for rootless users
+#
+rootless_storage_path = "environment_override_rootless_storage_path"
diff --git a/vendor/github.com/containers/storage/types/errors.go b/vendor/github.com/containers/storage/types/errors.go
new file mode 100644
index 0000000000..845b14eed7
--- /dev/null
+++ b/vendor/github.com/containers/storage/types/errors.go
@@ -0,0 +1,99 @@
+package types
+
+import (
+	"errors"
+)
+
+var (
+	// ErrContainerUnknown indicates that there was no container with the specified name or ID.
+	ErrContainerUnknown = errors.New("container not known")
+	// ErrDigestUnknown indicates that we were unable to compute the digest of a specified item.
+	ErrDigestUnknown = errors.New("could not compute digest of item")
+	// ErrDuplicateID indicates that an ID which is to be assigned to a new item is already being used.
+	ErrDuplicateID = errors.New("that ID is already in use")
+	// ErrDuplicateImageNames indicates that the read-only store uses the same name for multiple images.
+	ErrDuplicateImageNames = errors.New("read-only image store assigns the same name to multiple images")
+	// ErrDuplicateLayerNames indicates that the read-only store uses the same name for multiple layers.
+	ErrDuplicateLayerNames = errors.New("read-only layer store assigns the same name to multiple layers")
+	// ErrDuplicateName indicates that a name which is to be assigned to a new item is already being used.
+	ErrDuplicateName = errors.New("that name is already in use")
+	// ErrImageUnknown indicates that there was no image with the specified name or ID.
+	ErrImageUnknown = errors.New("image not known")
+	// ErrImageUsedByContainer is returned when the caller attempts to delete an image that is a container's image.
+	ErrImageUsedByContainer = errors.New("image is in use by a container")
+	// ErrIncompleteOptions is returned when the caller attempts to initialize a Store without providing required information.
+	ErrIncompleteOptions = errors.New("missing necessary StoreOptions")
+	// ErrInvalidBigDataName indicates that the name for a big data item is not acceptable; it may be empty.
+	ErrInvalidBigDataName = errors.New("not a valid name for a big data item")
+	// ErrLayerHasChildren is returned when the caller attempts to delete a layer that has children.
+ ErrLayerHasChildren = errors.New("layer has children") + // ErrLayerNotMounted is returned when the requested information can only be computed for a mounted layer, and the layer is not mounted. + ErrLayerNotMounted = errors.New("layer is not mounted") + // ErrLayerUnknown indicates that there was no layer with the specified name or ID. + ErrLayerUnknown = errors.New("layer not known") + // ErrLayerUsedByContainer is returned when the caller attempts to delete a layer that is a container's layer. + ErrLayerUsedByContainer = errors.New("layer is in use by a container") + // ErrLayerUsedByImage is returned when the caller attempts to delete a layer that is an image's top layer. + ErrLayerUsedByImage = errors.New("layer is in use by an image") + // ErrLoadError indicates that there was an initialization error. + ErrLoadError = errors.New("error loading storage metadata") + // ErrNotAContainer is returned when the caller attempts to delete a container that isn't a container. + ErrNotAContainer = errors.New("identifier is not a container") + // ErrNotALayer is returned when the caller attempts to delete a layer that isn't a layer. + ErrNotALayer = errors.New("identifier is not a layer") + // ErrNotAnID is returned when the caller attempts to read or write metadata from an item that doesn't exist. + ErrNotAnID = errors.New("identifier is not a layer, image, or container") + // ErrNotAnImage is returned when the caller attempts to delete an image that isn't an image. + ErrNotAnImage = errors.New("identifier is not an image") + // ErrParentIsContainer is returned when a caller attempts to create a layer as a child of a container's layer. + ErrParentIsContainer = errors.New("would-be parent layer is a container") + // ErrParentUnknown indicates that we didn't record the ID of the parent of the specified layer. + ErrParentUnknown = errors.New("parent of layer not known") + // ErrSizeUnknown is returned when the caller asks for the size of a big data item, but the Store couldn't determine the answer. + ErrSizeUnknown = errors.New("size is not known") + // ErrStoreIsReadOnly is returned when the caller makes a call to a read-only store that would require modifying its contents. + ErrStoreIsReadOnly = errors.New("called a write method on a read-only store") + // ErrNotSupported is returned when the requested functionality is not supported. + ErrNotSupported = errors.New("not supported") + // ErrInvalidMappings is returned when the specified mappings are invalid. + ErrInvalidMappings = errors.New("invalid mappings specified") + // ErrNoAvailableIDs is returned when there are not enough unused IDS within the user namespace. + ErrNoAvailableIDs = errors.New("not enough unused IDs in user namespace") + + // ErrLayerUnaccounted describes a layer that is present in the lower-level storage driver, + // but which is not known to or managed by the higher-level driver-agnostic logic. + ErrLayerUnaccounted = errors.New("layer in lower level storage driver not accounted for") + // ErrLayerUnreferenced describes a layer which is not used by any image or container. + ErrLayerUnreferenced = errors.New("layer not referenced by any images or containers") + // ErrLayerIncorrectContentDigest describes a layer for which the contents of one or more + // files which were added in the layer appear to have changed. It may instead look like an + // unnamed "file integrity checksum failed" error. 
+	ErrLayerIncorrectContentDigest = errors.New("layer content incorrect digest")
+	// ErrLayerIncorrectContentSize describes a layer for which regenerating the diff that was
+	// used to populate the layer produced a diff of a different size. We check the digest
+	// first, so it's highly unlikely you'll ever see this error.
+	ErrLayerIncorrectContentSize = errors.New("layer content incorrect size")
+	// ErrLayerContentModified describes a layer which contains contents which should not be
+	// there, or for which ownership/permissions/dates have been changed.
+	ErrLayerContentModified = errors.New("layer content modified")
+	// ErrLayerDataMissing describes a layer which is missing a big data item.
+	ErrLayerDataMissing = errors.New("layer data item is missing")
+	// ErrLayerMissing describes a layer which is the missing parent of a layer.
+	ErrLayerMissing = errors.New("layer is missing")
+	// ErrImageLayerMissing describes an image which claims to have a layer that we don't know
+	// about.
+	ErrImageLayerMissing = errors.New("image layer is missing")
+	// ErrImageDataMissing describes an image which is missing a big data item.
+	ErrImageDataMissing = errors.New("image data item is missing")
+	// ErrImageDataIncorrectSize describes an image which has a big data item which looks like
+	// its size has changed, likely because it's been modified somehow.
+	ErrImageDataIncorrectSize = errors.New("image data item has incorrect size")
+	// ErrContainerImageMissing describes a container which claims to be based on an image that
+	// we don't know about.
+	ErrContainerImageMissing = errors.New("image missing")
+	// ErrContainerDataMissing describes a container which is missing a big data item.
+	ErrContainerDataMissing = errors.New("container data item is missing")
+	// ErrContainerDataIncorrectSize describes a container which has a big data item which looks
+	// like its size has changed, likely because it's been modified somehow.
+	ErrContainerDataIncorrectSize = errors.New("container data item has incorrect size")
+)
diff --git a/vendor/github.com/containers/storage/types/idmappings.go b/vendor/github.com/containers/storage/types/idmappings.go
new file mode 100644
index 0000000000..aabdf7a81f
--- /dev/null
+++ b/vendor/github.com/containers/storage/types/idmappings.go
@@ -0,0 +1,101 @@
+package types
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/containers/storage/pkg/idtools"
+)
+
+// AutoUserNsOptions defines how to automatically create a user namespace.
+type AutoUserNsOptions struct {
+	// Size defines the size for the user namespace. If it is set to a
+	// value bigger than 0, the user namespace will have exactly this size.
+	// If it is not set, some heuristics will be used to find its size.
+	Size uint32
+	// InitialSize defines the minimum size for the user namespace.
+	// The created user namespace will have at least this size.
+	InitialSize uint32
+	// PasswdFile to use if the container uses a volume.
+	PasswdFile string
+	// GroupFile to use if the container uses a volume.
+	GroupFile string
+	// AdditionalUIDMappings specifies additional UID mappings to include in
+	// the generated user namespace.
+	AdditionalUIDMappings []idtools.IDMap
+	// AdditionalGIDMappings specifies additional GID mappings to include in
+	// the generated user namespace.
+	AdditionalGIDMappings []idtools.IDMap
+}
+
+// IDMappingOptions are used for specifying how ID mapping should be set up for
+// a layer or container.
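+// For example, a UIDMap entry of {ContainerID: 0, HostID: 100000, Size: 65536}
+// (illustrative values) maps IDs 0-65535 inside the container to host IDs
+// 100000-165535.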
+type IDMappingOptions struct {
+	// UIDMap and GIDMap are used for setting up a layer's root filesystem
+	// for use inside of a user namespace where ID mapping is being used.
+	// If HostUIDMapping/HostGIDMapping is true, no mapping of the
+	// respective type will be used. Otherwise, if UIDMap and/or GIDMap
+	// contain at least one mapping, one or both will be used. By default,
+	// if neither of those conditions apply, if the layer has a parent
+	// layer, the parent layer's mapping will be used, and if it does not
+	// have a parent layer, the mapping which was passed to the Store
+	// object when it was initialized will be used.
+	HostUIDMapping bool
+	HostGIDMapping bool
+	UIDMap         []idtools.IDMap
+	GIDMap         []idtools.IDMap
+	AutoUserNs     bool
+	AutoUserNsOpts AutoUserNsOptions
+}
+
+// ParseIDMapping takes UID and GID mapping specifications, plus subuid and
+// subgid maps, and returns a storage ID mapping.
+func ParseIDMapping(UIDMapSlice, GIDMapSlice []string, subUIDMap, subGIDMap string) (*IDMappingOptions, error) {
+	options := IDMappingOptions{
+		HostUIDMapping: true,
+		HostGIDMapping: true,
+	}
+	if subGIDMap == "" && subUIDMap != "" {
+		subGIDMap = subUIDMap
+	}
+	if subUIDMap == "" && subGIDMap != "" {
+		subUIDMap = subGIDMap
+	}
+	if len(GIDMapSlice) == 0 && len(UIDMapSlice) != 0 {
+		GIDMapSlice = UIDMapSlice
+	}
+	if len(UIDMapSlice) == 0 && len(GIDMapSlice) != 0 {
+		UIDMapSlice = GIDMapSlice
+	}
+	if len(UIDMapSlice) == 0 && subUIDMap == "" && os.Getuid() != 0 {
+		UIDMapSlice = []string{fmt.Sprintf("0:%d:1", os.Getuid())}
+	}
+	if len(GIDMapSlice) == 0 && subGIDMap == "" && os.Getuid() != 0 {
+		GIDMapSlice = []string{fmt.Sprintf("0:%d:1", os.Getgid())}
+	}
+
+	if subUIDMap != "" && subGIDMap != "" {
+		mappings, err := idtools.NewIDMappings(subUIDMap, subGIDMap)
+		if err != nil {
+			return nil, fmt.Errorf("failed to create NewIDMappings for uidmap=%s gidmap=%s: %w", subUIDMap, subGIDMap, err)
+		}
+		options.UIDMap = mappings.UIDs()
+		options.GIDMap = mappings.GIDs()
+	}
+	parsedUIDMap, err := idtools.ParseIDMap(UIDMapSlice, "UID")
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse UID map %s: %w", UIDMapSlice, err)
+	}
+	parsedGIDMap, err := idtools.ParseIDMap(GIDMapSlice, "GID")
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse GID map %s: %w", GIDMapSlice, err)
+	}
+	options.UIDMap = append(options.UIDMap, parsedUIDMap...)
+	options.GIDMap = append(options.GIDMap, parsedGIDMap...)
+	if len(options.UIDMap) > 0 {
+		options.HostUIDMapping = false
+	}
+	if len(options.GIDMap) > 0 {
+		options.HostGIDMapping = false
+	}
+	return &options, nil
+}
diff --git a/vendor/github.com/containers/storage/types/options.go b/vendor/github.com/containers/storage/types/options.go
new file mode 100644
index 0000000000..5ae667a493
--- /dev/null
+++ b/vendor/github.com/containers/storage/types/options.go
@@ -0,0 +1,554 @@
+package types
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/BurntSushi/toml"
+	cfg "github.com/containers/storage/pkg/config"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/sirupsen/logrus"
+)
+
+// TOML-friendly explicit tables used for conversions.
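+// A minimal configuration that decodes into this struct looks like:
+//
+//	[storage]
+//	driver = "overlay"
+//	runroot = "/run/containers/storage"
+//	graphroot = "/var/lib/containers/storage"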
+type TomlConfig struct { + Storage struct { + Driver string `toml:"driver,omitempty"` + DriverPriority []string `toml:"driver_priority,omitempty"` + RunRoot string `toml:"runroot,omitempty"` + ImageStore string `toml:"imagestore,omitempty"` + GraphRoot string `toml:"graphroot,omitempty"` + RootlessStoragePath string `toml:"rootless_storage_path,omitempty"` + TransientStore bool `toml:"transient_store,omitempty"` + Options cfg.OptionsConfig `toml:"options,omitempty"` + } `toml:"storage"` +} + +const ( + overlayDriver = "overlay" + overlay2 = "overlay2" + storageConfEnv = "CONTAINERS_STORAGE_CONF" +) + +var ( + defaultStoreOptionsOnce sync.Once + loadDefaultStoreOptionsErr error + once sync.Once + storeOptions StoreOptions + storeError error + defaultConfigFileSet bool + // defaultConfigFile path to the system wide storage.conf file + defaultConfigFile = SystemConfigFile + // DefaultStoreOptions is a reasonable default set of options. + defaultStoreOptions StoreOptions +) + +func loadDefaultStoreOptions() { + defaultStoreOptions.GraphDriverName = "" + + setDefaults := func() { + // reload could set values to empty for run and graph root if config does not contains anything + if defaultStoreOptions.RunRoot == "" { + defaultStoreOptions.RunRoot = defaultRunRoot + } + if defaultStoreOptions.GraphRoot == "" { + defaultStoreOptions.GraphRoot = defaultGraphRoot + } + } + setDefaults() + + if path, ok := os.LookupEnv(storageConfEnv); ok { + defaultOverrideConfigFile = path + if err := ReloadConfigurationFileIfNeeded(path, &defaultStoreOptions); err != nil { + loadDefaultStoreOptionsErr = err + return + } + setDefaults() + return + } + + if path, ok := os.LookupEnv("XDG_CONFIG_HOME"); ok { + homeConfigFile := filepath.Join(path, "containers", "storage.conf") + if _, err := os.Stat(homeConfigFile); err == nil { + // user storage.conf in XDG_CONFIG_HOME if it exists + defaultOverrideConfigFile = homeConfigFile + } else { + if !os.IsNotExist(err) { + loadDefaultStoreOptionsErr = err + return + } + } + } + + _, err := os.Stat(defaultOverrideConfigFile) + if err == nil { + // The DefaultConfigFile(rootless) function returns the path + // of the used storage.conf file, by returning defaultConfigFile + // If override exists containers/storage uses it by default. + defaultConfigFile = defaultOverrideConfigFile + if err := ReloadConfigurationFileIfNeeded(defaultOverrideConfigFile, &defaultStoreOptions); err != nil { + loadDefaultStoreOptionsErr = err + return + } + setDefaults() + return + } + + if !os.IsNotExist(err) { + logrus.Warningf("Attempting to use %s, %v", defaultConfigFile, err) + } + if err := ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions); err != nil && !errors.Is(err, os.ErrNotExist) { + loadDefaultStoreOptionsErr = err + return + } + setDefaults() +} + +// defaultStoreOptionsIsolated is an internal implementation detail of DefaultStoreOptions to allow testing. +// Everyone but the tests this is intended for should only call DefaultStoreOptions, never this function. 
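+// Options are resolved in increasing order of precedence: built-in defaults,
+// then rootless adjustments when applicable, and finally the configuration
+// file named by storageConf, with $HOME/$UID-style variables in paths
+// expanded last.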
+func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf string) (StoreOptions, error) {
+	var (
+		defaultRootlessRunRoot   string
+		defaultRootlessGraphRoot string
+		err                      error
+	)
+	defaultStoreOptionsOnce.Do(loadDefaultStoreOptions)
+	if loadDefaultStoreOptionsErr != nil {
+		return StoreOptions{}, loadDefaultStoreOptionsErr
+	}
+	storageOpts := defaultStoreOptions
+	if rootless && rootlessUID != 0 {
+		storageOpts, err = getRootlessStorageOpts(rootlessUID, storageOpts)
+		if err != nil {
+			return storageOpts, err
+		}
+	}
+	_, err = os.Stat(storageConf)
+	if err != nil && !os.IsNotExist(err) {
+		return storageOpts, err
+	}
+	if err == nil && !defaultConfigFileSet {
+		defaultRootlessRunRoot = storageOpts.RunRoot
+		defaultRootlessGraphRoot = storageOpts.GraphRoot
+		storageOpts = StoreOptions{}
+		reloadConfigurationFileIfNeeded(storageConf, &storageOpts)
+		if rootless && rootlessUID != 0 {
+			// If the file did not specify a graphroot or runroot,
+			// set sane defaults so we don't try and use root-owned
+			// directories
+			if storageOpts.RunRoot == "" {
+				storageOpts.RunRoot = defaultRootlessRunRoot
+			}
+			if storageOpts.GraphRoot == "" {
+				if storageOpts.RootlessStoragePath != "" {
+					storageOpts.GraphRoot = storageOpts.RootlessStoragePath
+				} else {
+					storageOpts.GraphRoot = defaultRootlessGraphRoot
+				}
+			}
+		}
+	}
+	if storageOpts.RunRoot == "" {
+		return storageOpts, fmt.Errorf("runroot must be set")
+	}
+	runRoot, err := expandEnvPath(storageOpts.RunRoot, rootlessUID)
+	if err != nil {
+		return storageOpts, err
+	}
+	storageOpts.RunRoot = runRoot
+
+	if storageOpts.GraphRoot == "" {
+		return storageOpts, fmt.Errorf("graphroot must be set")
+	}
+	graphRoot, err := expandEnvPath(storageOpts.GraphRoot, rootlessUID)
+	if err != nil {
+		return storageOpts, err
+	}
+	storageOpts.GraphRoot = graphRoot
+
+	if storageOpts.RootlessStoragePath != "" {
+		storagePath, err := expandEnvPath(storageOpts.RootlessStoragePath, rootlessUID)
+		if err != nil {
+			return storageOpts, err
+		}
+		storageOpts.RootlessStoragePath = storagePath
+	}
+
+	if storageOpts.ImageStore != "" && storageOpts.ImageStore == storageOpts.GraphRoot {
+		return storageOpts, fmt.Errorf("imagestore %s must either be unset or be different from graphroot", storageOpts.ImageStore)
+	}
+
+	return storageOpts, nil
+}
+
+// loadStoreOptions returns the default storage options for containers.
+func loadStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) {
+	storageConf, err := DefaultConfigFile(rootless && rootlessUID != 0)
+	if err != nil {
+		return defaultStoreOptions, err
+	}
+	return defaultStoreOptionsIsolated(rootless, rootlessUID, storageConf)
+}
+
+// UpdateStoreOptions should be called only if the container engine has
+// received a SIGHUP; otherwise use DefaultStoreOptions.
+func UpdateStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) {
+	storeOptions, storeError = loadStoreOptions(rootless, rootlessUID)
+	return storeOptions, storeError
+}
+
+// DefaultStoreOptions returns the default storage options for containers.
+func DefaultStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) {
+	once.Do(func() {
+		storeOptions, storeError = loadStoreOptions(rootless, rootlessUID)
+	})
+	return storeOptions, storeError
+}
+
+// StoreOptions is used for passing initialization options to GetStore(), for
+// initializing a Store object and the underlying storage that it controls.
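+// A typical caller obtains options and hands them to the parent storage
+// package, roughly:
+//
+//	opts, err := types.DefaultStoreOptions(rootless, rootlessUID)
+//	// ...
+//	store, err := storage.GetStore(opts)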
+type StoreOptions struct { + // RunRoot is the filesystem path under which we can store run-time + // information, such as the locations of active mount points, that we + // want to lose if the host is rebooted. + RunRoot string `json:"runroot,omitempty"` + // GraphRoot is the filesystem path under which we will store the + // contents of layers, images, and containers. + GraphRoot string `json:"root,omitempty"` + // Image Store is the alternate location of image store if a location + // separate from the container store is required. + ImageStore string `json:"imagestore,omitempty"` + // RootlessStoragePath is the storage path for rootless users + // default $HOME/.local/share/containers/storage + RootlessStoragePath string `toml:"rootless_storage_path"` + // If the driver is not specified, the best suited driver will be picked + // either from GraphDriverPriority, if specified, or from the platform + // dependent priority list (in that order). + GraphDriverName string `json:"driver,omitempty"` + // GraphDriverPriority is a list of storage drivers that will be tried + // to initialize the Store for a given RunRoot and GraphRoot unless a + // GraphDriverName is set. + // This list can be used to define a custom order in which the drivers + // will be tried. + GraphDriverPriority []string `json:"driver-priority,omitempty"` + // GraphDriverOptions are driver-specific options. + GraphDriverOptions []string `json:"driver-options,omitempty"` + // UIDMap and GIDMap are used for setting up a container's root filesystem + // for use inside of a user namespace where UID mapping is being used. + UIDMap []idtools.IDMap `json:"uidmap,omitempty"` + GIDMap []idtools.IDMap `json:"gidmap,omitempty"` + // RootAutoNsUser is the user used to pick a subrange when automatically setting + // a user namespace for the root user. + RootAutoNsUser string `json:"root_auto_ns_user,omitempty"` + // AutoNsMinSize is the minimum size for an automatic user namespace. + AutoNsMinSize uint32 `json:"auto_userns_min_size,omitempty"` + // AutoNsMaxSize is the maximum size for an automatic user namespace. + AutoNsMaxSize uint32 `json:"auto_userns_max_size,omitempty"` + // PullOptions specifies options to be handed to pull managers + // This API is experimental and can be changed without bumping the major version number. + PullOptions map[string]string `toml:"pull_options"` + // DisableVolatile doesn't allow volatile mounts when it is set. 
+ DisableVolatile bool `json:"disable-volatile,omitempty"` + // If transient, don't persist containers over boot (stores db in runroot) + TransientStore bool `json:"transient_store,omitempty"` +} + +// isRootlessDriver returns true if the given storage driver is valid for containers running as non root +func isRootlessDriver(driver string) bool { + validDrivers := map[string]bool{ + "btrfs": true, + "overlay": true, + "overlay2": true, + "vfs": true, + } + return validDrivers[driver] +} + +// getRootlessStorageOpts returns the storage opts for containers running as non root +func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOptions, error) { + var opts StoreOptions + + dataDir, rootlessRuntime, err := getRootlessDirInfo(rootlessUID) + if err != nil { + return opts, err + } + opts.RunRoot = rootlessRuntime + opts.PullOptions = systemOpts.PullOptions + if systemOpts.RootlessStoragePath != "" { + opts.GraphRoot, err = expandEnvPath(systemOpts.RootlessStoragePath, rootlessUID) + if err != nil { + return opts, err + } + } else { + opts.GraphRoot = filepath.Join(dataDir, "containers", "storage") + } + + if driver := systemOpts.GraphDriverName; isRootlessDriver(driver) { + opts.GraphDriverName = driver + } + if driver := os.Getenv("STORAGE_DRIVER"); driver != "" { + opts.GraphDriverName = driver + } + if opts.GraphDriverName == overlay2 { + logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver") + opts.GraphDriverName = overlayDriver + } + + // If the configuration file was explicitly set, then copy all the options + // present. + if defaultConfigFileSet { + opts.GraphDriverOptions = systemOpts.GraphDriverOptions + opts.ImageStore = systemOpts.ImageStore + } else if opts.GraphDriverName == overlayDriver { + for _, o := range systemOpts.GraphDriverOptions { + if strings.Contains(o, "ignore_chown_errors") { + opts.GraphDriverOptions = append(opts.GraphDriverOptions, o) + break + } + } + } + if opts.GraphDriverName == "" { + if len(systemOpts.GraphDriverPriority) == 0 { + dirEntries, err := os.ReadDir(opts.GraphRoot) + if err == nil { + for _, entry := range dirEntries { + if strings.HasSuffix(entry.Name(), "-images") { + opts.GraphDriverName = strings.TrimSuffix(entry.Name(), "-images") + break + } + } + } + + if opts.GraphDriverName == "" { + if canUseRootlessOverlay(opts.GraphRoot, opts.RunRoot) { + opts.GraphDriverName = overlayDriver + } else { + opts.GraphDriverName = "vfs" + } + } + } else { + opts.GraphDriverPriority = systemOpts.GraphDriverPriority + } + } + + if os.Getenv("STORAGE_OPTS") != "" { + opts.GraphDriverOptions = append(opts.GraphDriverOptions, strings.Split(os.Getenv("STORAGE_OPTS"), ",")...) 
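+		// Note that the STORAGE_OPTS entries above are appended after
+		// everything derived from storage.conf, so they take effect last.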
+ } + + return opts, nil +} + +// DefaultStoreOptionsAutoDetectUID returns the default storage ops for containers +func DefaultStoreOptionsAutoDetectUID() (StoreOptions, error) { + uid := getRootlessUID() + return DefaultStoreOptions(uid != 0, uid) +} + +var prevReloadConfig = struct { + storeOptions *StoreOptions + mod time.Time + mutex sync.Mutex + configFile string +}{} + +// SetDefaultConfigFilePath sets the default configuration to the specified path +func SetDefaultConfigFilePath(path string) error { + defaultConfigFile = path + defaultConfigFileSet = true + return ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions) +} + +func ReloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptions) error { + prevReloadConfig.mutex.Lock() + defer prevReloadConfig.mutex.Unlock() + + fi, err := os.Stat(configFile) + if err != nil { + return err + } + + mtime := fi.ModTime() + if prevReloadConfig.storeOptions != nil && prevReloadConfig.mod == mtime && prevReloadConfig.configFile == configFile { + *storeOptions = *prevReloadConfig.storeOptions + return nil + } + + if err := ReloadConfigurationFile(configFile, storeOptions); err != nil { + return err + } + + cOptions := *storeOptions + prevReloadConfig.storeOptions = &cOptions + prevReloadConfig.mod = mtime + prevReloadConfig.configFile = configFile + return nil +} + +// ReloadConfigurationFile parses the specified configuration file and overrides +// the configuration in storeOptions. +func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) error { + config := new(TomlConfig) + + meta, err := toml.DecodeFile(configFile, &config) + if err == nil { + keys := meta.Undecoded() + if len(keys) > 0 { + logrus.Warningf("Failed to decode the keys %q from %q", keys, configFile) + } + } else { + if !os.IsNotExist(err) { + logrus.Warningf("Failed to read %s %v\n", configFile, err.Error()) + return err + } + } + + // Clear storeOptions of previous settings + *storeOptions = StoreOptions{} + if config.Storage.Driver != "" { + storeOptions.GraphDriverName = config.Storage.Driver + } + if os.Getenv("STORAGE_DRIVER") != "" { + config.Storage.Driver = os.Getenv("STORAGE_DRIVER") + storeOptions.GraphDriverName = config.Storage.Driver + } + if storeOptions.GraphDriverName == overlay2 { + logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver") + storeOptions.GraphDriverName = overlayDriver + } + storeOptions.GraphDriverPriority = config.Storage.DriverPriority + if storeOptions.GraphDriverName == "" && len(storeOptions.GraphDriverPriority) == 0 { + logrus.Warnf("The storage 'driver' option should be set in %s. 
A driver was picked automatically.", configFile) + } + if config.Storage.RunRoot != "" { + storeOptions.RunRoot = config.Storage.RunRoot + } + if config.Storage.GraphRoot != "" { + storeOptions.GraphRoot = config.Storage.GraphRoot + } + if config.Storage.ImageStore != "" { + storeOptions.ImageStore = config.Storage.ImageStore + } + if config.Storage.RootlessStoragePath != "" { + storeOptions.RootlessStoragePath = config.Storage.RootlessStoragePath + } + for _, s := range config.Storage.Options.AdditionalImageStores { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.imagestore=%s", config.Storage.Driver, s)) + } + for _, s := range config.Storage.Options.AdditionalLayerStores { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.additionallayerstore=%s", config.Storage.Driver, s)) + } + if config.Storage.Options.Size != "" { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.size=%s", config.Storage.Driver, config.Storage.Options.Size)) + } + if config.Storage.Options.MountProgram != "" { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.mount_program=%s", config.Storage.Driver, config.Storage.Options.MountProgram)) + } + if config.Storage.Options.SkipMountHome != "" { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.skip_mount_home=%s", config.Storage.Driver, config.Storage.Options.SkipMountHome)) + } + if config.Storage.Options.IgnoreChownErrors != "" { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.ignore_chown_errors=%s", config.Storage.Driver, config.Storage.Options.IgnoreChownErrors)) + } + if config.Storage.Options.ForceMask != 0 { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.force_mask=%o", config.Storage.Driver, config.Storage.Options.ForceMask)) + } + if config.Storage.Options.MountOpt != "" { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.mountopt=%s", config.Storage.Driver, config.Storage.Options.MountOpt)) + } + + uidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapUIDs}, "remap-uids") + if err != nil { + return err + } + gidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapGIDs}, "remap-gids") + if err != nil { + return err + } + + if config.Storage.Options.RemapUser != "" && config.Storage.Options.RemapGroup == "" { + config.Storage.Options.RemapGroup = config.Storage.Options.RemapUser + } + if config.Storage.Options.RemapGroup != "" && config.Storage.Options.RemapUser == "" { + config.Storage.Options.RemapUser = config.Storage.Options.RemapGroup + } + if config.Storage.Options.RemapUser != "" && config.Storage.Options.RemapGroup != "" { + mappings, err := idtools.NewIDMappings(config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup) + if err != nil { + logrus.Warningf("Error initializing ID mappings for %s:%s %v\n", config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup, err) + return err + } + uidmap = mappings.UIDs() + gidmap = mappings.GIDs() + } + storeOptions.UIDMap = uidmap + storeOptions.GIDMap = gidmap + storeOptions.RootAutoNsUser = config.Storage.Options.RootAutoUsernsUser + if config.Storage.Options.AutoUsernsMinSize > 0 { + storeOptions.AutoNsMinSize = config.Storage.Options.AutoUsernsMinSize + } + if config.Storage.Options.AutoUsernsMaxSize > 0 { + 
storeOptions.AutoNsMaxSize = config.Storage.Options.AutoUsernsMaxSize + } + if config.Storage.Options.PullOptions != nil { + storeOptions.PullOptions = config.Storage.Options.PullOptions + } + + storeOptions.DisableVolatile = config.Storage.Options.DisableVolatile + storeOptions.TransientStore = config.Storage.TransientStore + + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, cfg.GetGraphDriverOptions(storeOptions.GraphDriverName, config.Storage.Options)...) + + if opts, ok := os.LookupEnv("STORAGE_OPTS"); ok { + storeOptions.GraphDriverOptions = strings.Split(opts, ",") + } + if len(storeOptions.GraphDriverOptions) == 1 && storeOptions.GraphDriverOptions[0] == "" { + storeOptions.GraphDriverOptions = nil + } + return nil +} + +func Options() (StoreOptions, error) { + defaultStoreOptionsOnce.Do(loadDefaultStoreOptions) + return defaultStoreOptions, loadDefaultStoreOptionsErr +} + +// Save overwrites the tomlConfig in storage.conf with the given conf +func Save(conf TomlConfig, rootless bool) error { + configFile, err := DefaultConfigFile(rootless) + if err != nil { + return err + } + + if err = os.Remove(configFile); !os.IsNotExist(err) && err != nil { + return err + } + + f, err := os.Create(configFile) + if err != nil { + return err + } + + return toml.NewEncoder(f).Encode(conf) +} + +// StorageConfig is used to retrieve the storage.conf toml in order to overwrite it +func StorageConfig(rootless bool) (*TomlConfig, error) { + config := new(TomlConfig) + + configFile, err := DefaultConfigFile(rootless) + if err != nil { + return nil, err + } + + _, err = toml.DecodeFile(configFile, &config) + if err != nil { + return nil, err + } + + return config, nil +} diff --git a/vendor/github.com/containers/storage/types/options_darwin.go b/vendor/github.com/containers/storage/types/options_darwin.go new file mode 100644 index 0000000000..3eecc2b827 --- /dev/null +++ b/vendor/github.com/containers/storage/types/options_darwin.go @@ -0,0 +1,16 @@ +package types + +const ( + // these are default path for run and graph root for rootful users + // for rootless path is constructed via getRootlessStorageOpts + defaultRunRoot string = "/run/containers/storage" + defaultGraphRoot string = "/var/lib/containers/storage" + SystemConfigFile = "/usr/share/containers/storage.conf" +) + +var defaultOverrideConfigFile = "/etc/containers/storage.conf" + +// canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers +func canUseRootlessOverlay(home, runhome string) bool { + return false +} diff --git a/vendor/github.com/containers/storage/types/options_freebsd.go b/vendor/github.com/containers/storage/types/options_freebsd.go new file mode 100644 index 0000000000..be2bc2f27d --- /dev/null +++ b/vendor/github.com/containers/storage/types/options_freebsd.go @@ -0,0 +1,19 @@ +package types + +const ( + // these are default path for run and graph root for rootful users + // for rootless path is constructed via getRootlessStorageOpts + defaultRunRoot string = "/var/run/containers/storage" + defaultGraphRoot string = "/var/db/containers/storage" + SystemConfigFile = "/usr/local/share/containers/storage.conf" +) + +// defaultConfigFile path to the system wide storage.conf file +var ( + defaultOverrideConfigFile = "/usr/local/etc/containers/storage.conf" +) + +// canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers +func canUseRootlessOverlay(home, runhome string) bool { + return 
false +} diff --git a/vendor/github.com/containers/storage/types/options_linux.go b/vendor/github.com/containers/storage/types/options_linux.go new file mode 100644 index 0000000000..a28e82883c --- /dev/null +++ b/vendor/github.com/containers/storage/types/options_linux.go @@ -0,0 +1,52 @@ +package types + +import ( + "os/exec" + "strconv" + "strings" + + "golang.org/x/sys/unix" +) + +const ( + // these are default path for run and graph root for rootful users + // for rootless path is constructed via getRootlessStorageOpts + defaultRunRoot string = "/run/containers/storage" + defaultGraphRoot string = "/var/lib/containers/storage" + SystemConfigFile = "/usr/share/containers/storage.conf" +) + +// defaultConfigFile path to the system wide storage.conf file +var ( + defaultOverrideConfigFile = "/etc/containers/storage.conf" +) + +// canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers +func canUseRootlessOverlay(home, runhome string) bool { + // we check first for fuse-overlayfs since it is cheaper. + if path, _ := exec.LookPath("fuse-overlayfs"); path != "" { + return true + } + + // We cannot use overlay.SupportsNativeOverlay since canUseRootlessOverlay is called by Podman + // before we enter the user namespace and the driver we pick here is written in the podman database. + // Checking the kernel version is usually not a good idea since the feature could be back-ported, e.g. RHEL + // but this is just an heuristic and on RHEL we always install the storage.conf file. + // native overlay for rootless was added upstream in 5.13 (at least the first version that we support), so check + // that the kernel is >= 5.13. + var uts unix.Utsname + if err := unix.Uname(&uts); err == nil { + parts := strings.Split(string(uts.Release[:]), ".") + major, _ := strconv.Atoi(parts[0]) + if major >= 6 { + return true + } + if major == 5 && len(parts) > 1 { + minor, _ := strconv.Atoi(parts[1]) + if minor >= 13 { + return true + } + } + } + return false +} diff --git a/vendor/github.com/containers/storage/types/options_windows.go b/vendor/github.com/containers/storage/types/options_windows.go new file mode 100644 index 0000000000..c1bea9fac0 --- /dev/null +++ b/vendor/github.com/containers/storage/types/options_windows.go @@ -0,0 +1,19 @@ +package types + +const ( + // these are default path for run and graph root for rootful users + // for rootless path is constructed via getRootlessStorageOpts + defaultRunRoot string = "/run/containers/storage" + defaultGraphRoot string = "/var/lib/containers/storage" + SystemConfigFile = "/usr/share/containers/storage.conf" +) + +// defaultConfigFile path to the system wide storage.conf file +var ( + defaultOverrideConfigFile = "/etc/containers/storage.conf" +) + +// canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers +func canUseRootlessOverlay(home, runhome string) bool { + return false +} diff --git a/vendor/github.com/containers/storage/types/storage_broken.conf b/vendor/github.com/containers/storage/types/storage_broken.conf new file mode 100644 index 0000000000..3bca1d9784 --- /dev/null +++ b/vendor/github.com/containers/storage/types/storage_broken.conf @@ -0,0 +1,18 @@ +# This file is is a TEST configuration file for all tools +# that use the containers/storage library. +# See man 5 containers-storage.conf for more information +# The "container storage" table contains all of the server options. 
+foo = "bar" + +[storage] + +# Default Storage Driver +driver = "" + +# Temporary storage location +runroot = "/run/containers/test" + +[storage.options] +# Primary Read/Write location of container storage +graphroot = "/var/lib/containers/storage" + diff --git a/vendor/github.com/containers/storage/types/storage_test.conf b/vendor/github.com/containers/storage/types/storage_test.conf new file mode 100644 index 0000000000..87b0c9bb16 --- /dev/null +++ b/vendor/github.com/containers/storage/types/storage_test.conf @@ -0,0 +1,45 @@ +# This file is a TEST configuration file for all tools +# that use the containers/storage library. +# See man 5 containers-storage.conf for more information +# The "container storage" table contains all of the server options. +[storage] + +# Default Storage Driver +driver = "" + +# Temporary storage location +runroot = "$HOME/$UID/containers/storage" + +# Primary Read/Write location of container storage +graphroot = "$HOME/$UID/containers/storage" + +# Storage path for rootless users +# +rootless_storage_path = "$HOME/$UID/containers/storage" + +[storage.options] +# Storage options to be passed to underlying storage drivers + +# AdditionalImageStores is used to pass paths to additional Read/Only image stores +# Must be comma separated list. +additionalimagestores = [ +] + +# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of +# a container, to the UIDs/GIDs as they should appear outside of the container, +# and the length of the range of UIDs/GIDs. Additional mapped sets can be +# listed and will be heeded by libraries, but there are limits to the number of +# mappings which the kernel will allow when you later attempt to run a +# container. +# +remap-uids = "0:1000000000:30000" +remap-gids = "0:1500000000:60000" + +[storage.options.overlay] + +# mountopt specifies comma separated list of extra mount options +mountopt = "nodev" + + +[storage.options.thinpool] +# Storage Options for thinpool diff --git a/vendor/github.com/containers/storage/types/utils.go b/vendor/github.com/containers/storage/types/utils.go new file mode 100644 index 0000000000..73134f82da --- /dev/null +++ b/vendor/github.com/containers/storage/types/utils.go @@ -0,0 +1,220 @@ +package types + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/containers/storage/pkg/homedir" + "github.com/containers/storage/pkg/system" + "github.com/sirupsen/logrus" +) + +// GetRootlessRuntimeDir returns the runtime directory when running as non root +func GetRootlessRuntimeDir(rootlessUID int) (string, error) { + path, err := getRootlessRuntimeDir(rootlessUID) + if err != nil { + return "", err + } + path = filepath.Join(path, "containers") + if err := os.MkdirAll(path, 0o700); err != nil { + return "", fmt.Errorf("unable to make rootless runtime: %w", err) + } + return path, nil +} + +type rootlessRuntimeDirEnvironment interface { + getProcCommandFile() string + getRunUserDir() string + getTmpPerUserDir() string + + homeDirGetRuntimeDir() (string, error) + systemLstat(string) (*system.StatT, error) + homedirGet() string +} + +type rootlessRuntimeDirEnvironmentImplementation struct { + procCommandFile string + runUserDir string + tmpPerUserDir string +} + +func (env rootlessRuntimeDirEnvironmentImplementation) getProcCommandFile() string { + return env.procCommandFile +} + +func (env rootlessRuntimeDirEnvironmentImplementation) getRunUserDir() string { + return
env.runUserDir +} + +func (env rootlessRuntimeDirEnvironmentImplementation) getTmpPerUserDir() string { + return env.tmpPerUserDir +} + +func (rootlessRuntimeDirEnvironmentImplementation) homeDirGetRuntimeDir() (string, error) { + return homedir.GetRuntimeDir() +} + +func (rootlessRuntimeDirEnvironmentImplementation) systemLstat(path string) (*system.StatT, error) { + return system.Lstat(path) +} + +func (rootlessRuntimeDirEnvironmentImplementation) homedirGet() string { + return homedir.Get() +} + +func isRootlessRuntimeDirOwner(dir string, env rootlessRuntimeDirEnvironment) bool { + st, err := env.systemLstat(dir) + return err == nil && int(st.UID()) == os.Getuid() && st.Mode()&0o700 == 0o700 && st.Mode()&0o066 == 0o000 +} + +// getRootlessRuntimeDirIsolated is an internal implementation detail of getRootlessRuntimeDir to allow testing. +// Everyone but the tests this is intended for should only call getRootlessRuntimeDir, never this function. +func getRootlessRuntimeDirIsolated(env rootlessRuntimeDirEnvironment) (string, error) { + runtimeDir, err := env.homeDirGetRuntimeDir() + if err == nil { + return runtimeDir, nil + } + + initCommand, err := os.ReadFile(env.getProcCommandFile()) + if err != nil || string(initCommand) == "systemd" { + runUserDir := env.getRunUserDir() + if isRootlessRuntimeDirOwner(runUserDir, env) { + return runUserDir, nil + } + } + + tmpPerUserDir := env.getTmpPerUserDir() + if tmpPerUserDir != "" { + if _, err := env.systemLstat(tmpPerUserDir); os.IsNotExist(err) { + if err := os.Mkdir(tmpPerUserDir, 0o700); err != nil { + logrus.Errorf("Failed to create temp directory for user: %v", err) + } else { + return tmpPerUserDir, nil + } + } else if isRootlessRuntimeDirOwner(tmpPerUserDir, env) { + return tmpPerUserDir, nil + } + } + + homeDir := env.homedirGet() + if homeDir == "" { + return "", errors.New("neither XDG_RUNTIME_DIR nor temp dir nor HOME was set non-empty") + } + resolvedHomeDir, err := filepath.EvalSymlinks(homeDir) + if err != nil { + return "", err + } + return filepath.Join(resolvedHomeDir, "rundir"), nil +} + +func getRootlessRuntimeDir(rootlessUID int) (string, error) { + return getRootlessRuntimeDirIsolated( + rootlessRuntimeDirEnvironmentImplementation{ + "/proc/1/comm", + fmt.Sprintf("/run/user/%d", rootlessUID), + fmt.Sprintf("%s/containers-user-%d", os.TempDir(), rootlessUID), + }, + ) +} + +// getRootlessDirInfo returns the parent path of where the storage for containers and +// volumes will be in rootless mode +func getRootlessDirInfo(rootlessUID int) (string, string, error) { + rootlessRuntime, err := GetRootlessRuntimeDir(rootlessUID) + if err != nil { + return "", "", err + } + + dataDir, err := homedir.GetDataHome() + if err == nil { + return dataDir, rootlessRuntime, nil + } + + home := homedir.Get() + if home == "" { + return "", "", fmt.Errorf("neither XDG_DATA_HOME nor HOME was set non-empty: %w", err) + } + // runc doesn't like symlinks in the rootfs path, and at least + // on CoreOS /home is a symlink to /var/home, so resolve any symlink. 
+ resolvedHome, err := filepath.EvalSymlinks(home) + if err != nil { + return "", "", err + } + dataDir = filepath.Join(resolvedHome, ".local", "share") + + return dataDir, rootlessRuntime, nil +} + +func getRootlessUID() int { + uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID") + if uidEnv != "" { + u, _ := strconv.Atoi(uidEnv) + return u + } + return os.Geteuid() +} + +func expandEnvPath(path string, rootlessUID int) (string, error) { + var err error + path = strings.Replace(path, "$UID", strconv.Itoa(rootlessUID), -1) + path = os.ExpandEnv(path) + newpath, err := filepath.EvalSymlinks(path) + if err != nil { + newpath = filepath.Clean(path) + } + return newpath, nil +} + +func DefaultConfigFile(rootless bool) (string, error) { + if defaultConfigFileSet { + return defaultConfigFile, nil + } + + if path, ok := os.LookupEnv(storageConfEnv); ok { + return path, nil + } + if !rootless { + if _, err := os.Stat(defaultOverrideConfigFile); err == nil { + return defaultOverrideConfigFile, nil + } + return defaultConfigFile, nil + } + + if configHome := os.Getenv("XDG_CONFIG_HOME"); configHome != "" { + return filepath.Join(configHome, "containers/storage.conf"), nil + } + home := homedir.Get() + if home == "" { + return "", errors.New("cannot determine user's homedir") + } + return filepath.Join(home, ".config/containers/storage.conf"), nil +} + +func reloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptions) { + prevReloadConfig.mutex.Lock() + defer prevReloadConfig.mutex.Unlock() + + fi, err := os.Stat(configFile) + if err != nil { + if !os.IsNotExist(err) { + logrus.Warningf("Failed to read %s %v\n", configFile, err.Error()) + } + return + } + + mtime := fi.ModTime() + if prevReloadConfig.storeOptions != nil && prevReloadConfig.mod == mtime && prevReloadConfig.configFile == configFile { + *storeOptions = *prevReloadConfig.storeOptions + return + } + + ReloadConfigurationFile(configFile, storeOptions) + + prevReloadConfig.storeOptions = storeOptions + prevReloadConfig.mod = mtime + prevReloadConfig.configFile = configFile +} diff --git a/vendor/github.com/containers/storage/userns.go b/vendor/github.com/containers/storage/userns.go new file mode 100644 index 0000000000..32ae830bea --- /dev/null +++ b/vendor/github.com/containers/storage/userns.go @@ -0,0 +1,311 @@ +package storage + +import ( + "fmt" + "os" + "os/user" + "path/filepath" + "strconv" + + drivers "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/unshare" + "github.com/containers/storage/types" + libcontainerUser "github.com/opencontainers/runc/libcontainer/user" + "github.com/sirupsen/logrus" +) + +// getAdditionalSubIDs looks up the additional IDs configured for +// the specified user. +// The argument USERNAME is ignored for rootless users, as it is not +// possible to use an arbitrary entry in /etc/sub*id. +// Differently, if the username is not specified for root users, a +// default name is used. 
+func getAdditionalSubIDs(username string) (*idSet, *idSet, error) { + var uids, gids *idSet + + if unshare.IsRootless() { + username = os.Getenv("USER") + if username == "" { + var id string + if os.Geteuid() == 0 { + id = strconv.Itoa(unshare.GetRootlessUID()) + } else { + id = strconv.Itoa(os.Geteuid()) + } + userID, err := user.LookupId(id) + if err == nil { + username = userID.Username + } + } + } else if username == "" { + username = RootAutoUserNsUser + } + mappings, err := idtools.NewIDMappings(username, username) + if err != nil { + logrus.Errorf("Cannot find mappings for user %q: %v", username, err) + } else { + uids = getHostIDs(mappings.UIDs()) + gids = getHostIDs(mappings.GIDs()) + } + return uids, gids, nil +} + +// getAvailableIDs returns the list of ranges that are usable by the current user. +// When running as root, it looks up the additional IDs assigned to the specified user. +// When running as rootless, the mappings assigned to the unprivileged user are converted +// to the IDs inside of the initial rootless user namespace. +func (s *store) getAvailableIDs() (*idSet, *idSet, error) { + if s.additionalUIDs == nil { + uids, gids, err := getAdditionalSubIDs(s.autoUsernsUser) + if err != nil { + return nil, nil, err + } + // Store the result so we don't need to look it up again next time + s.additionalUIDs, s.additionalGIDs = uids, gids + } + + if !unshare.IsRootless() { + // No mapping to inner namespace needed + return s.additionalUIDs, s.additionalGIDs, nil + } + + // We are already inside of the rootless user namespace. + // We need to remap the configured mappings to what is available + // inside of the rootless userns. + u := newIDSet([]interval{{start: 1, end: s.additionalUIDs.size() + 1}}) + g := newIDSet([]interval{{start: 1, end: s.additionalGIDs.size() + 1}}) + return u, g, nil +} + +// nobodyUser is the UID and GID of the "nobody" user. Hardcode its value +// for simplicity. +const nobodyUser = 65534 + +// parseMountedFiles returns the maximum UID and GID found in the /etc/passwd and +// /etc/group files. +func parseMountedFiles(containerMount, passwdFile, groupFile string) uint32 { + if passwdFile == "" { + passwdFile = filepath.Join(containerMount, "etc/passwd") + } + if groupFile == "" { + groupFile = filepath.Join(containerMount, "etc/group") + } + + size := 0 + + users, err := libcontainerUser.ParsePasswdFile(passwdFile) + if err == nil { + for _, u := range users { + // Skip the "nobody" user otherwise we end up with 65536 + // ids with most images + if u.Name == "nobody" { + continue + } + if u.Uid > size && u.Uid != nobodyUser { + size = u.Uid + } + if u.Gid > size && u.Gid != nobodyUser { + size = u.Gid + } + } + } + + groups, err := libcontainerUser.ParseGroupFile(groupFile) + if err == nil { + for _, g := range groups { + if g.Name == "nobody" { + continue + } + if g.Gid > size && g.Gid != nobodyUser { + size = g.Gid + } + } + } + + return uint32(size) +} + +// getMaxSizeFromImage returns the maximum ID used by the specified image. +// On entry, rlstore must be locked for writing, and lstores must be locked for reading. +func (s *store) getMaxSizeFromImage(image *Image, rlstore rwLayerStore, lstores []roLayerStore, passwdFile, groupFile string) (_ uint32, retErr error) { + layerStores := append([]roLayerStore{rlstore}, lstores...)
+ + size := uint32(0) + + var topLayer *Layer + layerName := image.TopLayer +outer: + for { + for _, ls := range layerStores { + layer, err := ls.Get(layerName) + if err != nil { + continue + } + if image.TopLayer == layerName { + topLayer = layer + } + for _, uid := range layer.UIDs { + if uid >= size { + size = uid + 1 + } + } + for _, gid := range layer.GIDs { + if gid >= size { + size = gid + 1 + } + } + layerName = layer.Parent + if layerName == "" { + break outer + } + continue outer + } + return 0, fmt.Errorf("cannot find layer %q", layerName) + } + + layerOptions := &LayerOptions{ + IDMappingOptions: types.IDMappingOptions{ + HostUIDMapping: true, + HostGIDMapping: true, + UIDMap: nil, + GIDMap: nil, + }, + } + + // We need to create a temporary layer so we can mount it and lookup the + // maximum IDs used. + clayer, _, err := rlstore.create("", topLayer, nil, "", nil, layerOptions, false, nil) + if err != nil { + return 0, err + } + defer func() { + if err2 := rlstore.Delete(clayer.ID); err2 != nil { + if retErr == nil { + retErr = fmt.Errorf("deleting temporary layer %#v: %w", clayer.ID, err2) + } else { + logrus.Errorf("Error deleting temporary layer %#v: %v", clayer.ID, err2) + } + } + }() + + mountOptions := drivers.MountOpts{ + MountLabel: "", + UidMaps: nil, + GidMaps: nil, + Options: nil, + } + + mountpoint, err := rlstore.Mount(clayer.ID, mountOptions) + if err != nil { + return 0, err + } + defer func() { + if _, err2 := rlstore.unmount(clayer.ID, true, false); err2 != nil { + if retErr == nil { + retErr = fmt.Errorf("unmounting temporary layer %#v: %w", clayer.ID, err2) + } else { + logrus.Errorf("Error unmounting temporary layer %#v: %v", clayer.ID, err2) + } + } + }() + + userFilesSize := parseMountedFiles(mountpoint, passwdFile, groupFile) + if userFilesSize > size { + size = userFilesSize + } + + return size, nil +} + +// getAutoUserNS creates an automatic user namespace +// If image != nil, On entry, rlstore must be locked for writing, and lstores must be locked for reading. +func (s *store) getAutoUserNS(options *types.AutoUserNsOptions, image *Image, rlstore rwLayerStore, lstores []roLayerStore) ([]idtools.IDMap, []idtools.IDMap, error) { + requestedSize := uint32(0) + initialSize := uint32(1) + if options.Size > 0 { + requestedSize = options.Size + } + if options.InitialSize > 0 { + initialSize = options.InitialSize + } + + availableUIDs, availableGIDs, err := s.getAvailableIDs() + if err != nil { + return nil, nil, fmt.Errorf("cannot read mappings: %w", err) + } + + // Look at every container that is using a user namespace and store + // the intervals that are already used. + containers, err := s.Containers() + if err != nil { + return nil, nil, err + } + var usedUIDs, usedGIDs []idtools.IDMap + for _, c := range containers { + usedUIDs = append(usedUIDs, c.UIDMap...) + usedGIDs = append(usedGIDs, c.GIDMap...) + } + + size := requestedSize + + // If there is no requestedSize, lookup the maximum used IDs in the layers + // metadata. Make sure the size is at least s.autoNsMinSize and it is not + // bigger than s.autoNsMaxSize. + // This is a best effort heuristic. 
+ if requestedSize == 0 { + size = initialSize + if s.autoNsMinSize > size { + size = s.autoNsMinSize + } + if image != nil { + sizeFromImage, err := s.getMaxSizeFromImage(image, rlstore, lstores, options.PasswdFile, options.GroupFile) + if err != nil { + return nil, nil, err + } + if sizeFromImage > size { + size = sizeFromImage + } + } + if s.autoNsMaxSize > 0 && size > s.autoNsMaxSize { + return nil, nil, fmt.Errorf("the container needs a user namespace with size %v that is bigger than the maximum value allowed with userns=auto %v", size, s.autoNsMaxSize) + } + } + + return getAutoUserNSIDMappings( + int(size), + availableUIDs, availableGIDs, + usedUIDs, usedGIDs, + options.AdditionalUIDMappings, options.AdditionalGIDMappings, + ) +} + +// getAutoUserNSIDMappings computes the user/group id mappings for the automatic user namespace. +func getAutoUserNSIDMappings( + size int, + availableUIDs, availableGIDs *idSet, + usedUIDMappings, usedGIDMappings, additionalUIDMappings, additionalGIDMappings []idtools.IDMap, +) ([]idtools.IDMap, []idtools.IDMap, error) { + usedUIDs := getHostIDs(append(usedUIDMappings, additionalUIDMappings...)) + usedGIDs := getHostIDs(append(usedGIDMappings, additionalGIDMappings...)) + + // Exclude additional uids and gids from requested range. + targetIDs := newIDSet([]interval{{start: 0, end: size}}) + requestedContainerUIDs := targetIDs.subtract(getContainerIDs(additionalUIDMappings)) + requestedContainerGIDs := targetIDs.subtract(getContainerIDs(additionalGIDMappings)) + + // Make sure the specified additional IDs are not used as part of the automatic + // mapping + availableUIDs, err := availableUIDs.subtract(usedUIDs).findAvailable(requestedContainerUIDs.size()) + if err != nil { + return nil, nil, err + } + availableGIDs, err = availableGIDs.subtract(usedGIDs).findAvailable(requestedContainerGIDs.size()) + if err != nil { + return nil, nil, err + } + + uidMap := append(availableUIDs.zip(requestedContainerUIDs), additionalUIDMappings...) + gidMap := append(availableGIDs.zip(requestedContainerGIDs), additionalGIDMappings...) + return uidMap, gidMap, nil +} diff --git a/vendor/github.com/containers/storage/utils.go b/vendor/github.com/containers/storage/utils.go new file mode 100644 index 0000000000..6b5a3421a8 --- /dev/null +++ b/vendor/github.com/containers/storage/utils.go @@ -0,0 +1,74 @@ +package storage + +import ( + "fmt" + + "github.com/containers/storage/types" +) + +// ParseIDMapping takes idmappings and subuid and subgid maps and returns a storage mapping +func ParseIDMapping(UIDMapSlice, GIDMapSlice []string, subUIDMap, subGIDMap string) (*types.IDMappingOptions, error) { + return types.ParseIDMapping(UIDMapSlice, GIDMapSlice, subUIDMap, subGIDMap) +} + +// GetRootlessRuntimeDir returns the runtime directory when running as non root +func GetRootlessRuntimeDir(rootlessUID int) (string, error) { + return types.GetRootlessRuntimeDir(rootlessUID) +} + +// DefaultStoreOptionsAutoDetectUID returns the default storage options for containers +func DefaultStoreOptionsAutoDetectUID() (types.StoreOptions, error) { + return types.DefaultStoreOptionsAutoDetectUID() +} + +// DefaultStoreOptions returns the default storage options for containers +func DefaultStoreOptions(rootless bool, rootlessUID int) (types.StoreOptions, error) { + return types.DefaultStoreOptions(rootless, rootlessUID) +} + +func validateMountOptions(mountOptions []string) error { + var Empty struct{} + // Add invalid options for ImageMount() here. 
+ invalidOptions := map[string]struct{}{ + "rw": Empty, + } + + for _, opt := range mountOptions { + if _, ok := invalidOptions[opt]; ok { + return fmt.Errorf(" %q option not supported", opt) + } + } + return nil +} + +func applyNameOperation(oldNames []string, opParameters []string, op updateNameOperation) ([]string, error) { + var result []string + switch op { + case setNames: + // ignore all old names and just return new names + result = opParameters + case removeNames: + // remove given names from old names + result = make([]string, 0, len(oldNames)) + for _, name := range oldNames { + // only keep names in final result which do not intersect with input names + // basically `result = oldNames - opParameters` + nameShouldBeRemoved := false + for _, opName := range opParameters { + if name == opName { + nameShouldBeRemoved = true + } + } + if !nameShouldBeRemoved { + result = append(result, name) + } + } + case addNames: + result = make([]string, 0, len(opParameters)+len(oldNames)) + result = append(result, opParameters...) + result = append(result, oldNames...) + default: + return result, errInvalidUpdateNameOperation + } + return dedupeStrings(result), nil +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/LICENSE b/vendor/github.com/cyphar/filepath-securejoin/LICENSE new file mode 100644 index 0000000000..bec842f294 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/LICENSE @@ -0,0 +1,28 @@ +Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. +Copyright (C) 2017 SUSE LLC. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
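The name-update logic in `applyNameOperation` from `containers/storage/utils.go` above is easiest to see with concrete values. The following is a minimal, self-contained sketch (a hypothetical `main` package, not part of the patch) that mirrors the set/remove/add semantics described in its comments; the string-based `op` values and the inline deduplication stand in for the unexported `updateNameOperation` constants and `dedupeStrings` helper:

```go
package main

import "fmt"

// applyNames mirrors applyNameOperation (simplified): "set" replaces the old
// names, "remove" computes oldNames - opParameters, and "add" prepends the
// new names; duplicates are removed at the end in all cases.
func applyNames(oldNames, opParameters []string, op string) []string {
	var result []string
	switch op {
	case "set":
		result = append(result, opParameters...)
	case "remove":
		for _, name := range oldNames {
			keep := true
			for _, opName := range opParameters {
				if name == opName {
					keep = false
				}
			}
			if keep {
				result = append(result, name)
			}
		}
	case "add":
		result = append(append(result, opParameters...), oldNames...)
	}
	// Deduplicate while keeping the first occurrence, like dedupeStrings.
	seen := map[string]bool{}
	deduped := make([]string, 0, len(result))
	for _, name := range result {
		if !seen[name] {
			seen[name] = true
			deduped = append(deduped, name)
		}
	}
	return deduped
}

func main() {
	old := []string{"localhost/foo:latest", "localhost/bar:latest"}
	fmt.Println(applyNames(old, []string{"localhost/baz:latest"}, "add"))
	// [localhost/baz:latest localhost/foo:latest localhost/bar:latest]
	fmt.Println(applyNames(old, []string{"localhost/bar:latest"}, "remove"))
	// [localhost/foo:latest]
}
```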
diff --git a/vendor/github.com/cyphar/filepath-securejoin/README.md b/vendor/github.com/cyphar/filepath-securejoin/README.md new file mode 100644 index 0000000000..4eca0f2355 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/README.md @@ -0,0 +1,79 @@ +## `filepath-securejoin` ## + +[![Build Status](https://github.com/cyphar/filepath-securejoin/actions/workflows/ci.yml/badge.svg)](https://github.com/cyphar/filepath-securejoin/actions/workflows/ci.yml) + +An implementation of `SecureJoin`, a [candidate for inclusion in the Go +standard library][go#20126]. The purpose of this function is to be a "secure" +alternative to `filepath.Join`, and in particular it provides certain +guarantees that are not provided by `filepath.Join`. + +> **NOTE**: This code is *only* safe if you are not at risk of other processes +> modifying path components after you've used `SecureJoin`. If it is possible +> for a malicious process to modify path components of the resolved path, then +> you will be vulnerable to some fairly trivial TOCTOU race conditions. [There +> are some Linux kernel patches I'm working on which might allow for a better +> solution.][lwn-obeneath] +> +> In addition, with a slightly modified API it might be possible to use +> `O_PATH` and verify that the opened path is actually the resolved one -- but +> I have not done that yet. I might add it in the future as a helper function +> to help users verify the path (we can't just return `/proc/self/fd/` +> because that doesn't always work transparently for all users). + +This is the function prototype: + +```go +func SecureJoin(root, unsafePath string) (string, error) +``` + +This library **guarantees** the following: + +* If no error is set, the resulting string **must** be a child path of + `root` and will not contain any symlink path components (they will all be + expanded). + +* When expanding symlinks, all symlink path components **must** be resolved + relative to the provided root. In particular, this can be considered a + userspace implementation of how `chroot(2)` operates on file paths. Note that + these symlinks will **not** be expanded lexically (`filepath.Clean` is not + called on the input before processing). + +* Non-existent path components are unaffected by `SecureJoin` (similar to + `filepath.EvalSymlinks`'s semantics). + +* The returned path will always be `filepath.Clean`ed and thus not contain any + `..` components. + +A (trivial) implementation of this function on GNU/Linux systems could be done +with the following (note that this requires root privileges and is far more +opaque than the implementation in this library, and also requires that +`readlink` is inside the `root` path): + +```go +package securejoin + +import ( + "os/exec" + "path/filepath" +) + +func SecureJoin(root, unsafePath string) (string, error) { + unsafePath = string(filepath.Separator) + unsafePath + cmd := exec.Command("chroot", root, + "readlink", "--canonicalize-missing", "--no-newline", unsafePath) + output, err := cmd.CombinedOutput() + if err != nil { + return "", err + } + expanded := string(output) + return filepath.Join(root, expanded), nil +} +``` + +[lwn-obeneath]: https://lwn.net/Articles/767547/ +[go#20126]: https://github.com/golang/go/issues/20126 + +### License ### + +The license of this project is the same as Go, which is a BSD 3-clause license +available in the `LICENSE` file. 
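To make the guarantees listed in the README concrete, here is a minimal usage sketch (not part of the vendored sources; it assumes the upstream import path `github.com/cyphar/filepath-securejoin`). A hostile `..` prefix is clamped inside the root rather than escaping it:

```go
package main

import (
	"fmt"

	securejoin "github.com/cyphar/filepath-securejoin"
)

func main() {
	// filepath.Join("/var/lib/containers", "../../../etc/passwd") would
	// yield "/etc/passwd". SecureJoin evaluates the unsafe path with the
	// first argument treated as the filesystem root, so the ".."
	// components cannot climb above it.
	path, err := securejoin.SecureJoin("/var/lib/containers", "../../../etc/passwd")
	if err != nil {
		panic(err)
	}
	fmt.Println(path) // /var/lib/containers/etc/passwd
}
```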
diff --git a/vendor/github.com/cyphar/filepath-securejoin/VERSION b/vendor/github.com/cyphar/filepath-securejoin/VERSION new file mode 100644 index 0000000000..abd410582d --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/VERSION @@ -0,0 +1 @@ +0.2.4 diff --git a/vendor/github.com/cyphar/filepath-securejoin/join.go b/vendor/github.com/cyphar/filepath-securejoin/join.go new file mode 100644 index 0000000000..aa32b85fb8 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/join.go @@ -0,0 +1,125 @@ +// Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. +// Copyright (C) 2017 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package securejoin is an implementation of the hopefully-soon-to-be-included +// SecureJoin helper that is meant to be part of the "path/filepath" package. +// The purpose of this project is to provide a PoC implementation to make the +// SecureJoin proposal (https://github.com/golang/go/issues/20126) more +// tangible. +package securejoin + +import ( + "bytes" + "errors" + "os" + "path/filepath" + "strings" + "syscall" +) + +// IsNotExist tells you if err is an error that implies that either the path +// accessed does not exist (or path components don't exist). This is +// effectively a more broad version of os.IsNotExist. +func IsNotExist(err error) bool { + // Check that it's not actually an ENOTDIR, which in some cases is a more + // convoluted case of ENOENT (usually involving weird paths). + return errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) || errors.Is(err, syscall.ENOENT) +} + +// SecureJoinVFS joins the two given path components (similar to Join) except +// that the returned path is guaranteed to be scoped inside the provided root +// path (when evaluated). Any symbolic links in the path are evaluated with the +// given root treated as the root of the filesystem, similar to a chroot. The +// filesystem state is evaluated through the given VFS interface (if nil, the +// standard os.* family of functions are used). +// +// Note that the guarantees provided by this function only apply if the path +// components in the returned string are not modified (in other words are not +// replaced with symlinks on the filesystem) after this function has returned. +// Such a symlink race is necessarily out-of-scope of SecureJoin. +// +// Volume names in unsafePath are always discarded, regardless if they are +// provided via direct input or when evaluating symlinks. Therefore: +// +// "C:\Temp" + "D:\path\to\file.txt" results in "C:\Temp\path\to\file.txt" +func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) { + // Use the os.* VFS implementation if none was specified. + if vfs == nil { + vfs = osVFS{} + } + + unsafePath = filepath.FromSlash(unsafePath) + var path bytes.Buffer + n := 0 + for unsafePath != "" { + if n > 255 { + return "", &os.PathError{Op: "SecureJoin", Path: root + string(filepath.Separator) + unsafePath, Err: syscall.ELOOP} + } + + if v := filepath.VolumeName(unsafePath); v != "" { + unsafePath = unsafePath[len(v):] + } + + // Next path component, p. 
+ i := strings.IndexRune(unsafePath, filepath.Separator) + var p string + if i == -1 { + p, unsafePath = unsafePath, "" + } else { + p, unsafePath = unsafePath[:i], unsafePath[i+1:] + } + + // Create a cleaned path, using the lexical semantics of /../a, to + // create a "scoped" path component which can safely be joined to fullP + // for evaluation. At this point, path.String() doesn't contain any + // symlink components. + cleanP := filepath.Clean(string(filepath.Separator) + path.String() + p) + if cleanP == string(filepath.Separator) { + path.Reset() + continue + } + fullP := filepath.Clean(root + cleanP) + + // Figure out whether the path is a symlink. + fi, err := vfs.Lstat(fullP) + if err != nil && !IsNotExist(err) { + return "", err + } + // Treat non-existent path components the same as non-symlinks (we + // can't do any better here). + if IsNotExist(err) || fi.Mode()&os.ModeSymlink == 0 { + path.WriteString(p) + path.WriteRune(filepath.Separator) + continue + } + + // Only increment when we actually dereference a link. + n++ + + // It's a symlink, expand it by prepending it to the yet-unparsed path. + dest, err := vfs.Readlink(fullP) + if err != nil { + return "", err + } + // Absolute symlinks reset any work we've already done. + if filepath.IsAbs(dest) { + path.Reset() + } + unsafePath = dest + string(filepath.Separator) + unsafePath + } + + // We have to clean path.String() here because it may contain '..' + // components that are entirely lexical, but would be misleading otherwise. + // And finally do a final clean to ensure that root is also lexically + // clean. + fullP := filepath.Clean(string(filepath.Separator) + path.String()) + return filepath.Clean(root + fullP), nil +} + +// SecureJoin is a wrapper around SecureJoinVFS that just uses the os.* library +// of functions as the VFS. If in doubt, use this function over SecureJoinVFS. +func SecureJoin(root, unsafePath string) (string, error) { + return SecureJoinVFS(root, unsafePath, nil) +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/vfs.go b/vendor/github.com/cyphar/filepath-securejoin/vfs.go new file mode 100644 index 0000000000..a82a5eae11 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/vfs.go @@ -0,0 +1,41 @@ +// Copyright (C) 2017 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securejoin + +import "os" + +// In future this should be moved into a separate package, because now there +// are several projects (umoci and go-mtree) that are using this sort of +// interface. + +// VFS is the minimal interface necessary to use SecureJoinVFS. A nil VFS is +// equivalent to using the standard os.* family of functions. This is mainly +// used for the purposes of mock testing, but also can be used to otherwise use +// SecureJoin with VFS-like system. +type VFS interface { + // Lstat returns a FileInfo describing the named file. If the file is a + // symbolic link, the returned FileInfo describes the symbolic link. Lstat + // makes no attempt to follow the link. These semantics are identical to + // os.Lstat. + Lstat(name string) (os.FileInfo, error) + + // Readlink returns the destination of the named symbolic link. These + // semantics are identical to os.Readlink. + Readlink(name string) (string, error) +} + +// osVFS is the "nil" VFS, in that it just passes everything through to the os +// module. 
+type osVFS struct{} + +// Lstat returns a FileInfo describing the named file. If the file is a +// symbolic link, the returned FileInfo describes the symbolic link. Lstat +// makes no attempt to follow the link. These semantics are identical to +// os.Lstat. +func (o osVFS) Lstat(name string) (os.FileInfo, error) { return os.Lstat(name) } + +// Readlink returns the destination of the named symbolic link. These +// semantics are identical to os.Readlink. +func (o osVFS) Readlink(name string) (string, error) { return os.Readlink(name) } diff --git a/vendor/github.com/docker/distribution/reference/helpers_deprecated.go b/vendor/github.com/docker/distribution/reference/helpers_deprecated.go new file mode 100644 index 0000000000..cbd119250a --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/helpers_deprecated.go @@ -0,0 +1,34 @@ +package reference + +import "github.com/distribution/reference" + +// IsNameOnly returns true if reference only contains a repo name. +// +// Deprecated: use [reference.IsNameOnly]. +func IsNameOnly(ref reference.Named) bool { + return reference.IsNameOnly(ref) +} + +// FamiliarName returns the familiar name string +// for the given named, familiarizing if needed. +// +// Deprecated: use [reference.FamiliarName]. +func FamiliarName(ref reference.Named) string { + return reference.FamiliarName(ref) +} + +// FamiliarString returns the familiar string representation +// for the given reference, familiarizing if needed. +// +// Deprecated: use [reference.FamiliarString]. +func FamiliarString(ref reference.Reference) string { + return reference.FamiliarString(ref) +} + +// FamiliarMatch reports whether ref matches the specified pattern. +// See [path.Match] for supported patterns. +// +// Deprecated: use [reference.FamiliarMatch]. +func FamiliarMatch(pattern string, ref reference.Reference) (bool, error) { + return reference.FamiliarMatch(pattern, ref) +} diff --git a/vendor/github.com/docker/distribution/reference/normalize_deprecated.go b/vendor/github.com/docker/distribution/reference/normalize_deprecated.go new file mode 100644 index 0000000000..1b4a459d70 --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/normalize_deprecated.go @@ -0,0 +1,92 @@ +package reference + +import ( + "regexp" + + "github.com/distribution/reference" + "github.com/opencontainers/go-digest" + "github.com/opencontainers/go-digest/digestset" +) + +// ParseNormalizedNamed parses a string into a named reference +// transforming a familiar name from Docker UI to a fully +// qualified reference. If the value may be an identifier +// use ParseAnyReference. +// +// Deprecated: use [reference.ParseNormalizedNamed]. +func ParseNormalizedNamed(s string) (reference.Named, error) { + return reference.ParseNormalizedNamed(s) +} + +// ParseDockerRef normalizes the image reference following the docker convention, +// which allows for references to contain both a tag and a digest. +// +// Deprecated: use [reference.ParseDockerRef]. +func ParseDockerRef(ref string) (reference.Named, error) { + return reference.ParseDockerRef(ref) +} + +// TagNameOnly adds the default tag "latest" to a reference if it only has +// a repo name. +// +// Deprecated: use [reference.TagNameOnly]. +func TagNameOnly(ref reference.Named) reference.Named { + return reference.TagNameOnly(ref) +} + +// ParseAnyReference parses a reference string as a possible identifier, +// full digest, or familiar name. 
+// +// Deprecated: use [reference.ParseAnyReference]. +func ParseAnyReference(ref string) (reference.Reference, error) { + return reference.ParseAnyReference(ref) +} + +// Functions and types below have been removed in distribution v3 and +// have not been ported to github.com/distribution/reference. See +// https://github.com/distribution/distribution/pull/3774 + +var ( + // ShortIdentifierRegexp is the format used to represent a prefix + // of an identifier. A prefix may be used to match a sha256 identifier + // within a list of trusted identifiers. + // + // Deprecated: support for short-identifiers is deprecated, and will be removed in v3. + ShortIdentifierRegexp = regexp.MustCompile(shortIdentifier) + + shortIdentifier = `([a-f0-9]{6,64})` + + // anchoredShortIdentifierRegexp is used to check if a value + // is a possible identifier prefix, anchored at start and end + // of string. + anchoredShortIdentifierRegexp = regexp.MustCompile(`^` + shortIdentifier + `$`) +) + +type digestReference digest.Digest + +func (d digestReference) String() string { + return digest.Digest(d).String() +} + +func (d digestReference) Digest() digest.Digest { + return digest.Digest(d) +} + +// ParseAnyReferenceWithSet parses a reference string as a possible short +// identifier to be matched in a digest set, a full digest, or familiar name. +// +// Deprecated: support for short-identifiers is deprecated, and will be removed in v3. +func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) { + if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok { + dgst, err := ds.Lookup(ref) + if err == nil { + return digestReference(dgst), nil + } + } else { + if dgst, err := digest.Parse(ref); err == nil { + return digestReference(dgst), nil + } + } + + return reference.ParseNormalizedNamed(ref) +} diff --git a/vendor/github.com/docker/distribution/reference/reference_deprecated.go b/vendor/github.com/docker/distribution/reference/reference_deprecated.go new file mode 100644 index 0000000000..5b732498eb --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/reference_deprecated.go @@ -0,0 +1,172 @@ +// Package reference is deprecated, and has moved to github.com/distribution/reference. +// +// Deprecated: use github.com/distribution/reference instead. +package reference + +import ( + "github.com/distribution/reference" + "github.com/opencontainers/go-digest" +) + +const ( + // NameTotalLengthMax is the maximum total number of characters in a repository name. + // + // Deprecated: use [reference.NameTotalLengthMax]. + NameTotalLengthMax = reference.NameTotalLengthMax +) + +var ( + // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. + // + // Deprecated: use [reference.ErrReferenceInvalidFormat]. + ErrReferenceInvalidFormat = reference.ErrReferenceInvalidFormat + + // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. + // + // Deprecated: use [reference.ErrTagInvalidFormat]. + ErrTagInvalidFormat = reference.ErrTagInvalidFormat + + // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag. + // + // Deprecated: use [reference.ErrDigestInvalidFormat]. + ErrDigestInvalidFormat = reference.ErrDigestInvalidFormat + + // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters. + // + // Deprecated: use [reference.ErrNameContainsUppercase]. 
+ ErrNameContainsUppercase = reference.ErrNameContainsUppercase + + // ErrNameEmpty is returned for empty, invalid repository names. + // + // Deprecated: use [reference.ErrNameEmpty]. + ErrNameEmpty = reference.ErrNameEmpty + + // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. + // + // Deprecated: use [reference.ErrNameTooLong]. + ErrNameTooLong = reference.ErrNameTooLong + + // ErrNameNotCanonical is returned when a name is not canonical. + // + // Deprecated: use [reference.ErrNameNotCanonical]. + ErrNameNotCanonical = reference.ErrNameNotCanonical +) + +// Reference is an opaque object reference identifier that may include +// modifiers such as a hostname, name, tag, and digest. +// +// Deprecated: use [reference.Reference]. +type Reference = reference.Reference + +// Field provides a wrapper type for resolving correct reference types when +// working with encoding. +// +// Deprecated: use [reference.Field]. +type Field = reference.Field + +// AsField wraps a reference in a Field for encoding. +// +// Deprecated: use [reference.AsField]. +func AsField(ref reference.Reference) reference.Field { + return reference.AsField(ref) +} + +// Named is an object with a full name +// +// Deprecated: use [reference.Named]. +type Named = reference.Named + +// Tagged is an object which has a tag +// +// Deprecated: use [reference.Tagged]. +type Tagged = reference.Tagged + +// NamedTagged is an object including a name and tag. +// +// Deprecated: use [reference.NamedTagged]. +type NamedTagged = reference.NamedTagged + +// Digested is an object which has a digest +// in which it can be referenced by +// +// Deprecated: use [reference.Digested]. +type Digested = reference.Digested + +// Canonical reference is an object with a fully unique +// name including a name with domain and digest +// +// Deprecated: use [reference.Canonical]. +type Canonical = reference.Canonical + +// Domain returns the domain part of the [Named] reference. +// +// Deprecated: use [reference.Domain]. +func Domain(named reference.Named) string { + return reference.Domain(named) +} + +// Path returns the name without the domain part of the [Named] reference. +// +// Deprecated: use [reference.Path]. +func Path(named reference.Named) (name string) { + return reference.Path(named) +} + +// SplitHostname splits a named reference into a +// hostname and name string. If no valid hostname is +// found, the hostname is empty and the full value +// is returned as name +// +// Deprecated: Use [reference.Domain] or [reference.Path]. +func SplitHostname(named reference.Named) (string, string) { + return reference.SplitHostname(named) +} + +// Parse parses s and returns a syntactically valid Reference. +// If an error was encountered it is returned, along with a nil Reference. +// +// Deprecated: use [reference.Parse]. +func Parse(s string) (reference.Reference, error) { + return reference.Parse(s) +} + +// ParseNamed parses s and returns a syntactically valid reference implementing +// the Named interface. The reference must have a name and be in the canonical +// form, otherwise an error is returned. +// If an error was encountered it is returned, along with a nil Reference. +// +// Deprecated: use [reference.ParseNamed]. +func ParseNamed(s string) (reference.Named, error) { + return reference.ParseNamed(s) +} + +// WithName returns a named object representing the given string. If the input +// is invalid ErrReferenceInvalidFormat will be returned. +// +// Deprecated: use [reference.WithName].
+func WithName(name string) (reference.Named, error) { + return reference.WithName(name) +} + +// WithTag combines the name from "name" and the tag from "tag" to form a +// reference incorporating both the name and the tag. +// +// Deprecated: use [reference.WithTag]. +func WithTag(name reference.Named, tag string) (reference.NamedTagged, error) { + return reference.WithTag(name, tag) +} + +// WithDigest combines the name from "name" and the digest from "digest" to form +// a reference incorporating both the name and the digest. +// +// Deprecated: use [reference.WithDigest]. +func WithDigest(name reference.Named, digest digest.Digest) (reference.Canonical, error) { + return reference.WithDigest(name, digest) +} + +// TrimNamed removes any tag or digest from the named reference. +// +// Deprecated: use [reference.TrimNamed]. +func TrimNamed(ref reference.Named) reference.Named { + return reference.TrimNamed(ref) +} diff --git a/vendor/github.com/docker/distribution/reference/regexp_deprecated.go b/vendor/github.com/docker/distribution/reference/regexp_deprecated.go new file mode 100644 index 0000000000..4b9c1b58eb --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/regexp_deprecated.go @@ -0,0 +1,50 @@ +package reference + +import ( + "github.com/distribution/reference" +) + +// DigestRegexp matches well-formed digests, including algorithm (e.g. "sha256:"). +// +// Deprecated: use [reference.DigestRegexp]. +var DigestRegexp = reference.DigestRegexp + +// DomainRegexp matches hostname or IP-addresses, optionally including a port +// number. It defines the structure of potential domain components that may be +// part of image names. This is purposely a subset of what is allowed by DNS to +// ensure backwards compatibility with Docker image names. It may be a subset of +// DNS domain name, an IPv4 address in decimal format, or an IPv6 address between +// square brackets (excluding zone identifiers as defined by [RFC 6874] or special +// addresses such as IPv4-Mapped). +// +// Deprecated: use [reference.DomainRegexp]. +// +// [RFC 6874]: https://www.rfc-editor.org/rfc/rfc6874. +var DomainRegexp = reference.DomainRegexp + +// IdentifierRegexp is the format for string identifier used as a +// content addressable identifier using sha256. These identifiers +// are like digests without the algorithm, since sha256 is used. +// +// Deprecated: use [reference.IdentifierRegexp]. +var IdentifierRegexp = reference.IdentifierRegexp + +// NameRegexp is the format for the name component of references, including +// an optional domain and port, but without tag or digest suffix. +// +// Deprecated: use [reference.NameRegexp]. +var NameRegexp = reference.NameRegexp + +// ReferenceRegexp is the full supported format of a reference. The regexp +// is anchored and has capturing groups for name, tag, and digest +// components. +// +// Deprecated: use [reference.ReferenceRegexp]. +var ReferenceRegexp = reference.ReferenceRegexp + +// TagRegexp matches valid tag names. From [docker/docker:graph/tags.go]. +// +// Deprecated: use [reference.TagRegexp].
+// +// [docker/docker:graph/tags.go]: https://github.com/moby/moby/blob/v1.6.0/graph/tags.go#L26-L28 +var TagRegexp = reference.TagRegexp diff --git a/vendor/github.com/docker/distribution/reference/sort_deprecated.go b/vendor/github.com/docker/distribution/reference/sort_deprecated.go new file mode 100644 index 0000000000..a73251b6f5 --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/sort_deprecated.go @@ -0,0 +1,10 @@ +package reference + +import "github.com/distribution/reference" + +// Sort sorts string references preferring higher information references. +// +// Deprecated: use [reference.Sort]. +func Sort(references []string) []string { + return reference.Sort(references) +} diff --git a/vendor/github.com/docker/docker/api/README.md b/vendor/github.com/docker/docker/api/README.md new file mode 100644 index 0000000000..f136c3433a --- /dev/null +++ b/vendor/github.com/docker/docker/api/README.md @@ -0,0 +1,42 @@ +# Working on the Engine API + +The Engine API is an HTTP API used by the command-line client to communicate with the daemon. It can also be used by third-party software to control the daemon. + +It consists of various components in this repository: + +- `api/swagger.yaml` A Swagger definition of the API. +- `api/types/` Types shared by both the client and server, representing various objects, options, responses, etc. Most are written manually, but some are automatically generated from the Swagger definition. See [#27919](https://github.com/docker/docker/issues/27919) for progress on this. +- `cli/` The command-line client. +- `client/` The Go client used by the command-line client. It can also be used by third-party Go programs. +- `daemon/` The daemon, which serves the API. + +## Swagger definition + +The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to: + +1. Automatically generate documentation. +2. Automatically generate the Go server and client. (A work-in-progress.) +3. Provide a machine readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc. + +## Updating the API documentation + +The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, edit this file to represent the change in the documentation. + +The file is split into two main sections: + +- `definitions`, which defines re-usable objects used in requests and responses +- `paths`, which defines the API endpoints (and some inline objects which don't need to be reusable) + +To make an edit, first look for the endpoint you want to edit under `paths`, then make the required edits. Endpoints may reference reusable objects with `$ref`, which can be found in the `definitions` section. + +There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. adding new fields or endpoints), but for the full reference, see the [Swagger specification](http://swagger.io/specification/). + +`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is useful when making edits to ensure you are doing the right thing. + +## Viewing the API documentation + +When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly.
+ +Run `make swagger-docs` and a preview will be running at `http://localhost`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation. + +The production documentation is generated by vendoring `swagger.yaml` into [docker/docker.github.io](https://github.com/docker/docker.github.io). diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go new file mode 100644 index 0000000000..cba66bc462 --- /dev/null +++ b/vendor/github.com/docker/docker/api/common.go @@ -0,0 +1,11 @@ +package api // import "github.com/docker/docker/api" + +// Common constants for daemon and client. +const ( + // DefaultVersion of Current REST API + DefaultVersion = "1.43" + + // NoBaseImageSpecifier is the symbol used by the FROM + // command to specify that no base image is to be used. + NoBaseImageSpecifier = "scratch" +) diff --git a/vendor/github.com/docker/docker/api/common_unix.go b/vendor/github.com/docker/docker/api/common_unix.go new file mode 100644 index 0000000000..19fc63d658 --- /dev/null +++ b/vendor/github.com/docker/docker/api/common_unix.go @@ -0,0 +1,7 @@ +//go:build !windows +// +build !windows + +package api // import "github.com/docker/docker/api" + +// MinVersion represents Minimum REST API version supported +const MinVersion = "1.12" diff --git a/vendor/github.com/docker/docker/api/common_windows.go b/vendor/github.com/docker/docker/api/common_windows.go new file mode 100644 index 0000000000..590ba5479b --- /dev/null +++ b/vendor/github.com/docker/docker/api/common_windows.go @@ -0,0 +1,8 @@ +package api // import "github.com/docker/docker/api" + +// MinVersion represents Minimum REST API version supported +// Technically the first daemon API version released on Windows is v1.25 in +// engine version 1.13. However, some clients are explicitly using downlevel +// APIs (e.g. docker-compose v2.1 file format) and that is just too restrictive. +// Hence also allowing 1.24 on Windows. +const MinVersion string = "1.24" diff --git a/vendor/github.com/docker/docker/api/swagger-gen.yaml b/vendor/github.com/docker/docker/api/swagger-gen.yaml new file mode 100644 index 0000000000..f07a02737f --- /dev/null +++ b/vendor/github.com/docker/docker/api/swagger-gen.yaml @@ -0,0 +1,12 @@ + +layout: + models: + - name: definition + source: asset:model + target: "{{ joinFilePath .Target .ModelPackage }}" + file_name: "{{ (snakize (pascalize .Name)) }}.go" + operations: + - name: handler + source: asset:serverOperation + target: "{{ joinFilePath .Target .APIPackage .Package }}" + file_name: "{{ (snakize (pascalize .Name)) }}.go" diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml new file mode 100644 index 0000000000..7635b9f665 --- /dev/null +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -0,0 +1,12145 @@ +# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API. +# +# This is used for generating API documentation and the types used by the +# client/server. See api/README.md for more information. +# +# Some style notes: +# - This file is used by ReDoc, which allows GitHub Flavored Markdown in +# descriptions. +# - There is no maximum line length, for ease of editing and pretty diffs. +# - operationIds are in the format "NounVerb", with a singular noun. 
+ +swagger: "2.0" +schemes: + - "http" + - "https" +produces: + - "application/json" + - "text/plain" +consumes: + - "application/json" + - "text/plain" +basePath: "/v1.43" +info: + title: "Docker Engine API" + version: "1.43" + x-logo: + url: "https://docs.docker.com/assets/images/logo-docker-main.png" + description: | + The Engine API is an HTTP API served by Docker Engine. It is the API the + Docker client uses to communicate with the Engine, so everything the Docker + client can do can be done with the API. + + Most of the client's commands map directly to API endpoints (e.g. `docker ps` + is `GET /containers/json`). The notable exception is running containers, + which consists of several API calls. + + # Errors + + The API uses standard HTTP status codes to indicate the success or failure + of the API call. The body of the response will be JSON in the following + format: + + ``` + { + "message": "page not found" + } + ``` + + # Versioning + + The API is usually changed in each release, so API calls are versioned to + ensure that clients don't break. To lock to a specific version of the API, + you prefix the URL with its version, for example, call `/v1.30/info` to use + the v1.30 version of the `/info` endpoint. If the API version specified in + the URL is not supported by the daemon, a HTTP `400 Bad Request` error message + is returned. + + If you omit the version-prefix, the current version of the API (v1.43) is used. + For example, calling `/info` is the same as calling `/v1.43/info`. Using the + API without a version-prefix is deprecated and will be removed in a future release. + + Engine releases in the near future should support this version of the API, + so your client will continue to work even if it is talking to a newer Engine. + + The API uses an open schema model, which means server may add extra properties + to responses. Likewise, the server will ignore any extra query parameters and + request body properties. When you write clients, you need to ignore additional + properties in responses to ensure they do not break when talking to newer + daemons. + + + # Authentication + + Authentication for registries is handled client side. The client has to send + authentication details to various endpoints that need to communicate with + registries, such as `POST /images/(name)/push`. These are sent as + `X-Registry-Auth` header as a [base64url encoded](https://tools.ietf.org/html/rfc4648#section-5) + (JSON) string with the following structure: + + ``` + { + "username": "string", + "password": "string", + "email": "string", + "serveraddress": "string" + } + ``` + + The `serveraddress` is a domain/IP without a protocol. Throughout this + structure, double quotes are required. + + If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), + you can just pass this instead of credentials: + + ``` + { + "identitytoken": "9cbaf023786cd7..." + } + ``` + +# The tags on paths define the menu sections in the ReDoc documentation, so +# the usage of tags must make sense for that: +# - They should be singular, not plural. +# - There should not be too many tags, or the menu becomes unwieldy. For +# example, it is preferable to add a path to the "System" tag instead of +# creating a tag with a single path in it. +# - The order of tags in this list defines the order in the menu. +tags: + # Primary objects + - name: "Container" + x-displayName: "Containers" + description: | + Create and manage containers. 
+ - name: "Image" + x-displayName: "Images" + - name: "Network" + x-displayName: "Networks" + description: | + Networks are user-defined networks that containers can be attached to. + See the [networking documentation](https://docs.docker.com/network/) + for more information. + - name: "Volume" + x-displayName: "Volumes" + description: | + Create and manage persistent storage that can be attached to containers. + - name: "Exec" + x-displayName: "Exec" + description: | + Run new commands inside running containers. Refer to the + [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) + for more information. + + To exec a command in a container, you first need to create an exec instance, + then start it. These two API endpoints are wrapped up in a single command-line + command, `docker exec`. + + # Swarm things + - name: "Swarm" + x-displayName: "Swarm" + description: | + Engines can be clustered together in a swarm. Refer to the + [swarm mode documentation](https://docs.docker.com/engine/swarm/) + for more information. + - name: "Node" + x-displayName: "Nodes" + description: | + Nodes are instances of the Engine participating in a swarm. Swarm mode + must be enabled for these endpoints to work. + - name: "Service" + x-displayName: "Services" + description: | + Services are the definitions of tasks to run on a swarm. Swarm mode must + be enabled for these endpoints to work. + - name: "Task" + x-displayName: "Tasks" + description: | + A task is a container running on a swarm. It is the atomic scheduling unit + of swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Secret" + x-displayName: "Secrets" + description: | + Secrets are sensitive data that can be used by services. Swarm mode must + be enabled for these endpoints to work. + - name: "Config" + x-displayName: "Configs" + description: | + Configs are application configurations that can be used by services. Swarm + mode must be enabled for these endpoints to work. + # System things + - name: "Plugin" + x-displayName: "Plugins" + - name: "System" + x-displayName: "System" + +definitions: + Port: + type: "object" + description: "An open port on a container" + required: [PrivatePort, Type] + properties: + IP: + type: "string" + format: "ip-address" + description: "Host IP address that the container's port is mapped to" + PrivatePort: + type: "integer" + format: "uint16" + x-nullable: false + description: "Port on the container" + PublicPort: + type: "integer" + format: "uint16" + description: "Port exposed on the host" + Type: + type: "string" + x-nullable: false + enum: ["tcp", "udp", "sctp"] + example: + PrivatePort: 8080 + PublicPort: 80 + Type: "tcp" + + MountPoint: + type: "object" + description: | + MountPoint represents a mount point configuration inside the container. + This is used for reporting the mountpoints in use by a container. + properties: + Type: + description: | + The mount type: + + - `bind` a mount of a file or directory from the host into the container. + - `volume` a docker volume with the given `Name`. + - `tmpfs` a `tmpfs`. + - `npipe` a named pipe from the host into the container. + - `cluster` a Swarm cluster volume + type: "string" + enum: + - "bind" + - "volume" + - "tmpfs" + - "npipe" + - "cluster" + example: "volume" + Name: + description: | + Name is the name reference to the underlying data defined by `Source` + e.g., the volume name. + type: "string" + example: "myvolume" + Source: + description: | + Source location of the mount. 
+
+          For volumes, this contains the storage location of the volume (within
+          `/var/lib/docker/volumes/`). For bind-mounts and `npipe`, this contains
+          the source (host) part of the bind-mount. For `tmpfs` mount points, this
+          field is empty.
+        type: "string"
+        example: "/var/lib/docker/volumes/myvolume/_data"
+      Destination:
+        description: |
+          Destination is the path relative to the container root (`/`) where
+          the `Source` is mounted inside the container.
+        type: "string"
+        example: "/usr/share/nginx/html/"
+      Driver:
+        description: |
+          Driver is the volume driver used to create the volume (if it is a volume).
+        type: "string"
+        example: "local"
+      Mode:
+        description: |
+          Mode is a comma-separated list of options supplied by the user when
+          creating the bind/volume mount.
+
+          The default is platform-specific (`"z"` on Linux, empty on Windows).
+        type: "string"
+        example: "z"
+      RW:
+        description: |
+          Whether the mount is mounted writable (read-write).
+        type: "boolean"
+        example: true
+      Propagation:
+        description: |
+          Propagation describes how mounts are propagated from the host into the
+          mount point, and vice-versa. Refer to the [Linux kernel documentation](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt)
+          for details. This field is not used on Windows.
+        type: "string"
+        example: ""
+
+  DeviceMapping:
+    type: "object"
+    description: "A device mapping between the host and container"
+    properties:
+      PathOnHost:
+        type: "string"
+      PathInContainer:
+        type: "string"
+      CgroupPermissions:
+        type: "string"
+    example:
+      PathOnHost: "/dev/deviceName"
+      PathInContainer: "/dev/deviceName"
+      CgroupPermissions: "mrw"
+
+  DeviceRequest:
+    type: "object"
+    description: "A request for devices to be sent to device drivers"
+    properties:
+      Driver:
+        type: "string"
+        example: "nvidia"
+      Count:
+        type: "integer"
+        example: -1
+      DeviceIDs:
+        type: "array"
+        items:
+          type: "string"
+        example:
+          - "0"
+          - "1"
+          - "GPU-fef8089b-4820-abfc-e83e-94318197576e"
+      Capabilities:
+        description: |
+          A list of capabilities; an OR list of AND lists of capabilities.
+        type: "array"
+        items:
+          type: "array"
+          items:
+            type: "string"
+        example:
+          # gpu AND nvidia AND compute
+          - ["gpu", "nvidia", "compute"]
+      Options:
+        description: |
+          Driver-specific options, specified as key/value pairs. These options
+          are passed directly to the driver.
+        type: "object"
+        additionalProperties:
+          type: "string"
+
+  ThrottleDevice:
+    type: "object"
+    properties:
+      Path:
+        description: "Device path"
+        type: "string"
+      Rate:
+        description: "Rate"
+        type: "integer"
+        format: "int64"
+        minimum: 0
+
+  Mount:
+    type: "object"
+    properties:
+      Target:
+        description: "Container path."
+        type: "string"
+      Source:
+        description: "Mount source (e.g. a volume name, a host path)."
+        type: "string"
+      Type:
+        description: |
+          The mount type. Available types:
+
+          - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container.
+          - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed.
+          - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs.
+          - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container.
+ - `cluster` a Swarm cluster volume + type: "string" + enum: + - "bind" + - "volume" + - "tmpfs" + - "npipe" + - "cluster" + ReadOnly: + description: "Whether the mount should be read-only." + type: "boolean" + Consistency: + description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`." + type: "string" + BindOptions: + description: "Optional configuration for the `bind` type." + type: "object" + properties: + Propagation: + description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." + type: "string" + enum: + - "private" + - "rprivate" + - "shared" + - "rshared" + - "slave" + - "rslave" + NonRecursive: + description: "Disable recursive bind mount." + type: "boolean" + default: false + CreateMountpoint: + description: "Create mount point on host if missing" + type: "boolean" + default: false + VolumeOptions: + description: "Optional configuration for the `volume` type." + type: "object" + properties: + NoCopy: + description: "Populate volume with data from the target." + type: "boolean" + default: false + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + DriverConfig: + description: "Map of driver specific options" + type: "object" + properties: + Name: + description: "Name of the driver to use to create the volume." + type: "string" + Options: + description: "key/value map of driver specific options." + type: "object" + additionalProperties: + type: "string" + TmpfsOptions: + description: "Optional configuration for the `tmpfs` type." + type: "object" + properties: + SizeBytes: + description: "The size for the tmpfs mount in bytes." + type: "integer" + format: "int64" + Mode: + description: "The permission mode for the tmpfs mount in an integer." + type: "integer" + + RestartPolicy: + description: | + The behavior to apply when the container exits. The default is not to + restart. + + An ever increasing delay (double the previous delay, starting at 100ms) is + added before each restart to prevent flooding the server. + type: "object" + properties: + Name: + type: "string" + description: | + - Empty string means not to restart + - `no` Do not automatically restart + - `always` Always restart + - `unless-stopped` Restart always except when the user has manually stopped the container + - `on-failure` Restart only when the container exit code is non-zero + enum: + - "" + - "no" + - "always" + - "unless-stopped" + - "on-failure" + MaximumRetryCount: + type: "integer" + description: | + If `on-failure` is used, the number of times to retry before giving up. + + Resources: + description: "A container's resources (cgroups config, ulimits, etc)" + type: "object" + properties: + # Applicable to all platforms + CpuShares: + description: | + An integer value representing this container's relative CPU weight + versus other containers. + type: "integer" + Memory: + description: "Memory limit in bytes." + type: "integer" + format: "int64" + default: 0 + # Applicable to UNIX platforms + CgroupParent: + description: | + Path to `cgroups` under which the container's `cgroup` is created. If + the path is not absolute, the path is considered to be relative to the + `cgroups` path of the init process. Cgroups are created if they do not + already exist. + type: "string" + BlkioWeight: + description: "Block IO weight (relative weight)." 
+        type: "integer"
+        minimum: 0
+        maximum: 1000
+      BlkioWeightDevice:
+        description: |
+          Block IO weight (relative device weight) in the form:
+
+          ```
+          [{"Path": "device_path", "Weight": weight}]
+          ```
+        type: "array"
+        items:
+          type: "object"
+          properties:
+            Path:
+              type: "string"
+            Weight:
+              type: "integer"
+              minimum: 0
+      BlkioDeviceReadBps:
+        description: |
+          Limit read rate (bytes per second) from a device, in the form:
+
+          ```
+          [{"Path": "device_path", "Rate": rate}]
+          ```
+        type: "array"
+        items:
+          $ref: "#/definitions/ThrottleDevice"
+      BlkioDeviceWriteBps:
+        description: |
+          Limit write rate (bytes per second) to a device, in the form:
+
+          ```
+          [{"Path": "device_path", "Rate": rate}]
+          ```
+        type: "array"
+        items:
+          $ref: "#/definitions/ThrottleDevice"
+      BlkioDeviceReadIOps:
+        description: |
+          Limit read rate (IO per second) from a device, in the form:
+
+          ```
+          [{"Path": "device_path", "Rate": rate}]
+          ```
+        type: "array"
+        items:
+          $ref: "#/definitions/ThrottleDevice"
+      BlkioDeviceWriteIOps:
+        description: |
+          Limit write rate (IO per second) to a device, in the form:
+
+          ```
+          [{"Path": "device_path", "Rate": rate}]
+          ```
+        type: "array"
+        items:
+          $ref: "#/definitions/ThrottleDevice"
+      CpuPeriod:
+        description: "The length of a CPU period in microseconds."
+        type: "integer"
+        format: "int64"
+      CpuQuota:
+        description: |
+          Microseconds of CPU time that the container can get in a CPU period.
+        type: "integer"
+        format: "int64"
+      CpuRealtimePeriod:
+        description: |
+          The length of a CPU real-time period in microseconds. Set to 0 to
+          allocate no time to real-time tasks.
+        type: "integer"
+        format: "int64"
+      CpuRealtimeRuntime:
+        description: |
+          The length of a CPU real-time runtime in microseconds. Set to 0 to
+          allocate no time to real-time tasks.
+        type: "integer"
+        format: "int64"
+      CpusetCpus:
+        description: |
+          CPUs in which to allow execution (e.g., `0-3`, `0,1`).
+        type: "string"
+        example: "0-3"
+      CpusetMems:
+        description: |
+          Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only
+          effective on NUMA systems.
+        type: "string"
+      Devices:
+        description: "A list of devices to add to the container."
+        type: "array"
+        items:
+          $ref: "#/definitions/DeviceMapping"
+      DeviceCgroupRules:
+        description: "A list of cgroup rules to apply to the container"
+        type: "array"
+        items:
+          type: "string"
+          example: "c 13:* rwm"
+      DeviceRequests:
+        description: |
+          A list of requests for devices to be sent to device drivers.
+        type: "array"
+        items:
+          $ref: "#/definitions/DeviceRequest"
+      KernelMemoryTCP:
+        description: |
+          Hard limit for kernel TCP buffer memory (in bytes). Depending on the
+          OCI runtime in use, this option may be ignored. It is no longer supported
+          by the default (runc) runtime.
+
+          This field is omitted when empty.
+        type: "integer"
+        format: "int64"
+      MemoryReservation:
+        description: "Memory soft limit in bytes."
+        type: "integer"
+        format: "int64"
+      MemorySwap:
+        description: |
+          Total memory limit (memory + swap). Set as `-1` to enable unlimited
+          swap.
+        type: "integer"
+        format: "int64"
+      MemorySwappiness:
+        description: |
+          Tune a container's memory swappiness behavior. Accepts an integer
+          between 0 and 100.
+        type: "integer"
+        format: "int64"
+        minimum: 0
+        maximum: 100
+      NanoCpus:
+        description: "CPU quota in units of 10<sup>-9</sup> CPUs."
+        type: "integer"
+        format: "int64"
+      OomKillDisable:
+        description: "Disable OOM Killer for the container."
+ type: "boolean" + Init: + description: | + Run an init inside the container that forwards signals and reaps + processes. This field is omitted if empty, and the default (as + configured on the daemon) is used. + type: "boolean" + x-nullable: true + PidsLimit: + description: | + Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null` + to not change. + type: "integer" + format: "int64" + x-nullable: true + Ulimits: + description: | + A list of resource limits to set in the container. For example: + + ``` + {"Name": "nofile", "Soft": 1024, "Hard": 2048} + ``` + type: "array" + items: + type: "object" + properties: + Name: + description: "Name of ulimit" + type: "string" + Soft: + description: "Soft limit" + type: "integer" + Hard: + description: "Hard limit" + type: "integer" + # Applicable to Windows + CpuCount: + description: | + The number of usable CPUs (Windows only). + + On Windows Server containers, the processor resource controls are + mutually exclusive. The order of precedence is `CPUCount` first, then + `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + CpuPercent: + description: | + The usable percentage of the available CPUs (Windows only). + + On Windows Server containers, the processor resource controls are + mutually exclusive. The order of precedence is `CPUCount` first, then + `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + IOMaximumIOps: + description: "Maximum IOps for the container system drive (Windows only)" + type: "integer" + format: "int64" + IOMaximumBandwidth: + description: | + Maximum IO in bytes per second for the container system drive + (Windows only). + type: "integer" + format: "int64" + + Limit: + description: | + An object describing a limit on resources which can be requested by a task. + type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + example: 4000000000 + MemoryBytes: + type: "integer" + format: "int64" + example: 8272408576 + Pids: + description: | + Limits the maximum number of PIDs in the container. Set `0` for unlimited. + type: "integer" + format: "int64" + default: 0 + example: 100 + + ResourceObject: + description: | + An object describing the resources which can be advertised by a node and + requested by a task. + type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + example: 4000000000 + MemoryBytes: + type: "integer" + format: "int64" + example: 8272408576 + GenericResources: + $ref: "#/definitions/GenericResources" + + GenericResources: + description: | + User-defined resources can be either Integer resources (e.g, `SSD=3`) or + String resources (e.g, `GPU=UUID1`). + type: "array" + items: + type: "object" + properties: + NamedResourceSpec: + type: "object" + properties: + Kind: + type: "string" + Value: + type: "string" + DiscreteResourceSpec: + type: "object" + properties: + Kind: + type: "string" + Value: + type: "integer" + format: "int64" + example: + - DiscreteResourceSpec: + Kind: "SSD" + Value: 3 + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID1" + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID2" + + HealthConfig: + description: "A test to perform to check that the container is healthy." + type: "object" + properties: + Test: + description: | + The test to perform. 
Possible values are: + + - `[]` inherit healthcheck from image or parent image + - `["NONE"]` disable healthcheck + - `["CMD", args...]` exec arguments directly + - `["CMD-SHELL", command]` run command with system's default shell + type: "array" + items: + type: "string" + Interval: + description: | + The time to wait between checks in nanoseconds. It should be 0 or at + least 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + Timeout: + description: | + The time to wait before considering the check to have hung. It should + be 0 or at least 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + Retries: + description: | + The number of consecutive failures needed to consider a container as + unhealthy. 0 means inherit. + type: "integer" + StartPeriod: + description: | + Start period for the container to initialize before starting + health-retries countdown in nanoseconds. It should be 0 or at least + 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + + Health: + description: | + Health stores information about the container's healthcheck results. + type: "object" + x-nullable: true + properties: + Status: + description: | + Status is one of `none`, `starting`, `healthy` or `unhealthy` + + - "none" Indicates there is no healthcheck + - "starting" Starting indicates that the container is not yet ready + - "healthy" Healthy indicates that the container is running correctly + - "unhealthy" Unhealthy indicates that the container has a problem + type: "string" + enum: + - "none" + - "starting" + - "healthy" + - "unhealthy" + example: "healthy" + FailingStreak: + description: "FailingStreak is the number of consecutive failures" + type: "integer" + example: 0 + Log: + type: "array" + description: | + Log contains the last few results (oldest first) + items: + $ref: "#/definitions/HealthcheckResult" + + HealthcheckResult: + description: | + HealthcheckResult stores information about a single run of a healthcheck probe + type: "object" + x-nullable: true + properties: + Start: + description: | + Date and time at which this check started in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "date-time" + example: "2020-01-04T10:44:24.496525531Z" + End: + description: | + Date and time at which this check ended in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2020-01-04T10:45:21.364524523Z" + ExitCode: + description: | + ExitCode meanings: + + - `0` healthy + - `1` unhealthy + - `2` reserved (considered unhealthy) + - other values: error running probe + type: "integer" + example: 0 + Output: + description: "Output from last check" + type: "string" + + HostConfig: + description: "Container configuration that depends on the host we are running on" + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + # Applicable to all platforms + Binds: + type: "array" + description: | + A list of volume bindings for this container. Each volume binding + is a string in one of these forms: + + - `host-src:container-dest[:options]` to bind-mount a host path + into the container. Both `host-src`, and `container-dest` must + be an _absolute_ path. + - `volume-name:container-dest[:options]` to bind-mount a volume + managed by a volume driver into the container. `container-dest` + must be an _absolute_ path. 
+
+              `options` is an optional, comma-delimited list of:
+
+              - `nocopy` disables automatic copying of data from the container
+                path to the volume. The `nocopy` flag only applies to named volumes.
+              - `[ro|rw]` mounts a volume read-only or read-write, respectively.
+                If omitted or set to `rw`, volumes are mounted read-write.
+              - `[z|Z]` applies SELinux labels to allow or deny multiple containers
+                to read and write to the same volume.
+                  - `z`: a _shared_ content label is applied to the content. This
+                    label indicates that multiple containers can share the volume
+                    content, for both reading and writing.
+                  - `Z`: a _private unshared_ label is applied to the content.
+                    This label indicates that only the current container can use
+                    a private volume. Labeling systems such as SELinux require
+                    proper labels to be placed on volume content that is mounted
+                    into a container. Without a label, the security system can
+                    prevent a container's processes from using the content. By
+                    default, the labels set by the host operating system are not
+                    modified.
+              - `[[r]shared|[r]slave|[r]private]` specifies mount
+                [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt).
+                This only applies to bind-mounted volumes, not internal volumes
+                or named volumes. Mount propagation requires the source mount
+                point (the location where the source directory is mounted in the
+                host operating system) to have the correct propagation properties.
+                For shared volumes, the source mount point must be set to `shared`.
+                For slave volumes, the mount must be set to either `shared` or
+                `slave`.
+            items:
+              type: "string"
+          ContainerIDFile:
+            type: "string"
+            description: "Path to a file where the container ID is written"
+          LogConfig:
+            type: "object"
+            description: "The logging configuration for this container"
+            properties:
+              Type:
+                type: "string"
+                enum:
+                  - "json-file"
+                  - "syslog"
+                  - "journald"
+                  - "gelf"
+                  - "fluentd"
+                  - "awslogs"
+                  - "splunk"
+                  - "etwlogs"
+                  - "none"
+              Config:
+                type: "object"
+                additionalProperties:
+                  type: "string"
+          NetworkMode:
+            type: "string"
+            description: |
+              Network mode to use for this container. Supported standard values
+              are: `bridge`, `host`, `none`, and `container:<name|id>`. Any
+              other value is taken as a custom network's name to which this
+              container should connect.
+          PortBindings:
+            $ref: "#/definitions/PortMap"
+          RestartPolicy:
+            $ref: "#/definitions/RestartPolicy"
+          AutoRemove:
+            type: "boolean"
+            description: |
+              Automatically remove the container when the container's process
+              exits. This has no effect if `RestartPolicy` is set.
+          VolumeDriver:
+            type: "string"
+            description: "Driver that this container uses to mount volumes."
+          VolumesFrom:
+            type: "array"
+            description: |
+              A list of volumes to inherit from another container, specified in
+              the form `<container name>[:<ro|rw>]`.
+            items:
+              type: "string"
+          Mounts:
+            description: |
+              Specification for mounts to be added to the container.
+            type: "array"
+            items:
+              $ref: "#/definitions/Mount"
+          ConsoleSize:
+            type: "array"
+            description: |
+              Initial console size, as a `[height, width]` array.
+            x-nullable: true
+            minItems: 2
+            maxItems: 2
+            items:
+              type: "integer"
+              minimum: 0
+          Annotations:
+            type: "object"
+            description: |
+              Arbitrary non-identifying metadata attached to container and
+              provided to the runtime when the container is started.
+            additionalProperties:
+              type: "string"
+
+          # Applicable to UNIX platforms
+          CapAdd:
+            type: "array"
+            description: |
+              A list of kernel capabilities to add to the container. Conflicts
+              with option 'Capabilities'.
+            items:
+              type: "string"
+          CapDrop:
+            type: "array"
+            description: |
+              A list of kernel capabilities to drop from the container. Conflicts
+              with option 'Capabilities'.
+            items:
+              type: "string"
+          CgroupnsMode:
+            type: "string"
+            enum:
+              - "private"
+              - "host"
+            description: |
+              cgroup namespace mode for the container. Possible values are:
+
+              - `"private"`: the container runs in its own private cgroup namespace
+              - `"host"`: use the host system's cgroup namespace
+
+              If not specified, the daemon default is used, which can either be `"private"`
+              or `"host"`, depending on daemon version, kernel support and configuration.
+          Dns:
+            type: "array"
+            description: "A list of DNS servers for the container to use."
+            items:
+              type: "string"
+          DnsOptions:
+            type: "array"
+            description: "A list of DNS options."
+            items:
+              type: "string"
+          DnsSearch:
+            type: "array"
+            description: "A list of DNS search domains."
+            items:
+              type: "string"
+          ExtraHosts:
+            type: "array"
+            description: |
+              A list of hostnames/IP mappings to add to the container's `/etc/hosts`
+              file. Specified in the form `["hostname:IP"]`.
+            items:
+              type: "string"
+          GroupAdd:
+            type: "array"
+            description: |
+              A list of additional groups that the container process will run as.
+            items:
+              type: "string"
+          IpcMode:
+            type: "string"
+            description: |
+              IPC sharing mode for the container. Possible values are:
+
+              - `"none"`: own private IPC namespace, with /dev/shm not mounted
+              - `"private"`: own private IPC namespace
+              - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers
+              - `"container:<name|id>"`: join another (shareable) container's IPC namespace
+              - `"host"`: use the host system's IPC namespace
+
+              If not specified, daemon default is used, which can either be `"private"`
+              or `"shareable"`, depending on daemon version and configuration.
+          Cgroup:
+            type: "string"
+            description: "Cgroup to use for the container."
+          Links:
+            type: "array"
+            description: |
+              A list of links for the container in the form `container_name:alias`.
+            items:
+              type: "string"
+          OomScoreAdj:
+            type: "integer"
+            description: |
+              An integer value containing the score given to the container in
+              order to tune OOM killer preferences.
+            example: 500
+          PidMode:
+            type: "string"
+            description: |
+              Set the PID (Process) Namespace mode for the container. It can be
+              either:
+
+              - `"container:<name|id>"`: joins another container's PID namespace
+              - `"host"`: use the host's PID namespace inside the container
+          Privileged:
+            type: "boolean"
+            description: "Gives the container full access to the host."
+          PublishAllPorts:
+            type: "boolean"
+            description: |
+              Allocates an ephemeral host port for all of a container's
+              exposed ports.
+
+              Ports are de-allocated when the container stops and allocated when
+              the container starts. The allocated port might be changed when
+              restarting the container.
+
+              The port is selected from the ephemeral port range that depends on
+              the kernel. For example, on Linux the range is defined by
+              `/proc/sys/net/ipv4/ip_local_port_range`.
+          ReadonlyRootfs:
+            type: "boolean"
+            description: "Mount the container's root filesystem as read only."
+          SecurityOpt:
+            type: "array"
+            description: |
+              A list of string values to customize labels for MLS systems, such
+              as SELinux.
+            items:
+              type: "string"
+          StorageOpt:
+            type: "object"
+            description: |
+              Storage driver options for this container, in the form `{"size": "120G"}`.
+            additionalProperties:
+              type: "string"
+          Tmpfs:
+            type: "object"
+            description: |
+              A map of container directories which should be replaced by tmpfs
+              mounts, and their corresponding mount options. For example:
+
+              ```
+              { "/run": "rw,noexec,nosuid,size=65536k" }
+              ```
+            additionalProperties:
+              type: "string"
+          UTSMode:
+            type: "string"
+            description: "UTS namespace to use for the container."
+          UsernsMode:
+            type: "string"
+            description: |
+              Sets the user namespace mode for the container when user namespace
+              remapping is enabled.
+          ShmSize:
+            type: "integer"
+            format: "int64"
+            description: |
+              Size of `/dev/shm` in bytes. If omitted, the system uses 64MB.
+            minimum: 0
+          Sysctls:
+            type: "object"
+            description: |
+              A list of kernel parameters (sysctls) to set in the container.
+              For example:
+
+              ```
+              {"net.ipv4.ip_forward": "1"}
+              ```
+            additionalProperties:
+              type: "string"
+          Runtime:
+            type: "string"
+            description: "Runtime to use with this container."
+          # Applicable to Windows
+          Isolation:
+            type: "string"
+            description: |
+              Isolation technology of the container. (Windows only)
+            enum:
+              - "default"
+              - "process"
+              - "hyperv"
+          MaskedPaths:
+            type: "array"
+            description: |
+              The list of paths to be masked inside the container (this overrides
+              the default set of paths).
+            items:
+              type: "string"
+          ReadonlyPaths:
+            type: "array"
+            description: |
+              The list of paths to be set as read-only inside the container
+              (this overrides the default set of paths).
+            items:
+              type: "string"
+
+  ContainerConfig:
+    description: |
+      Configuration for a container that is portable between hosts.
+
+      When used as `ContainerConfig` field in an image, `ContainerConfig` is an
+      optional field containing the configuration of the container that was last
+      committed when creating the image.
+
+      Previous versions of Docker builder used this field to store build cache,
+      and it is not in active use anymore.
+    type: "object"
+    properties:
+      Hostname:
+        description: |
+          The hostname to use for the container, as a valid RFC 1123 hostname.
+        type: "string"
+        example: "439f4e91bd1d"
+      Domainname:
+        description: |
+          The domain name to use for the container.
+        type: "string"
+      User:
+        description: "The user that commands are run as inside the container."
+        type: "string"
+      AttachStdin:
+        description: "Whether to attach to `stdin`."
+        type: "boolean"
+        default: false
+      AttachStdout:
+        description: "Whether to attach to `stdout`."
+        type: "boolean"
+        default: true
+      AttachStderr:
+        description: "Whether to attach to `stderr`."
+        type: "boolean"
+        default: true
+      ExposedPorts:
+        description: |
+          An object mapping ports to an empty object in the form:
+
+          `{"<port>/<tcp|udp|sctp>": {}}`
+        type: "object"
+        x-nullable: true
+        additionalProperties:
+          type: "object"
+          enum:
+            - {}
+          default: {}
+        example: {
+          "80/tcp": {},
+          "443/tcp": {}
+        }
+      Tty:
+        description: |
+          Attach standard streams to a TTY, including `stdin` if it is not closed.
+        type: "boolean"
+        default: false
+      OpenStdin:
+        description: "Open `stdin`"
+        type: "boolean"
+        default: false
+      StdinOnce:
+        description: "Close `stdin` after one attached client disconnects"
+        type: "boolean"
+        default: false
+      Env:
+        description: |
+          A list of environment variables to set inside the container in the
+          form `["VAR=value", ...]`. A variable without `=` is removed from the
+          environment, rather than having an empty value.
+ type: "array" + items: + type: "string" + example: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Cmd: + description: | + Command to run specified as a string or an array of strings. + type: "array" + items: + type: "string" + example: ["/bin/sh"] + Healthcheck: + $ref: "#/definitions/HealthConfig" + ArgsEscaped: + description: "Command is already escaped (Windows only)" + type: "boolean" + default: false + example: false + x-nullable: true + Image: + description: | + The name (or reference) of the image to use when creating the container, + or which was used when the container was created. + type: "string" + example: "example-image:1.0" + Volumes: + description: | + An object mapping mount point paths inside the container to empty + objects. + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + WorkingDir: + description: "The working directory for commands to run in." + type: "string" + example: "/public/" + Entrypoint: + description: | + The entry point for the container as a string or an array of strings. + + If the array consists of exactly one empty string (`[""]`) then the + entry point is reset to system default (i.e., the entry point used by + docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + type: "array" + items: + type: "string" + example: [] + NetworkDisabled: + description: "Disable networking for the container." + type: "boolean" + x-nullable: true + MacAddress: + description: "MAC address of the container." + type: "string" + x-nullable: true + OnBuild: + description: | + `ONBUILD` metadata that were defined in the image's `Dockerfile`. + type: "array" + x-nullable: true + items: + type: "string" + example: [] + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + StopSignal: + description: | + Signal to stop a container as a string or unsigned integer. + type: "string" + example: "SIGTERM" + x-nullable: true + StopTimeout: + description: "Timeout to stop a container in seconds." + type: "integer" + default: 10 + x-nullable: true + Shell: + description: | + Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. + type: "array" + x-nullable: true + items: + type: "string" + example: ["/bin/sh", "-c"] + + NetworkingConfig: + description: | + NetworkingConfig represents the container's networking configuration for + each of its interfaces. + It is used for the networking configs specified in the `docker create` + and `docker network connect` commands. + type: "object" + properties: + EndpointsConfig: + description: | + A mapping of network name to endpoint configuration for that network. + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + example: + # putting an example here, instead of using the example values from + # /definitions/EndpointSettings, because containers/create currently + # does not support attaching to multiple networks, so the example request + # would be confusing if it showed that multiple networks can be contained + # in the EndpointsConfig. 
+ # TODO remove once we support multiple networks on container create (see https://github.com/moby/moby/blob/07e6b843594e061f82baa5fa23c2ff7d536c2a05/daemon/create.go#L323) + EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + + NetworkSettings: + description: "NetworkSettings exposes the network settings in the API" + type: "object" + properties: + Bridge: + description: Name of the network's bridge (for example, `docker0`). + type: "string" + example: "docker0" + SandboxID: + description: SandboxID uniquely represents a container's network stack. + type: "string" + example: "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3" + HairpinMode: + description: | + Indicates if hairpin NAT should be enabled on the virtual interface. + type: "boolean" + example: false + LinkLocalIPv6Address: + description: IPv6 unicast address using the link-local prefix. + type: "string" + example: "fe80::42:acff:fe11:1" + LinkLocalIPv6PrefixLen: + description: Prefix length of the IPv6 unicast address. + type: "integer" + example: "64" + Ports: + $ref: "#/definitions/PortMap" + SandboxKey: + description: SandboxKey identifies the sandbox + type: "string" + example: "/var/run/docker/netns/8ab54b426c38" + + # TODO is SecondaryIPAddresses actually used? + SecondaryIPAddresses: + description: "" + type: "array" + items: + $ref: "#/definitions/Address" + x-nullable: true + + # TODO is SecondaryIPv6Addresses actually used? + SecondaryIPv6Addresses: + description: "" + type: "array" + items: + $ref: "#/definitions/Address" + x-nullable: true + + # TODO properties below are part of DefaultNetworkSettings, which is + # marked as deprecated since Docker 1.9 and to be removed in Docker v17.12 + EndpointID: + description: | + EndpointID uniquely represents a service endpoint in a Sandbox. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" + Gateway: + description: | + Gateway address for the default "bridge" network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "172.17.0.1" + GlobalIPv6Address: + description: | + Global IPv6 address for the default "bridge" network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "2001:db8::5689" + GlobalIPv6PrefixLen: + description: | + Mask length of the global IPv6 address. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "integer" + example: 64 + IPAddress: + description: | + IPv4 address for the default "bridge" network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "172.17.0.4" + IPPrefixLen: + description: | + Mask length of the IPv4 address. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "integer" + example: 16 + IPv6Gateway: + description: | + IPv6 gateway address for this network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "2001:db8:2::100" + MacAddress: + description: | + MAC address for the container on the default "bridge" network. + +
+
+          > **Deprecated**: This field is only propagated when attached to the
+          > default "bridge" network. Use the information from the "bridge"
+          > network inside the `Networks` map instead, which contains the same
+          > information. This field was deprecated in Docker 1.9 and is scheduled
+          > to be removed in Docker 17.12.0
+        type: "string"
+        example: "02:42:ac:11:00:04"
+      Networks:
+        description: |
+          Information about all networks that the container is connected to.
+        type: "object"
+        additionalProperties:
+          $ref: "#/definitions/EndpointSettings"
+
+  Address:
+    description: Address represents an IPv4 or IPv6 IP address.
+    type: "object"
+    properties:
+      Addr:
+        description: IP address.
+        type: "string"
+      PrefixLen:
+        description: Mask length of the IP address.
+        type: "integer"
+
+  PortMap:
+    description: |
+      PortMap describes the mapping of container ports to host ports, using the
+      container's port-number and protocol as key in the format `<port>/<protocol>`,
+      for example, `80/udp`.
+
+      If a container's port is mapped for multiple protocols, separate entries
+      are added to the mapping table.
+    type: "object"
+    additionalProperties:
+      type: "array"
+      x-nullable: true
+      items:
+        $ref: "#/definitions/PortBinding"
+    example:
+      "443/tcp":
+        - HostIp: "127.0.0.1"
+          HostPort: "4443"
+      "80/tcp":
+        - HostIp: "0.0.0.0"
+          HostPort: "80"
+        - HostIp: "0.0.0.0"
+          HostPort: "8080"
+      "80/udp":
+        - HostIp: "0.0.0.0"
+          HostPort: "80"
+      "53/udp":
+        - HostIp: "0.0.0.0"
+          HostPort: "53"
+      "2377/tcp": null
+
+  PortBinding:
+    description: |
+      PortBinding represents a binding between a host IP address and a host
+      port.
+    type: "object"
+    properties:
+      HostIp:
+        description: "Host IP address that the container's port is mapped to."
+        type: "string"
+        example: "127.0.0.1"
+      HostPort:
+        description: "Host port number that the container's port is mapped to."
+        type: "string"
+        example: "4443"
+
+  GraphDriverData:
+    description: |
+      Information about the storage driver used to store the container's and
+      image's filesystem.
+    type: "object"
+    required: [Name, Data]
+    properties:
+      Name:
+        description: "Name of the storage driver."
+        type: "string"
+        x-nullable: false
+        example: "overlay2"
+      Data:
+        description: |
+          Low-level storage metadata, provided as key/value pairs.
+
+          This information is driver-specific, and depends on the storage driver
+          in use; it should be used for informational purposes only.
+        type: "object"
+        x-nullable: false
+        additionalProperties:
+          type: "string"
+        example: {
+          "MergedDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/merged",
+          "UpperDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/diff",
+          "WorkDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/work"
+        }
+
+  FilesystemChange:
+    description: |
+      Change in the container's filesystem.
+    type: "object"
+    required: [Path, Kind]
+    properties:
+      Path:
+        description: |
+          Path to file or directory that has changed.
+        type: "string"
+        x-nullable: false
+      Kind:
+        $ref: "#/definitions/ChangeType"
+
+  ChangeType:
+    description: |
+      Kind of change
+
+      Can be one of:
+
+      - `0`: Modified ("C")
+      - `1`: Added ("A")
+      - `2`: Deleted ("D")
+    type: "integer"
+    format: "uint8"
+    enum: [0, 1, 2]
+    x-nullable: false
+
+  ImageInspect:
+    description: |
+      Information about an image in the local image cache.
+    type: "object"
+    properties:
+      Id:
+        description: |
+          ID is the content-addressable ID of an image.
+ + This identifier is a content-addressable digest calculated from the + image's configuration (which includes the digests of layers used by + the image). + + Note that this digest differs from the `RepoDigests` below, which + holds digests of image manifests that reference the image. + type: "string" + x-nullable: false + example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" + RepoTags: + description: | + List of image names/tags in the local image cache that reference this + image. + + Multiple image tags can refer to the same image, and this list may be + empty if no tags reference the image, in which case the image is + "untagged", in which case it can still be referenced by its ID. + type: "array" + items: + type: "string" + example: + - "example:1.0" + - "example:latest" + - "example:stable" + - "internal.registry.example.com:5000/example:1.0" + RepoDigests: + description: | + List of content-addressable digests of locally available image manifests + that the image is referenced from. Multiple manifests can refer to the + same image. + + These digests are usually only available if the image was either pulled + from a registry, or if the image was pushed to a registry, which is when + the manifest is generated and its digest calculated. + type: "array" + items: + type: "string" + example: + - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb" + - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" + Parent: + description: | + ID of the parent image. + + Depending on how the image was created, this field may be empty and + is only set for images that were built/created locally. This field + is empty if the image was pulled from an image registry. + type: "string" + x-nullable: false + example: "" + Comment: + description: | + Optional message that was set when committing or importing the image. + type: "string" + x-nullable: false + example: "" + Created: + description: | + Date and time at which the image was created, formatted in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + x-nullable: false + example: "2022-02-04T21:20:12.497794809Z" + Container: + description: | + The ID of the container that was used to create the image. + + Depending on how the image was created, this field may be empty. + type: "string" + x-nullable: false + example: "65974bc86f1770ae4bff79f651ebdbce166ae9aada632ee3fa9af3a264911735" + ContainerConfig: + $ref: "#/definitions/ContainerConfig" + DockerVersion: + description: | + The version of Docker that was used to build the image. + + Depending on how the image was created, this field may be empty. + type: "string" + x-nullable: false + example: "20.10.7" + Author: + description: | + Name of the author that was specified when committing the image, or as + specified through MAINTAINER (deprecated) in the Dockerfile. + type: "string" + x-nullable: false + example: "" + Config: + $ref: "#/definitions/ContainerConfig" + Architecture: + description: | + Hardware CPU architecture that the image runs on. + type: "string" + x-nullable: false + example: "arm" + Variant: + description: | + CPU architecture variant (presently ARM-only). + type: "string" + x-nullable: true + example: "v7" + Os: + description: | + Operating System the image is built to run on. 
+ type: "string" + x-nullable: false + example: "linux" + OsVersion: + description: | + Operating System version the image is built to run on (especially + for Windows). + type: "string" + example: "" + x-nullable: true + Size: + description: | + Total size of the image including all layers it is composed of. + type: "integer" + format: "int64" + x-nullable: false + example: 1239828 + VirtualSize: + description: | + Total size of the image including all layers it is composed of. + + In versions of Docker before v1.10, this field was calculated from + the image itself and all of its parent images. Images are now stored + self-contained, and no longer use a parent-chain, making this field + an equivalent of the Size field. + + > **Deprecated**: this field is kept for backward compatibility, but + > will be removed in API v1.44. + type: "integer" + format: "int64" + example: 1239828 + GraphDriver: + $ref: "#/definitions/GraphDriverData" + RootFS: + description: | + Information about the image's RootFS, including the layer IDs. + type: "object" + required: [Type] + properties: + Type: + type: "string" + x-nullable: false + example: "layers" + Layers: + type: "array" + items: + type: "string" + example: + - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" + - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + Metadata: + description: | + Additional metadata of the image in the local cache. This information + is local to the daemon, and not part of the image itself. + type: "object" + properties: + LastTagTime: + description: | + Date and time at which the image was last tagged in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + + This information is only available if the image was tagged locally, + and omitted otherwise. + type: "string" + format: "dateTime" + example: "2022-02-28T14:40:02.623929178Z" + x-nullable: true + ImageSummary: + type: "object" + required: + - Id + - ParentId + - RepoTags + - RepoDigests + - Created + - Size + - SharedSize + - Labels + - Containers + properties: + Id: + description: | + ID is the content-addressable ID of an image. + + This identifier is a content-addressable digest calculated from the + image's configuration (which includes the digests of layers used by + the image). + + Note that this digest differs from the `RepoDigests` below, which + holds digests of image manifests that reference the image. + type: "string" + x-nullable: false + example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" + ParentId: + description: | + ID of the parent image. + + Depending on how the image was created, this field may be empty and + is only set for images that were built/created locally. This field + is empty if the image was pulled from an image registry. + type: "string" + x-nullable: false + example: "" + RepoTags: + description: | + List of image names/tags in the local image cache that reference this + image. + + Multiple image tags can refer to the same image, and this list may be + empty if no tags reference the image, in which case the image is + "untagged", in which case it can still be referenced by its ID. + type: "array" + x-nullable: false + items: + type: "string" + example: + - "example:1.0" + - "example:latest" + - "example:stable" + - "internal.registry.example.com:5000/example:1.0" + RepoDigests: + description: | + List of content-addressable digests of locally available image manifests + that the image is referenced from. 
Multiple manifests can refer to the
+          same image.
+
+          These digests are usually only available if the image was either pulled
+          from a registry, or if the image was pushed to a registry, which is when
+          the manifest is generated and its digest calculated.
+        type: "array"
+        x-nullable: false
+        items:
+          type: "string"
+        example:
+          - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb"
+          - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578"
+      Created:
+        description: |
+          Date and time at which the image was created as a Unix timestamp
+          (number of seconds since EPOCH).
+        type: "integer"
+        x-nullable: false
+        example: 1644009612
+      Size:
+        description: |
+          Total size of the image including all layers it is composed of.
+        type: "integer"
+        format: "int64"
+        x-nullable: false
+        example: 172064416
+      SharedSize:
+        description: |
+          Total size of image layers that are shared between this image and other
+          images.
+
+          This size is not calculated by default. `-1` indicates that the value
+          has not been set / calculated.
+        type: "integer"
+        format: "int64"
+        x-nullable: false
+        example: 1239828
+      VirtualSize:
+        description: |-
+          Total size of the image including all layers it is composed of.
+
+          In versions of Docker before v1.10, this field was calculated from
+          the image itself and all of its parent images. Images are now stored
+          self-contained, and no longer use a parent-chain, making this field
+          an equivalent of the Size field.
+
+          Deprecated: this field is kept for backward compatibility, and will be removed in API v1.44.
+        type: "integer"
+        format: "int64"
+        example: 172064416
+      Labels:
+        description: "User-defined key/value metadata."
+        type: "object"
+        x-nullable: false
+        additionalProperties:
+          type: "string"
+        example:
+          com.example.some-label: "some-value"
+          com.example.some-other-label: "some-other-value"
+      Containers:
+        description: |
+          Number of containers using this image. Includes both stopped and running
+          containers.
+
+          This size is not calculated by default, and depends on which API endpoint
+          is used. `-1` indicates that the value has not been set / calculated.
+        x-nullable: false
+        type: "integer"
+        example: 2
+
+  AuthConfig:
+    type: "object"
+    properties:
+      username:
+        type: "string"
+      password:
+        type: "string"
+      email:
+        type: "string"
+      serveraddress:
+        type: "string"
+    example:
+      username: "hannibal"
+      password: "xxxx"
+      serveraddress: "https://index.docker.io/v1/"
+
+  ProcessConfig:
+    type: "object"
+    properties:
+      privileged:
+        type: "boolean"
+      user:
+        type: "string"
+      tty:
+        type: "boolean"
+      entrypoint:
+        type: "string"
+      arguments:
+        type: "array"
+        items:
+          type: "string"
+
+  Volume:
+    type: "object"
+    required: [Name, Driver, Mountpoint, Labels, Scope, Options]
+    properties:
+      Name:
+        type: "string"
+        description: "Name of the volume."
+        x-nullable: false
+        example: "tardis"
+      Driver:
+        type: "string"
+        description: "Name of the volume driver used by the volume."
+        x-nullable: false
+        example: "custom"
+      Mountpoint:
+        type: "string"
+        description: "Mount path of the volume on the host."
+        x-nullable: false
+        example: "/var/lib/docker/volumes/tardis"
+      CreatedAt:
+        type: "string"
+        format: "dateTime"
+        description: "Date/Time the volume was created."
+        example: "2016-06-07T20:31:11.853781916Z"
+      Status:
+        type: "object"
+        description: |
+          Low-level details about the volume, provided by the volume driver.
+ Details are returned as a map with key/value pairs: + `{"key":"value","key2":"value2"}`. + + The `Status` field is optional, and is omitted if the volume driver + does not support this feature. + additionalProperties: + type: "object" + example: + hello: "world" + Labels: + type: "object" + description: "User-defined key/value metadata." + x-nullable: false + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Scope: + type: "string" + description: | + The level at which the volume exists. Either `global` for cluster-wide, + or `local` for machine level. + default: "local" + x-nullable: false + enum: ["local", "global"] + example: "local" + ClusterVolume: + $ref: "#/definitions/ClusterVolume" + Options: + type: "object" + description: | + The driver specific options used when creating the volume. + additionalProperties: + type: "string" + example: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" + UsageData: + type: "object" + x-nullable: true + x-go-name: "UsageData" + required: [Size, RefCount] + description: | + Usage details about the volume. This information is used by the + `GET /system/df` endpoint, and omitted in other endpoints. + properties: + Size: + type: "integer" + format: "int64" + default: -1 + description: | + Amount of disk space used by the volume (in bytes). This information + is only available for volumes created with the `"local"` volume + driver. For volumes created with other volume drivers, this field + is set to `-1` ("not available") + x-nullable: false + RefCount: + type: "integer" + format: "int64" + default: -1 + description: | + The number of containers referencing this volume. This field + is set to `-1` if the reference-count is not available. + x-nullable: false + + VolumeCreateOptions: + description: "Volume configuration" + type: "object" + title: "VolumeConfig" + x-go-name: "CreateOptions" + properties: + Name: + description: | + The new volume's name. If not specified, Docker generates a name. + type: "string" + x-nullable: false + example: "tardis" + Driver: + description: "Name of the volume driver to use." + type: "string" + default: "local" + x-nullable: false + example: "custom" + DriverOpts: + description: | + A mapping of driver options and values. These options are + passed directly to the driver and are driver specific. + type: "object" + additionalProperties: + type: "string" + example: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + ClusterVolumeSpec: + $ref: "#/definitions/ClusterVolumeSpec" + + VolumeListResponse: + type: "object" + title: "VolumeListResponse" + x-go-name: "ListResponse" + description: "Volume list response" + properties: + Volumes: + type: "array" + description: "List of volumes" + items: + $ref: "#/definitions/Volume" + Warnings: + type: "array" + description: | + Warnings that occurred when fetching the list of volumes. 
+        items:
+          type: "string"
+        example: []
+
+  Network:
+    type: "object"
+    properties:
+      Name:
+        type: "string"
+      Id:
+        type: "string"
+      Created:
+        type: "string"
+        format: "dateTime"
+      Scope:
+        type: "string"
+      Driver:
+        type: "string"
+      EnableIPv6:
+        type: "boolean"
+      IPAM:
+        $ref: "#/definitions/IPAM"
+      Internal:
+        type: "boolean"
+      Attachable:
+        type: "boolean"
+      Ingress:
+        type: "boolean"
+      Containers:
+        type: "object"
+        additionalProperties:
+          $ref: "#/definitions/NetworkContainer"
+      Options:
+        type: "object"
+        additionalProperties:
+          type: "string"
+      Labels:
+        type: "object"
+        additionalProperties:
+          type: "string"
+    example:
+      Name: "net01"
+      Id: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99"
+      Created: "2016-10-19T04:33:30.360899459Z"
+      Scope: "local"
+      Driver: "bridge"
+      EnableIPv6: false
+      IPAM:
+        Driver: "default"
+        Config:
+          - Subnet: "172.19.0.0/16"
+            Gateway: "172.19.0.1"
+        Options:
+          foo: "bar"
+      Internal: false
+      Attachable: false
+      Ingress: false
+      Containers:
+        19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c:
+          Name: "test"
+          EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a"
+          MacAddress: "02:42:ac:13:00:02"
+          IPv4Address: "172.19.0.2/16"
+          IPv6Address: ""
+      Options:
+        com.docker.network.bridge.default_bridge: "true"
+        com.docker.network.bridge.enable_icc: "true"
+        com.docker.network.bridge.enable_ip_masquerade: "true"
+        com.docker.network.bridge.host_binding_ipv4: "0.0.0.0"
+        com.docker.network.bridge.name: "docker0"
+        com.docker.network.driver.mtu: "1500"
+      Labels:
+        com.example.some-label: "some-value"
+        com.example.some-other-label: "some-other-value"
+  IPAM:
+    type: "object"
+    properties:
+      Driver:
+        description: "Name of the IPAM driver to use."
+        type: "string"
+        default: "default"
+      Config:
+        description: |
+          List of IPAM configuration options, specified as a map:
+
+          ```
+          {"Subnet": <CIDR>, "IPRange": <CIDR>, "Gateway": <IP address>, "AuxAddress": <device_name:IP address>}
+          ```
+        type: "array"
+        items:
+          $ref: "#/definitions/IPAMConfig"
+      Options:
+        description: "Driver-specific options, specified as a map."
+        type: "object"
+        additionalProperties:
+          type: "string"
+
+  IPAMConfig:
+    type: "object"
+    properties:
+      Subnet:
+        type: "string"
+      IPRange:
+        type: "string"
+      Gateway:
+        type: "string"
+      AuxiliaryAddresses:
+        type: "object"
+        additionalProperties:
+          type: "string"
+
+  NetworkContainer:
+    type: "object"
+    properties:
+      Name:
+        type: "string"
+      EndpointID:
+        type: "string"
+      MacAddress:
+        type: "string"
+      IPv4Address:
+        type: "string"
+      IPv6Address:
+        type: "string"
+
+  BuildInfo:
+    type: "object"
+    properties:
+      id:
+        type: "string"
+      stream:
+        type: "string"
+      error:
+        type: "string"
+      errorDetail:
+        $ref: "#/definitions/ErrorDetail"
+      status:
+        type: "string"
+      progress:
+        type: "string"
+      progressDetail:
+        $ref: "#/definitions/ProgressDetail"
+      aux:
+        $ref: "#/definitions/ImageID"
+
+  BuildCache:
+    type: "object"
+    description: |
+      BuildCache contains information about a build cache record.
+    properties:
+      ID:
+        type: "string"
+        description: |
+          Unique ID of the build cache record.
+        example: "ndlpt0hhvkqcdfkputsk4cq9c"
+      Parent:
+        description: |
+          ID of the parent build cache record.
+
+          > **Deprecated**: This field is deprecated, and omitted if empty.
+        type: "string"
+        x-nullable: true
+        example: ""
+      Parents:
+        description: |
+          List of parent build cache record IDs.
+        type: "array"
+        items:
+          type: "string"
+        x-nullable: true
+        example: ["hw53o5aio51xtltp5xjp8v7fx"]
+      Type:
+        type: "string"
+        description: |
+          Cache record type.
+ example: "regular" + # see https://github.com/moby/buildkit/blob/fce4a32258dc9d9664f71a4831d5de10f0670677/client/diskusage.go#L75-L84 + enum: + - "internal" + - "frontend" + - "source.local" + - "source.git.checkout" + - "exec.cachemount" + - "regular" + Description: + type: "string" + description: | + Description of the build-step that produced the build cache. + example: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" + InUse: + type: "boolean" + description: | + Indicates if the build cache is in use. + example: false + Shared: + type: "boolean" + description: | + Indicates if the build cache is shared. + example: true + Size: + description: | + Amount of disk space used by the build cache (in bytes). + type: "integer" + example: 51 + CreatedAt: + description: | + Date and time at which the build cache was created in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + LastUsedAt: + description: | + Date and time at which the build cache was last used in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + x-nullable: true + example: "2017-08-09T07:09:37.632105588Z" + UsageCount: + type: "integer" + example: 26 + + ImageID: + type: "object" + description: "Image ID or Digest" + properties: + ID: + type: "string" + example: + ID: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" + + CreateImageInfo: + type: "object" + properties: + id: + type: "string" + error: + type: "string" + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + + PushImageInfo: + type: "object" + properties: + error: + type: "string" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + + ErrorDetail: + type: "object" + properties: + code: + type: "integer" + message: + type: "string" + + ProgressDetail: + type: "object" + properties: + current: + type: "integer" + total: + type: "integer" + + ErrorResponse: + description: "Represents an error." + type: "object" + required: ["message"] + properties: + message: + description: "The error message." + type: "string" + x-nullable: false + example: + message: "Something went wrong." + + IdResponse: + description: "Response to an API call that returns just an Id" + type: "object" + required: ["Id"] + properties: + Id: + description: "The id of the newly created object." + type: "string" + x-nullable: false + + EndpointSettings: + description: "Configuration for a network endpoint." + type: "object" + properties: + # Configurations + IPAMConfig: + $ref: "#/definitions/EndpointIPAMConfig" + Links: + type: "array" + items: + type: "string" + example: + - "container_1" + - "container_2" + Aliases: + type: "array" + items: + type: "string" + example: + - "server_x" + - "server_y" + + # Operational data + NetworkID: + description: | + Unique ID of the network. + type: "string" + example: "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a" + EndpointID: + description: | + Unique ID for the service endpoint in a Sandbox. + type: "string" + example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" + Gateway: + description: | + Gateway address for this network. 
+ type: "string" + example: "172.17.0.1" + IPAddress: + description: | + IPv4 address. + type: "string" + example: "172.17.0.4" + IPPrefixLen: + description: | + Mask length of the IPv4 address. + type: "integer" + example: 16 + IPv6Gateway: + description: | + IPv6 gateway address. + type: "string" + example: "2001:db8:2::100" + GlobalIPv6Address: + description: | + Global IPv6 address. + type: "string" + example: "2001:db8::5689" + GlobalIPv6PrefixLen: + description: | + Mask length of the global IPv6 address. + type: "integer" + format: "int64" + example: 64 + MacAddress: + description: | + MAC address for the endpoint on this network. + type: "string" + example: "02:42:ac:11:00:04" + DriverOpts: + description: | + DriverOpts is a mapping of driver options and values. These options + are passed directly to the driver and are driver specific. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + + EndpointIPAMConfig: + description: | + EndpointIPAMConfig represents an endpoint's IPAM configuration. + type: "object" + x-nullable: true + properties: + IPv4Address: + type: "string" + example: "172.20.30.33" + IPv6Address: + type: "string" + example: "2001:db8:abcd::3033" + LinkLocalIPs: + type: "array" + items: + type: "string" + example: + - "169.254.34.68" + - "fe80::3468" + + PluginMount: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Source, Destination, Type, Options] + properties: + Name: + type: "string" + x-nullable: false + example: "some-mount" + Description: + type: "string" + x-nullable: false + example: "This is a mount that's used by the plugin." + Settable: + type: "array" + items: + type: "string" + Source: + type: "string" + example: "/var/lib/docker/plugins/" + Destination: + type: "string" + x-nullable: false + example: "/mnt/state" + Type: + type: "string" + x-nullable: false + example: "bind" + Options: + type: "array" + items: + type: "string" + example: + - "rbind" + - "rw" + + PluginDevice: + type: "object" + required: [Name, Description, Settable, Path] + x-nullable: false + properties: + Name: + type: "string" + x-nullable: false + Description: + type: "string" + x-nullable: false + Settable: + type: "array" + items: + type: "string" + Path: + type: "string" + example: "/dev/fuse" + + PluginEnv: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + Description: + x-nullable: false + type: "string" + Settable: + type: "array" + items: + type: "string" + Value: + type: "string" + + PluginInterfaceType: + type: "object" + x-nullable: false + required: [Prefix, Capability, Version] + properties: + Prefix: + type: "string" + x-nullable: false + Capability: + type: "string" + x-nullable: false + Version: + type: "string" + x-nullable: false + + PluginPrivilege: + description: | + Describes a permission the user has to accept upon installing + the plugin. 
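+ # Illustrative sketch only: on the wire, a privilege is a JSON object such as
+ # {"Name": "network", "Value": ["host"]} (values taken from the examples below).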
+ type: "object" + x-go-name: "PluginPrivilege" + properties: + Name: + type: "string" + example: "network" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - "host" + + Plugin: + description: "A plugin for the Engine API" + type: "object" + required: [Settings, Enabled, Config, Name] + properties: + Id: + type: "string" + example: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" + Name: + type: "string" + x-nullable: false + example: "tiborvass/sample-volume-plugin" + Enabled: + description: + True if the plugin is running. False if the plugin is not running, + only installed. + type: "boolean" + x-nullable: false + example: true + Settings: + description: "Settings that can be modified by users." + type: "object" + x-nullable: false + required: [Args, Devices, Env, Mounts] + properties: + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + type: "string" + example: + - "DEBUG=0" + Args: + type: "array" + items: + type: "string" + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PluginReference: + description: "plugin remote reference used to push/pull the plugin" + type: "string" + x-nullable: false + example: "localhost:5000/tiborvass/sample-volume-plugin:latest" + Config: + description: "The config of a plugin." + type: "object" + x-nullable: false + required: + - Description + - Documentation + - Interface + - Entrypoint + - WorkDir + - Network + - Linux + - PidHost + - PropagatedMount + - IpcHost + - Mounts + - Env + - Args + properties: + DockerVersion: + description: "Docker Version used to create the plugin" + type: "string" + x-nullable: false + example: "17.06.0-ce" + Description: + type: "string" + x-nullable: false + example: "A sample volume plugin for Docker" + Documentation: + type: "string" + x-nullable: false + example: "https://docs.docker.com/engine/extend/plugins/" + Interface: + description: "The interface between Docker and the plugin" + x-nullable: false + type: "object" + required: [Types, Socket] + properties: + Types: + type: "array" + items: + $ref: "#/definitions/PluginInterfaceType" + example: + - "docker.volumedriver/1.0" + Socket: + type: "string" + x-nullable: false + example: "plugins.sock" + ProtocolScheme: + type: "string" + example: "some.protocol/v1.0" + description: "Protocol to use for clients connecting to the plugin." 
+ enum: + - "" + - "moby.plugins.http/v1" + Entrypoint: + type: "array" + items: + type: "string" + example: + - "/usr/bin/sample-volume-plugin" + - "/data" + WorkDir: + type: "string" + x-nullable: false + example: "/bin/" + User: + type: "object" + x-nullable: false + properties: + UID: + type: "integer" + format: "uint32" + example: 1000 + GID: + type: "integer" + format: "uint32" + example: 1000 + Network: + type: "object" + x-nullable: false + required: [Type] + properties: + Type: + x-nullable: false + type: "string" + example: "host" + Linux: + type: "object" + x-nullable: false + required: [Capabilities, AllowAllDevices, Devices] + properties: + Capabilities: + type: "array" + items: + type: "string" + example: + - "CAP_SYS_ADMIN" + - "CAP_SYSLOG" + AllowAllDevices: + type: "boolean" + x-nullable: false + example: false + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PropagatedMount: + type: "string" + x-nullable: false + example: "/mnt/volumes" + IpcHost: + type: "boolean" + x-nullable: false + example: false + PidHost: + type: "boolean" + x-nullable: false + example: false + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + $ref: "#/definitions/PluginEnv" + example: + - Name: "DEBUG" + Description: "If set, prints debug messages" + Settable: null + Value: "0" + Args: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + example: "args" + Description: + x-nullable: false + type: "string" + example: "command line arguments" + Settable: + type: "array" + items: + type: "string" + Value: + type: "array" + items: + type: "string" + rootfs: + type: "object" + properties: + type: + type: "string" + example: "layers" + diff_ids: + type: "array" + items: + type: "string" + example: + - "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887" + - "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" + + ObjectVersion: + description: | + The version number of the object such as node, service, etc. This is needed + to avoid conflicting writes. The client must send the version number along + with the modified specification when updating these objects. + + This approach ensures safe concurrency and determinism in that the change + on the object may not be applied if the version number has changed from the + last read. In other words, if two update requests specify the same base + version, only one of the requests can succeed. As a result, two separate + update requests that happen at the same time will not unintentionally + overwrite each other. + type: "object" + properties: + Index: + type: "integer" + format: "uint64" + example: 373531 + + NodeSpec: + type: "object" + properties: + Name: + description: "Name for the node." + type: "string" + example: "my-node" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Role: + description: "Role of the node." + type: "string" + enum: + - "worker" + - "manager" + example: "manager" + Availability: + description: "Availability of the node." 
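+ # Illustrative, assumed example: availability is usually changed out-of-band
+ # with the CLI, e.g. `docker node update --availability drain <node-name>`;
+ # `drain` prevents new tasks from being scheduled on the node.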
+ type: "string" + enum: + - "active" + - "pause" + - "drain" + example: "active" + example: + Availability: "active" + Name: "node-name" + Role: "manager" + Labels: + foo: "bar" + + Node: + type: "object" + properties: + ID: + type: "string" + example: "24ifsmvkjbyhk" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + description: | + Date and time at which the node was added to the swarm in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + UpdatedAt: + description: | + Date and time at which the node was last updated in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2017-08-09T07:09:37.632105588Z" + Spec: + $ref: "#/definitions/NodeSpec" + Description: + $ref: "#/definitions/NodeDescription" + Status: + $ref: "#/definitions/NodeStatus" + ManagerStatus: + $ref: "#/definitions/ManagerStatus" + + NodeDescription: + description: | + NodeDescription encapsulates the properties of the Node as reported by the + agent. + type: "object" + properties: + Hostname: + type: "string" + example: "bf3067039e47" + Platform: + $ref: "#/definitions/Platform" + Resources: + $ref: "#/definitions/ResourceObject" + Engine: + $ref: "#/definitions/EngineDescription" + TLSInfo: + $ref: "#/definitions/TLSInfo" + + Platform: + description: | + Platform represents the platform (Arch/OS). + type: "object" + properties: + Architecture: + description: | + Architecture represents the hardware architecture (for example, + `x86_64`). + type: "string" + example: "x86_64" + OS: + description: | + OS represents the Operating System (for example, `linux` or `windows`). + type: "string" + example: "linux" + + EngineDescription: + description: "EngineDescription provides information about an engine." + type: "object" + properties: + EngineVersion: + type: "string" + example: "17.06.0" + Labels: + type: "object" + additionalProperties: + type: "string" + example: + foo: "bar" + Plugins: + type: "array" + items: + type: "object" + properties: + Type: + type: "string" + Name: + type: "string" + example: + - Type: "Log" + Name: "awslogs" + - Type: "Log" + Name: "fluentd" + - Type: "Log" + Name: "gcplogs" + - Type: "Log" + Name: "gelf" + - Type: "Log" + Name: "journald" + - Type: "Log" + Name: "json-file" + - Type: "Log" + Name: "logentries" + - Type: "Log" + Name: "splunk" + - Type: "Log" + Name: "syslog" + - Type: "Network" + Name: "bridge" + - Type: "Network" + Name: "host" + - Type: "Network" + Name: "ipvlan" + - Type: "Network" + Name: "macvlan" + - Type: "Network" + Name: "null" + - Type: "Network" + Name: "overlay" + - Type: "Volume" + Name: "local" + - Type: "Volume" + Name: "localhost:5000/vieux/sshfs:latest" + - Type: "Volume" + Name: "vieux/sshfs:latest" + + TLSInfo: + description: | + Information about the issuer of leaf TLS certificates and the trusted root + CA certificate. + type: "object" + properties: + TrustRoot: + description: | + The root CA certificate(s) that are used to validate leaf TLS + certificates. + type: "string" + CertIssuerSubject: + description: + The base64-url-safe-encoded raw subject bytes of the issuer. + type: "string" + CertIssuerPublicKey: + description: | + The base64-url-safe-encoded raw public key bytes of the issuer. 
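+ # For illustration: these fields hold raw DER bytes, base64-url-safe encoded.
+ # The CertIssuerSubject example below ("MBMxETAPBgNVBAMTCHN3YXJtLWNh") decodes
+ # to the X.500 subject CN=swarm-ca.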
+ type: "string" + example: + TrustRoot: | + -----BEGIN CERTIFICATE----- + MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw + EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 + MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH + A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf + 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB + Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO + PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz + pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H + -----END CERTIFICATE----- + CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" + CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" + + NodeStatus: + description: | + NodeStatus represents the status of a node. + + It provides the current status of the node, as seen by the manager. + type: "object" + properties: + State: + $ref: "#/definitions/NodeState" + Message: + type: "string" + example: "" + Addr: + description: "IP address of the node." + type: "string" + example: "172.17.0.2" + + NodeState: + description: "NodeState represents the state of a node." + type: "string" + enum: + - "unknown" + - "down" + - "ready" + - "disconnected" + example: "ready" + + ManagerStatus: + description: | + ManagerStatus represents the status of a manager. + + It provides the current status of a node's manager component, if the node + is a manager. + x-nullable: true + type: "object" + properties: + Leader: + type: "boolean" + default: false + example: true + Reachability: + $ref: "#/definitions/Reachability" + Addr: + description: | + The IP address and port at which the manager is reachable. + type: "string" + example: "10.0.0.46:2377" + + Reachability: + description: "Reachability represents the reachability of a node." + type: "string" + enum: + - "unknown" + - "unreachable" + - "reachable" + example: "reachable" + + SwarmSpec: + description: "User modifiable swarm configuration." + type: "object" + properties: + Name: + description: "Name of the swarm." + type: "string" + example: "default" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.corp.type: "production" + com.example.corp.department: "engineering" + Orchestration: + description: "Orchestration configuration." + type: "object" + x-nullable: true + properties: + TaskHistoryRetentionLimit: + description: | + The number of historic tasks to keep per instance or node. If + negative, never remove completed or failed tasks. + type: "integer" + format: "int64" + example: 10 + Raft: + description: "Raft configuration." + type: "object" + properties: + SnapshotInterval: + description: "The number of log entries between snapshots." + type: "integer" + format: "uint64" + example: 10000 + KeepOldSnapshots: + description: | + The number of snapshots to keep beyond the current snapshot. + type: "integer" + format: "uint64" + LogEntriesForSlowFollowers: + description: | + The number of log entries to keep around to sync up slow followers + after a snapshot is created. + type: "integer" + format: "uint64" + example: 500 + ElectionTick: + description: | + The number of ticks that a follower will wait for a message from + the leader before becoming a candidate and starting an election. + `ElectionTick` must be greater than `HeartbeatTick`. 
+
+ A tick currently defaults to one second, so these translate
+ directly to seconds currently, but this is NOT guaranteed.
+ type: "integer"
+ example: 3
+ HeartbeatTick:
+ description: |
+ The number of ticks between heartbeats. Every HeartbeatTick ticks,
+ the leader will send a heartbeat to the followers.
+
+ A tick currently defaults to one second, so these translate
+ directly to seconds currently, but this is NOT guaranteed.
+ type: "integer"
+ example: 1
+ Dispatcher:
+ description: "Dispatcher configuration."
+ type: "object"
+ x-nullable: true
+ properties:
+ HeartbeatPeriod:
+ description: |
+ The delay for an agent to send a heartbeat to the dispatcher.
+ type: "integer"
+ format: "int64"
+ example: 5000000000
+ CAConfig:
+ description: "CA configuration."
+ type: "object"
+ x-nullable: true
+ properties:
+ NodeCertExpiry:
+ description: "The duration node certificates are issued for."
+ type: "integer"
+ format: "int64"
+ example: 7776000000000000
+ ExternalCAs:
+ description: |
+ Configuration for forwarding signing requests to an external
+ certificate authority.
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Protocol:
+ description: |
+ Protocol for communication with the external CA (currently
+ only `cfssl` is supported).
+ type: "string"
+ enum:
+ - "cfssl"
+ default: "cfssl"
+ URL:
+ description: |
+ URL where certificate signing requests should be sent.
+ type: "string"
+ Options:
+ description: |
+ An object with key/value pairs that are interpreted as
+ protocol-specific options for the external CA driver.
+ type: "object"
+ additionalProperties:
+ type: "string"
+ CACert:
+ description: |
+ The root CA certificate (in PEM format) this external CA uses
+ to issue TLS certificates (assumed to be to the current swarm
+ root CA certificate if not provided).
+ type: "string"
+ SigningCACert:
+ description: |
+ The desired signing CA certificate for all swarm node TLS leaf
+ certificates, in PEM format.
+ type: "string"
+ SigningCAKey:
+ description: |
+ The desired signing CA key for all swarm node TLS leaf certificates,
+ in PEM format.
+ type: "string"
+ ForceRotate:
+ description: |
+ An integer whose purpose is to force swarm to generate a new
+ signing CA certificate and key, if none have been specified in
+ `SigningCACert` and `SigningCAKey`.
+ format: "uint64"
+ type: "integer"
+ EncryptionConfig:
+ description: "Parameters related to encryption-at-rest."
+ type: "object"
+ properties:
+ AutoLockManagers:
+ description: |
+ If set, generate a key and use it to lock data stored on the
+ managers.
+ type: "boolean"
+ example: false
+ TaskDefaults:
+ description: "Defaults for creating tasks in this cluster."
+ type: "object"
+ properties:
+ LogDriver:
+ description: |
+ The log driver to use for tasks created in the orchestrator if
+ unspecified by a service.
+
+ Updating this value only affects new tasks. Existing tasks continue
+ to use their previously configured log driver until recreated.
+ type: "object"
+ properties:
+ Name:
+ description: |
+ The log driver to use as a default for new tasks.
+ type: "string"
+ example: "json-file"
+ Options:
+ description: |
+ Driver-specific options for the selected log driver, specified
+ as key/value pairs.
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ "max-file": "10"
+ "max-size": "100m"
+
+ # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but
+ # without `JoinTokens`.
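+ # Illustrative sketch (fields elided): per the definitions that follow, a
+ # `GET /swarm` response is a `ClusterInfo` object plus `JoinTokens`, e.g.
+ #
+ #   {"ID": "abajmipo7b4xz5ip2nrla6b11",
+ #    "Version": {"Index": 373531},
+ #    "JoinTokens": {"Worker": "SWMTKN-1-...", "Manager": "SWMTKN-1-..."}}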
+ ClusterInfo: + description: | + ClusterInfo represents information about the swarm as is returned by the + "/info" endpoint. Join-tokens are not included. + x-nullable: true + type: "object" + properties: + ID: + description: "The ID of the swarm." + type: "string" + example: "abajmipo7b4xz5ip2nrla6b11" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + description: | + Date and time at which the swarm was initialised in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + UpdatedAt: + description: | + Date and time at which the swarm was last updated in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2017-08-09T07:09:37.632105588Z" + Spec: + $ref: "#/definitions/SwarmSpec" + TLSInfo: + $ref: "#/definitions/TLSInfo" + RootRotationInProgress: + description: | + Whether there is currently a root CA rotation in progress for the swarm + type: "boolean" + example: false + DataPathPort: + description: | + DataPathPort specifies the data path port number for data traffic. + Acceptable port range is 1024 to 49151. + If no port is set or is set to 0, the default port (4789) is used. + type: "integer" + format: "uint32" + default: 4789 + example: 4789 + DefaultAddrPool: + description: | + Default Address Pool specifies default subnet pools for global scope + networks. + type: "array" + items: + type: "string" + format: "CIDR" + example: ["10.10.0.0/16", "20.20.0.0/16"] + SubnetSize: + description: | + SubnetSize specifies the subnet size of the networks created from the + default subnet pool. + type: "integer" + format: "uint32" + maximum: 29 + default: 24 + example: 24 + + JoinTokens: + description: | + JoinTokens contains the tokens workers and managers need to join the swarm. + type: "object" + properties: + Worker: + description: | + The token workers can use to join the swarm. + type: "string" + example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" + Manager: + description: | + The token managers can use to join the swarm. + type: "string" + example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + + Swarm: + type: "object" + allOf: + - $ref: "#/definitions/ClusterInfo" + - type: "object" + properties: + JoinTokens: + $ref: "#/definitions/JoinTokens" + + TaskSpec: + description: "User modifiable task configuration." + type: "object" + properties: + PluginSpec: + type: "object" + description: | + Plugin spec for the service. *(Experimental release only.)* + +
+ + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + properties: + Name: + description: "The name or 'alias' to use for the plugin." + type: "string" + Remote: + description: "The plugin image reference to use." + type: "string" + Disabled: + description: "Disable the plugin once scheduled." + type: "boolean" + PluginPrivilege: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + ContainerSpec: + type: "object" + description: | + Container spec for the service. + +
+ + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + properties: + Image: + description: "The image name to use for the container" + type: "string" + Labels: + description: "User-defined key/value data." + type: "object" + additionalProperties: + type: "string" + Command: + description: "The command to be run in the image." + type: "array" + items: + type: "string" + Args: + description: "Arguments to the command." + type: "array" + items: + type: "string" + Hostname: + description: | + The hostname to use for the container, as a valid + [RFC 1123](https://tools.ietf.org/html/rfc1123) hostname. + type: "string" + Env: + description: | + A list of environment variables in the form `VAR=value`. + type: "array" + items: + type: "string" + Dir: + description: "The working directory for commands to run in." + type: "string" + User: + description: "The user inside the container." + type: "string" + Groups: + type: "array" + description: | + A list of additional groups that the container process will run as. + items: + type: "string" + Privileges: + type: "object" + description: "Security options for the container" + properties: + CredentialSpec: + type: "object" + description: "CredentialSpec for managed service account (Windows only)" + properties: + Config: + type: "string" + example: "0bt9dmxjvjiqermk6xrop3ekq" + description: | + Load credential spec from a Swarm Config with the given ID. + The specified config must also be present in the Configs + field with the Runtime property set. + +
+ + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + File: + type: "string" + example: "spec.json" + description: | + Load credential spec from this file. The file is read by + the daemon, and must be present in the `CredentialSpecs` + subdirectory in the docker data directory, which defaults + to `C:\ProgramData\Docker\` on Windows. + + For example, specifying `spec.json` loads + `C:\ProgramData\Docker\CredentialSpecs\spec.json`. + +
+ + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + Registry: + type: "string" + description: | + Load credential spec from this value in the Windows + registry. The specified registry value must be located in: + + `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` + +
+ + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + SELinuxContext: + type: "object" + description: "SELinux labels of the container" + properties: + Disable: + type: "boolean" + description: "Disable SELinux" + User: + type: "string" + description: "SELinux user label" + Role: + type: "string" + description: "SELinux role label" + Type: + type: "string" + description: "SELinux type label" + Level: + type: "string" + description: "SELinux level label" + TTY: + description: "Whether a pseudo-TTY should be allocated." + type: "boolean" + OpenStdin: + description: "Open `stdin`" + type: "boolean" + ReadOnly: + description: "Mount the container's root filesystem as read only." + type: "boolean" + Mounts: + description: | + Specification for mounts to be added to containers created as part + of the service. + type: "array" + items: + $ref: "#/definitions/Mount" + StopSignal: + description: "Signal to stop the container." + type: "string" + StopGracePeriod: + description: | + Amount of time to wait for the container to terminate before + forcefully killing it. + type: "integer" + format: "int64" + HealthCheck: + $ref: "#/definitions/HealthConfig" + Hosts: + type: "array" + description: | + A list of hostname/IP mappings to add to the container's `hosts` + file. The format of extra hosts is specified in the + [hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html) + man page: + + IP_address canonical_hostname [aliases...] + items: + type: "string" + DNSConfig: + description: | + Specification for DNS related configurations in resolver configuration + file (`resolv.conf`). + type: "object" + properties: + Nameservers: + description: "The IP addresses of the name servers." + type: "array" + items: + type: "string" + Search: + description: "A search list for host-name lookup." + type: "array" + items: + type: "string" + Options: + description: | + A list of internal resolver variables to be modified (e.g., + `debug`, `ndots:3`, etc.). + type: "array" + items: + type: "string" + Secrets: + description: | + Secrets contains references to zero or more secrets that will be + exposed to the service. + type: "array" + items: + type: "object" + properties: + File: + description: | + File represents a specific target that is backed by a file. + type: "object" + properties: + Name: + description: | + Name represents the final filename in the filesystem. + type: "string" + UID: + description: "UID represents the file UID." + type: "string" + GID: + description: "GID represents the file GID." + type: "string" + Mode: + description: "Mode represents the FileMode of the file." + type: "integer" + format: "uint32" + SecretID: + description: | + SecretID represents the ID of the specific secret that we're + referencing. + type: "string" + SecretName: + description: | + SecretName is the name of the secret that this references, + but this is just provided for lookup/display purposes. The + secret in the reference will be identified by its ID. + type: "string" + Configs: + description: | + Configs contains references to zero or more configs that will be + exposed to the service. + type: "array" + items: + type: "object" + properties: + File: + description: | + File represents a specific target that is backed by a file. + +
+ + > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive + type: "object" + properties: + Name: + description: | + Name represents the final filename in the filesystem. + type: "string" + UID: + description: "UID represents the file UID." + type: "string" + GID: + description: "GID represents the file GID." + type: "string" + Mode: + description: "Mode represents the FileMode of the file." + type: "integer" + format: "uint32" + Runtime: + description: | + Runtime represents a target that is not mounted into the + container but is used by the task + +
+
+ > **Note**: `Configs.File` and `Configs.Runtime` are mutually
+ > exclusive
+ type: "object"
+ ConfigID:
+ description: |
+ ConfigID represents the ID of the specific config that we're
+ referencing.
+ type: "string"
+ ConfigName:
+ description: |
+ ConfigName is the name of the config that this references,
+ but this is just provided for lookup/display purposes. The
+ config in the reference will be identified by its ID.
+ type: "string"
+ Isolation:
+ type: "string"
+ description: |
+ Isolation technology of the containers running the service.
+ (Windows only)
+ enum:
+ - "default"
+ - "process"
+ - "hyperv"
+ Init:
+ description: |
+ Run an init inside the container that forwards signals and reaps
+ processes. This field is omitted if empty, and the default (as
+ configured on the daemon) is used.
+ type: "boolean"
+ x-nullable: true
+ Sysctls:
+ description: |
+ Set kernel namespaced parameters (sysctls) in the container.
+ The Sysctls option on services accepts the same sysctls as
+ are supported on containers. Note that while the same sysctls are
+ supported, no guarantees or checks are made about their
+ suitability for a clustered environment, and it's up to the user
+ to determine whether a given sysctl will work properly in a
+ Service.
+ type: "object"
+ additionalProperties:
+ type: "string"
+ # This option is not used by Windows containers
+ CapabilityAdd:
+ type: "array"
+ description: |
+ A list of kernel capabilities to add to the default set
+ for the container.
+ items:
+ type: "string"
+ example:
+ - "CAP_NET_RAW"
+ - "CAP_SYS_ADMIN"
+ - "CAP_SYS_CHROOT"
+ - "CAP_SYSLOG"
+ CapabilityDrop:
+ type: "array"
+ description: |
+ A list of kernel capabilities to drop from the default set
+ for the container.
+ items:
+ type: "string"
+ example:
+ - "CAP_NET_RAW"
+ Ulimits:
+ description: |
+ A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Name:
+ description: "Name of ulimit"
+ type: "string"
+ Soft:
+ description: "Soft limit"
+ type: "integer"
+ Hard:
+ description: "Hard limit"
+ type: "integer"
+ NetworkAttachmentSpec:
+ description: |
+ Read-only spec type for non-swarm containers attached to swarm overlay
+ networks.
+
+
+ > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are
+ > mutually exclusive. PluginSpec is only used when the Runtime field
+ > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime
+ > field is set to `attachment`.
+ type: "object"
+ properties:
+ ContainerID:
+ description: "ID of the container represented by this task"
+ type: "string"
+ Resources:
+ description: |
+ Resource requirements which apply to each individual container created
+ as part of the service.
+ type: "object"
+ properties:
+ Limits:
+ description: "Define resource limits."
+ $ref: "#/definitions/Limit"
+ Reservations:
+ description: "Define resource reservations."
+ $ref: "#/definitions/ResourceObject"
+ RestartPolicy:
+ description: |
+ Specification for the restart policy which applies to containers
+ created as part of this service.
+ type: "object"
+ properties:
+ Condition:
+ description: "Condition for restart."
+ type: "string"
+ enum:
+ - "none"
+ - "on-failure"
+ - "any"
+ Delay:
+ description: "Delay between restart attempts."
+ type: "integer"
+ format: "int64"
+ MaxAttempts:
+ description: |
+ Maximum attempts to restart a given container before giving up
+ (default value is 0, which is ignored).
+ type: "integer"
+ format: "int64"
+ default: 0
+ Window:
+ description: |
+ Window is the time window used to evaluate the restart policy
+ (default value is 0, which is unbounded).
+ type: "integer"
+ format: "int64"
+ default: 0
+ Placement:
+ type: "object"
+ properties:
+ Constraints:
+ description: |
+ An array of constraint expressions to limit the set of nodes where
+ a task can be scheduled. Constraint expressions can either use a
+ _match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find
+ nodes that satisfy every expression (AND match). Constraints can
+ match node or Docker Engine labels as follows:
+
+ node attribute | matches | example
+ ---------------------|--------------------------------|-----------------------------------------------
+ `node.id` | Node ID | `node.id==2ivku8v2gvtg4`
+ `node.hostname` | Node hostname | `node.hostname!=node-2`
+ `node.role` | Node role (`manager`/`worker`) | `node.role==manager`
+ `node.platform.os` | Node operating system | `node.platform.os==windows`
+ `node.platform.arch` | Node architecture | `node.platform.arch==x86_64`
+ `node.labels` | User-defined node labels | `node.labels.security==high`
+ `engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-14.04`
+
+ `engine.labels` apply to Docker Engine labels like operating system,
+ drivers, etc. Swarm administrators add `node.labels` for operational
+ purposes by using the [`node update endpoint`](#operation/NodeUpdate).
+
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "node.hostname!=node3.corp.example.com"
+ - "node.role!=manager"
+ - "node.labels.type==production"
+ - "node.platform.os==linux"
+ - "node.platform.arch==x86_64"
+ Preferences:
+ description: |
+ Preferences provide a way to make the scheduler aware of factors
+ such as topology. They are provided in order from highest to
+ lowest precedence.
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Spread:
+ type: "object"
+ properties:
+ SpreadDescriptor:
+ description: |
+ Label descriptor, such as `engine.labels.az`.
+ type: "string"
+ example:
+ - Spread:
+ SpreadDescriptor: "node.labels.datacenter"
+ - Spread:
+ SpreadDescriptor: "node.labels.rack"
+ MaxReplicas:
+ description: |
+ Maximum number of replicas per node (default value is 0, which
+ is unlimited).
+ type: "integer"
+ format: "int64"
+ default: 0
+ Platforms:
+ description: |
+ Platforms stores all the platforms that the service's image can
+ run on. This field is used in the platform filter for scheduling.
+ If empty, then the platform filter is off, meaning there are no
+ scheduling restrictions.
+ type: "array"
+ items:
+ $ref: "#/definitions/Platform"
+ ForceUpdate:
+ description: |
+ A counter that triggers an update even if no relevant parameters have
+ been changed.
+ type: "integer"
+ Runtime:
+ description: |
+ Runtime is the type of runtime specified for the task executor.
+ type: "string"
+ Networks:
+ description: "Specifies which networks the service should attach to."
+ type: "array"
+ items:
+ $ref: "#/definitions/NetworkAttachmentConfig"
+ LogDriver:
+ description: |
+ Specifies the log driver to use for tasks created from this spec. If
+ not present, the default one for the swarm will be used, finally
+ falling back to the engine default if not specified.
+ type: "object"
+ properties:
+ Name:
+ type: "string"
+ Options:
+ type: "object"
+ additionalProperties:
+ type: "string"
+
+ TaskState:
+ type: "string"
+ enum:
+ - "new"
+ - "allocated"
+ - "pending"
+ - "assigned"
+ - "accepted"
+ - "preparing"
+ - "ready"
+ - "starting"
+ - "running"
+ - "complete"
+ - "shutdown"
+ - "failed"
+ - "rejected"
+ - "remove"
+ - "orphaned"
+
+ Task:
+ type: "object"
+ properties:
+ ID:
+ description: "The ID of the task."
+ type: "string"
+ Version:
+ $ref: "#/definitions/ObjectVersion"
+ CreatedAt:
+ type: "string"
+ format: "dateTime"
+ UpdatedAt:
+ type: "string"
+ format: "dateTime"
+ Name:
+ description: "Name of the task."
+ type: "string"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ Spec:
+ $ref: "#/definitions/TaskSpec"
+ ServiceID:
+ description: "The ID of the service this task is part of."
+ type: "string"
+ Slot:
+ type: "integer"
+ NodeID:
+ description: "The ID of the node that this task is on."
+ type: "string"
+ AssignedGenericResources:
+ $ref: "#/definitions/GenericResources"
+ Status:
+ type: "object"
+ properties:
+ Timestamp:
+ type: "string"
+ format: "dateTime"
+ State:
+ $ref: "#/definitions/TaskState"
+ Message:
+ type: "string"
+ Err:
+ type: "string"
+ ContainerStatus:
+ type: "object"
+ properties:
+ ContainerID:
+ type: "string"
+ PID:
+ type: "integer"
+ ExitCode:
+ type: "integer"
+ DesiredState:
+ $ref: "#/definitions/TaskState"
+ JobIteration:
+ description: |
+ If the Service this Task belongs to is a job-mode service, contains
+ the JobIteration of the Service this Task was created for. Absent if
+ the Task was created for a Replicated or Global Service.
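+ # Illustrative note: comparing this value with the owning service's
+ # JobStatus.JobIteration is one way to tell whether a task belongs to the
+ # current execution of a job, per the descriptions in this file.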
+ $ref: "#/definitions/ObjectVersion"
+ example:
+ ID: "0kzzo1i0y4jz6027t0k7aezc7"
+ Version:
+ Index: 71
+ CreatedAt: "2016-06-07T21:07:31.171892745Z"
+ UpdatedAt: "2016-06-07T21:07:31.376370513Z"
+ Spec:
+ ContainerSpec:
+ Image: "redis"
+ Resources:
+ Limits: {}
+ Reservations: {}
+ RestartPolicy:
+ Condition: "any"
+ MaxAttempts: 0
+ Placement: {}
+ ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz"
+ Slot: 1
+ NodeID: "60gvrl6tm78dmak4yl7srz94v"
+ Status:
+ Timestamp: "2016-06-07T21:07:31.290032978Z"
+ State: "running"
+ Message: "started"
+ ContainerStatus:
+ ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035"
+ PID: 677
+ DesiredState: "running"
+ NetworksAttachments:
+ - Network:
+ ID: "4qvuz4ko70xaltuqbt8956gd1"
+ Version:
+ Index: 18
+ CreatedAt: "2016-06-07T20:31:11.912919752Z"
+ UpdatedAt: "2016-06-07T21:07:29.955277358Z"
+ Spec:
+ Name: "ingress"
+ Labels:
+ com.docker.swarm.internal: "true"
+ DriverConfiguration: {}
+ IPAMOptions:
+ Driver: {}
+ Configs:
+ - Subnet: "10.255.0.0/16"
+ Gateway: "10.255.0.1"
+ DriverState:
+ Name: "overlay"
+ Options:
+ com.docker.network.driver.overlay.vxlanid_list: "256"
+ IPAMOptions:
+ Driver:
+ Name: "default"
+ Configs:
+ - Subnet: "10.255.0.0/16"
+ Gateway: "10.255.0.1"
+ Addresses:
+ - "10.255.0.10/16"
+ AssignedGenericResources:
+ - DiscreteResourceSpec:
+ Kind: "SSD"
+ Value: 3
+ - NamedResourceSpec:
+ Kind: "GPU"
+ Value: "UUID1"
+ - NamedResourceSpec:
+ Kind: "GPU"
+ Value: "UUID2"
+
+ ServiceSpec:
+ description: "User modifiable configuration for a service."
+ type: "object"
+ properties:
+ Name:
+ description: "Name of the service."
+ type: "string"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ TaskTemplate:
+ $ref: "#/definitions/TaskSpec"
+ Mode:
+ description: "Scheduling mode for the service."
+ type: "object"
+ properties:
+ Replicated:
+ type: "object"
+ properties:
+ Replicas:
+ type: "integer"
+ format: "int64"
+ Global:
+ type: "object"
+ ReplicatedJob:
+ description: |
+ The mode used for services with a finite number of tasks that run
+ to a completed state.
+ type: "object"
+ properties:
+ MaxConcurrent:
+ description: |
+ The maximum number of replicas to run simultaneously.
+ type: "integer"
+ format: "int64"
+ default: 1
+ TotalCompletions:
+ description: |
+ The total number of replicas desired to reach the Completed
+ state. If unset, will default to the value of `MaxConcurrent`.
+ type: "integer"
+ format: "int64"
+ GlobalJob:
+ description: |
+ The mode used for services which run a task to the completed state
+ on each valid node.
+ type: "object"
+ UpdateConfig:
+ description: "Specification for the update strategy of the service."
+ type: "object"
+ properties:
+ Parallelism:
+ description: |
+ Maximum number of tasks to be updated in one iteration (0 means
+ unlimited parallelism).
+ type: "integer"
+ format: "int64"
+ Delay:
+ description: "Amount of time between updates, in nanoseconds."
+ type: "integer"
+ format: "int64"
+ FailureAction:
+ description: |
+ Action to take if an updated task fails to run, or stops running
+ during the update.
+ type: "string"
+ enum:
+ - "continue"
+ - "pause"
+ - "rollback"
+ Monitor:
+ description: |
+ Amount of time to monitor each updated task for failures, in
+ nanoseconds.
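+ # For example, the sample service further below uses Monitor: 15000000000,
+ # i.e. 15000000000 ns = 15 seconds of monitoring per updated task.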
+ type: "integer"
+ format: "int64"
+ MaxFailureRatio:
+ description: |
+ The fraction of tasks that may fail during an update before the
+ failure action is invoked, specified as a floating point number
+ between 0 and 1.
+ type: "number"
+ default: 0
+ Order:
+ description: |
+ The order of operations when rolling out an updated task. Either
+ the old task is shut down before the new task is started, or the
+ new task is started before the old task is shut down.
+ type: "string"
+ enum:
+ - "stop-first"
+ - "start-first"
+ RollbackConfig:
+ description: "Specification for the rollback strategy of the service."
+ type: "object"
+ properties:
+ Parallelism:
+ description: |
+ Maximum number of tasks to be rolled back in one iteration (0 means
+ unlimited parallelism).
+ type: "integer"
+ format: "int64"
+ Delay:
+ description: |
+ Amount of time between rollback iterations, in nanoseconds.
+ type: "integer"
+ format: "int64"
+ FailureAction:
+ description: |
+ Action to take if a rolled back task fails to run, or stops
+ running during the rollback.
+ type: "string"
+ enum:
+ - "continue"
+ - "pause"
+ Monitor:
+ description: |
+ Amount of time to monitor each rolled back task for failures, in
+ nanoseconds.
+ type: "integer"
+ format: "int64"
+ MaxFailureRatio:
+ description: |
+ The fraction of tasks that may fail during a rollback before the
+ failure action is invoked, specified as a floating point number
+ between 0 and 1.
+ type: "number"
+ default: 0
+ Order:
+ description: |
+ The order of operations when rolling back a task. Either the old
+ task is shut down before the new task is started, or the new task
+ is started before the old task is shut down.
+ type: "string"
+ enum:
+ - "stop-first"
+ - "start-first"
+ Networks:
+ description: "Specifies which networks the service should attach to."
+ type: "array"
+ items:
+ $ref: "#/definitions/NetworkAttachmentConfig"
+
+ EndpointSpec:
+ $ref: "#/definitions/EndpointSpec"
+
+ EndpointPortConfig:
+ type: "object"
+ properties:
+ Name:
+ type: "string"
+ Protocol:
+ type: "string"
+ enum:
+ - "tcp"
+ - "udp"
+ - "sctp"
+ TargetPort:
+ description: "The port inside the container."
+ type: "integer"
+ PublishedPort:
+ description: "The port on the swarm hosts."
+ type: "integer"
+ PublishMode:
+ description: |
+ The mode in which the port is published.
+
+
+ - "ingress" makes the target port accessible on every node,
+ regardless of whether there is a task for the service running on
+ that node or not.
+ - "host" bypasses the routing mesh and publishes the port directly on
+ the swarm node where that service is running.
+
+ type: "string"
+ enum:
+ - "ingress"
+ - "host"
+ default: "ingress"
+ example: "ingress"
+
+ EndpointSpec:
+ description: "Properties that can be configured to access and load balance a service."
+ type: "object"
+ properties:
+ Mode:
+ description: |
+ The mode of resolution to use for internal load balancing between tasks.
+ type: "string"
+ enum:
+ - "vip"
+ - "dnsrr"
+ default: "vip"
+ Ports:
+ description: |
+ List of exposed ports that this service is accessible on from the
+ outside. Ports can only be provided if `vip` resolution mode is used.
+ type: "array"
+ items:
+ $ref: "#/definitions/EndpointPortConfig"
+
+ Service:
+ type: "object"
+ properties:
+ ID:
+ type: "string"
+ Version:
+ $ref: "#/definitions/ObjectVersion"
+ CreatedAt:
+ type: "string"
+ format: "dateTime"
+ UpdatedAt:
+ type: "string"
+ format: "dateTime"
+ Spec:
+ $ref: "#/definitions/ServiceSpec"
+ Endpoint:
+ type: "object"
+ properties:
+ Spec:
+ $ref: "#/definitions/EndpointSpec"
+ Ports:
+ type: "array"
+ items:
+ $ref: "#/definitions/EndpointPortConfig"
+ VirtualIPs:
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ NetworkID:
+ type: "string"
+ Addr:
+ type: "string"
+ UpdateStatus:
+ description: "The status of a service update."
+ type: "object"
+ properties:
+ State:
+ type: "string"
+ enum:
+ - "updating"
+ - "paused"
+ - "completed"
+ StartedAt:
+ type: "string"
+ format: "dateTime"
+ CompletedAt:
+ type: "string"
+ format: "dateTime"
+ Message:
+ type: "string"
+ ServiceStatus:
+ description: |
+ The status of the service's tasks. Provided only when requested as
+ part of a ServiceList operation.
+ type: "object"
+ properties:
+ RunningTasks:
+ description: |
+ The number of tasks for the service currently in the Running state.
+ type: "integer"
+ format: "uint64"
+ example: 7
+ DesiredTasks:
+ description: |
+ The number of tasks for the service desired to be running.
+ For replicated services, this is the replica count from the
+ service spec. For global services, this is computed by taking
+ count of all tasks for the service with a Desired State other
+ than Shutdown.
+ type: "integer"
+ format: "uint64"
+ example: 10
+ CompletedTasks:
+ description: |
+ The number of tasks for a job that are in the Completed state.
+ This field must be cross-referenced with the service type, as the
+ value of 0 may mean the service is not in a job mode, or it may
+ mean the job-mode service has not yet completed any tasks.
+ type: "integer"
+ format: "uint64"
+ JobStatus:
+ description: |
+ The status of the service when it is in one of ReplicatedJob or
+ GlobalJob modes. Absent on Replicated and Global mode services. The
+ JobIteration is an ObjectVersion, but unlike the Service's version,
+ does not need to be sent with an update request.
+ type: "object"
+ properties:
+ JobIteration:
+ description: |
+ JobIteration is a value increased each time a Job is executed,
+ successfully or otherwise. "Executed", in this case, means the
+ job as a whole has been started, not that an individual Task has
+ been launched. A job is "Executed" when its ServiceSpec is
+ updated. JobIteration can be used to disambiguate Tasks belonging
+ to different executions of a job. Though JobIteration will
+ increase with each subsequent execution, it may not necessarily
+ increase by 1, and so JobIteration should not be used to infer the
+ total number of times the job has been executed.
+ $ref: "#/definitions/ObjectVersion"
+ LastExecution:
+ description: |
+ The last time, as observed by the server, that this job was
+ started.
+ type: "string"
+ format: "dateTime"
+ example:
+ ID: "9mnpnzenvg8p8tdbtq4wvbkcz"
+ Version:
+ Index: 19
+ CreatedAt: "2016-06-07T21:05:51.880065305Z"
+ UpdatedAt: "2016-06-07T21:07:29.962229872Z"
+ Spec:
+ Name: "hopeful_cori"
+ TaskTemplate:
+ ContainerSpec:
+ Image: "redis"
+ Resources:
+ Limits: {}
+ Reservations: {}
+ RestartPolicy:
+ Condition: "any"
+ MaxAttempts: 0
+ Placement: {}
+ ForceUpdate: 0
+ Mode:
+ Replicated:
+ Replicas: 1
+ UpdateConfig:
+ Parallelism: 1
+ Delay: 1000000000
+ FailureAction: "pause"
+ Monitor: 15000000000
+ MaxFailureRatio: 0.15
+ RollbackConfig:
+ Parallelism: 1
+ Delay: 1000000000
+ FailureAction: "pause"
+ Monitor: 15000000000
+ MaxFailureRatio: 0.15
+ EndpointSpec:
+ Mode: "vip"
+ Ports:
+ -
+ Protocol: "tcp"
+ TargetPort: 6379
+ PublishedPort: 30001
+ Endpoint:
+ Spec:
+ Mode: "vip"
+ Ports:
+ -
+ Protocol: "tcp"
+ TargetPort: 6379
+ PublishedPort: 30001
+ Ports:
+ -
+ Protocol: "tcp"
+ TargetPort: 6379
+ PublishedPort: 30001
+ VirtualIPs:
+ -
+ NetworkID: "4qvuz4ko70xaltuqbt8956gd1"
+ Addr: "10.255.0.2/16"
+ -
+ NetworkID: "4qvuz4ko70xaltuqbt8956gd1"
+ Addr: "10.255.0.3/16"
+
+ ImageDeleteResponseItem:
+ type: "object"
+ properties:
+ Untagged:
+ description: "The image ID of an image that was untagged"
+ type: "string"
+ Deleted:
+ description: "The image ID of an image that was deleted"
+ type: "string"
+
+ ServiceUpdateResponse:
+ type: "object"
+ properties:
+ Warnings:
+ description: "Optional warning messages"
+ type: "array"
+ items:
+ type: "string"
+ example:
+ Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found"
+
+ ContainerSummary:
+ type: "object"
+ properties:
+ Id:
+ description: "The ID of this container"
+ type: "string"
+ x-go-name: "ID"
+ Names:
+ description: "The names that this container has been given"
+ type: "array"
+ items:
+ type: "string"
+ Image:
+ description: "The name of the image used when creating this container"
+ type: "string"
+ ImageID:
+ description: "The ID of the image that this container was created from"
+ type: "string"
+ Command:
+ description: "Command to run when starting the container"
+ type: "string"
+ Created:
+ description: "When the container was created"
+ type: "integer"
+ format: "int64"
+ Ports:
+ description: "The ports exposed by this container"
+ type: "array"
+ items:
+ $ref: "#/definitions/Port"
+ SizeRw:
+ description: "The size of files that have been created or changed by this container"
+ type: "integer"
+ format: "int64"
+ SizeRootFs:
+ description: "The total size of all the files in this container"
+ type: "integer"
+ format: "int64"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ State:
+ description: "The state of this container (e.g. `Exited`)"
+ type: "string"
+ Status:
+ description: "Additional human-readable status of this container (e.g. `Exit 0`)"
+ type: "string"
+ HostConfig:
+ type: "object"
+ properties:
+ NetworkMode:
+ type: "string"
+ NetworkSettings:
+ description: "A summary of the container's network settings"
+ type: "object"
+ properties:
+ Networks:
+ type: "object"
+ additionalProperties:
+ $ref: "#/definitions/EndpointSettings"
+ Mounts:
+ type: "array"
+ items:
+ $ref: "#/definitions/MountPoint"
+
+ Driver:
+ description: "Driver represents a driver (network, logging, secrets)."
+ type: "object"
+ required: [Name]
+ properties:
+ Name:
+ description: "Name of the driver."
+ type: "string"
+ x-nullable: false
+ example: "some-driver"
+ Options:
+ description: "Key/value map of driver-specific options."
+ type: "object"
+ x-nullable: false
+ additionalProperties:
+ type: "string"
+ example:
+ OptionA: "value for driver-specific option A"
+ OptionB: "value for driver-specific option B"
+
+ SecretSpec:
+ type: "object"
+ properties:
+ Name:
+ description: "User-defined name of the secret."
+ type: "string"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ com.example.some-label: "some-value"
+ com.example.some-other-label: "some-other-value"
+ Data:
+ description: |
+ Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5))
+ data to store as secret.
+
+ This field is only used to _create_ a secret, and is not returned by
+ other endpoints.
+ type: "string"
+ example: ""
+ Driver:
+ description: |
+ Name of the secrets driver used to fetch the secret's value from an
+ external secret store.
+ $ref: "#/definitions/Driver"
+ Templating:
+ description: |
+ Templating driver, if applicable.
+
+ Templating controls whether and how to evaluate the config payload as
+ a template. If no driver is set, no templating is used.
+ $ref: "#/definitions/Driver"
+
+ Secret:
+ type: "object"
+ properties:
+ ID:
+ type: "string"
+ example: "blt1owaxmitz71s9v5zh81zun"
+ Version:
+ $ref: "#/definitions/ObjectVersion"
+ CreatedAt:
+ type: "string"
+ format: "dateTime"
+ example: "2017-07-20T13:55:28.678958722Z"
+ UpdatedAt:
+ type: "string"
+ format: "dateTime"
+ example: "2017-07-20T13:55:28.678958722Z"
+ Spec:
+ $ref: "#/definitions/SecretSpec"
+
+ ConfigSpec:
+ type: "object"
+ properties:
+ Name:
+ description: "User-defined name of the config."
+ type: "string"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ Data:
+ description: |
+ Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5))
+ config data.
+ type: "string"
+ Templating:
+ description: |
+ Templating driver, if applicable.
+
+ Templating controls whether and how to evaluate the config payload as
+ a template. If no driver is set, no templating is used.
+ $ref: "#/definitions/Driver"
+
+ Config:
+ type: "object"
+ properties:
+ ID:
+ type: "string"
+ Version:
+ $ref: "#/definitions/ObjectVersion"
+ CreatedAt:
+ type: "string"
+ format: "dateTime"
+ UpdatedAt:
+ type: "string"
+ format: "dateTime"
+ Spec:
+ $ref: "#/definitions/ConfigSpec"
+
+ ContainerState:
+ description: |
+ ContainerState stores the container's running state. It's part of
+ ContainerJSONBase and will be returned by the "inspect" command.
+ type: "object"
+ x-nullable: true
+ properties:
+ Status:
+ description: |
+ String representation of the container state. Can be one of "created",
+ "running", "paused", "restarting", "removing", "exited", or "dead".
+ type: "string" + enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"] + example: "running" + Running: + description: | + Whether this container is running. + + Note that a running container can be _paused_. The `Running` and `Paused` + booleans are not mutually exclusive: + + When pausing a container (on Linux), the freezer cgroup is used to suspend + all processes in the container. Freezing the process requires the process to + be running. As a result, paused containers are both `Running` _and_ `Paused`. + + Use the `Status` field instead to determine if a container's state is "running". + type: "boolean" + example: true + Paused: + description: "Whether this container is paused." + type: "boolean" + example: false + Restarting: + description: "Whether this container is restarting." + type: "boolean" + example: false + OOMKilled: + description: | + Whether a process within this container has been killed because it ran + out of memory since the container was last started. + type: "boolean" + example: false + Dead: + type: "boolean" + example: false + Pid: + description: "The process ID of this container" + type: "integer" + example: 1234 + ExitCode: + description: "The last exit code of this container" + type: "integer" + example: 0 + Error: + type: "string" + StartedAt: + description: "The time when this container was last started." + type: "string" + example: "2020-01-06T09:06:59.461876391Z" + FinishedAt: + description: "The time when this container last exited." + type: "string" + example: "2020-01-06T09:07:59.461876391Z" + Health: + $ref: "#/definitions/Health" + + ContainerCreateResponse: + description: "OK response to ContainerCreate operation" + type: "object" + title: "ContainerCreateResponse" + x-go-name: "CreateResponse" + required: [Id, Warnings] + properties: + Id: + description: "The ID of the created container" + type: "string" + x-nullable: false + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Warnings: + description: "Warnings encountered when creating the container" + type: "array" + x-nullable: false + items: + type: "string" + example: [] + + ContainerWaitResponse: + description: "OK response to ContainerWait operation" + type: "object" + x-go-name: "WaitResponse" + title: "ContainerWaitResponse" + required: [StatusCode] + properties: + StatusCode: + description: "Exit code of the container" + type: "integer" + format: "int64" + x-nullable: false + Error: + $ref: "#/definitions/ContainerWaitExitError" + + ContainerWaitExitError: + description: "container waiting error, if any" + type: "object" + x-go-name: "WaitExitError" + properties: + Message: + description: "Details of an error" + type: "string" + + SystemVersion: + type: "object" + description: | + Response of Engine API: GET "/version" + properties: + Platform: + type: "object" + required: [Name] + properties: + Name: + type: "string" + Components: + type: "array" + description: | + Information about system components + items: + type: "object" + x-go-name: ComponentVersion + required: [Name, Version] + properties: + Name: + description: | + Name of the component + type: "string" + example: "Engine" + Version: + description: | + Version of the component + type: "string" + x-nullable: false + example: "19.03.12" + Details: + description: | + Key/value pairs of strings with additional information about the + component. These values are intended for informational purposes + only, and their content is not defined, and not part of the API + specification. 
+ + These messages can be printed by the client as information to the user. + type: "object" + x-nullable: true + Version: + description: "The version of the daemon" + type: "string" + example: "19.03.12" + ApiVersion: + description: | + The default (and highest) API version that is supported by the daemon + type: "string" + example: "1.40" + MinAPIVersion: + description: | + The minimum API version that is supported by the daemon + type: "string" + example: "1.12" + GitCommit: + description: | + The Git commit of the source code that was used to build the daemon + type: "string" + example: "48a66213fe" + GoVersion: + description: | + The version Go used to compile the daemon, and the version of the Go + runtime in use. + type: "string" + example: "go1.13.14" + Os: + description: | + The operating system that the daemon is running on ("linux" or "windows") + type: "string" + example: "linux" + Arch: + description: | + The architecture that the daemon is running on + type: "string" + example: "amd64" + KernelVersion: + description: | + The kernel version (`uname -r`) that the daemon is running on. + + This field is omitted when empty. + type: "string" + example: "4.19.76-linuxkit" + Experimental: + description: | + Indicates if the daemon is started with experimental features enabled. + + This field is omitted when empty / false. + type: "boolean" + example: true + BuildTime: + description: | + The date and time that the daemon was compiled. + type: "string" + example: "2020-06-22T15:49:27.000000000+00:00" + + SystemInfo: + type: "object" + properties: + ID: + description: | + Unique identifier of the daemon. + +
+ + > **Note**: The format of the ID itself is not part of the API, and + > should not be considered stable. + type: "string" + example: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" + Containers: + description: "Total number of containers on the host." + type: "integer" + example: 14 + ContainersRunning: + description: | + Number of containers with status `"running"`. + type: "integer" + example: 3 + ContainersPaused: + description: | + Number of containers with status `"paused"`. + type: "integer" + example: 1 + ContainersStopped: + description: | + Number of containers with status `"stopped"`. + type: "integer" + example: 10 + Images: + description: | + Total number of images on the host. + + Both _tagged_ and _untagged_ (dangling) images are counted. + type: "integer" + example: 508 + Driver: + description: "Name of the storage driver in use." + type: "string" + example: "overlay2" + DriverStatus: + description: | + Information specific to the storage driver, provided as + "label" / "value" pairs. + + This information is provided by the storage driver, and formatted + in a way consistent with the output of `docker info` on the command + line. + +
+ + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "array" + items: + type: "array" + items: + type: "string" + example: + - ["Backing Filesystem", "extfs"] + - ["Supports d_type", "true"] + - ["Native Overlay Diff", "true"] + DockerRootDir: + description: | + Root directory of persistent Docker state. + + Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker` + on Windows. + type: "string" + example: "/var/lib/docker" + Plugins: + $ref: "#/definitions/PluginsInfo" + MemoryLimit: + description: "Indicates if the host has memory limit support enabled." + type: "boolean" + example: true + SwapLimit: + description: "Indicates if the host has memory swap limit support enabled." + type: "boolean" + example: true + KernelMemoryTCP: + description: | + Indicates if the host has kernel memory TCP limit support enabled. This + field is omitted if not supported. + + Kernel memory TCP limits are not supported when using cgroups v2, which + does not support the corresponding `memory.kmem.tcp.limit_in_bytes` cgroup. + type: "boolean" + example: true + CpuCfsPeriod: + description: | + Indicates if CPU CFS(Completely Fair Scheduler) period is supported by + the host. + type: "boolean" + example: true + CpuCfsQuota: + description: | + Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by + the host. + type: "boolean" + example: true + CPUShares: + description: | + Indicates if CPU Shares limiting is supported by the host. + type: "boolean" + example: true + CPUSet: + description: | + Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host. + + See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt) + type: "boolean" + example: true + PidsLimit: + description: "Indicates if the host kernel has PID limit support enabled." + type: "boolean" + example: true + OomKillDisable: + description: "Indicates if OOM killer disable is supported on the host." + type: "boolean" + IPv4Forwarding: + description: "Indicates IPv4 forwarding is enabled." + type: "boolean" + example: true + BridgeNfIptables: + description: "Indicates if `bridge-nf-call-iptables` is available on the host." + type: "boolean" + example: true + BridgeNfIp6tables: + description: "Indicates if `bridge-nf-call-ip6tables` is available on the host." + type: "boolean" + example: true + Debug: + description: | + Indicates if the daemon is running in debug-mode / with debug-level + logging enabled. + type: "boolean" + example: true + NFd: + description: | + The total number of file Descriptors in use by the daemon process. + + This information is only returned if debug-mode is enabled. + type: "integer" + example: 64 + NGoroutines: + description: | + The number of goroutines that currently exist. + + This information is only returned if debug-mode is enabled. + type: "integer" + example: 174 + SystemTime: + description: | + Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + format with nano-seconds. + type: "string" + example: "2017-08-08T20:28:29.06202363Z" + LoggingDriver: + description: | + The logging driver to use as a default for new containers. + type: "string" + CgroupDriver: + description: | + The driver to use for managing cgroups. + type: "string" + enum: ["cgroupfs", "systemd", "none"] + default: "cgroupfs" + example: "cgroupfs" + CgroupVersion: + description: | + The version of the cgroup. 
+ type: "string" + enum: ["1", "2"] + default: "1" + example: "1" + NEventsListener: + description: "Number of event listeners subscribed." + type: "integer" + example: 30 + KernelVersion: + description: | + Kernel version of the host. + + On Linux, this information obtained from `uname`. On Windows this + information is queried from the HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\ + registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_. + type: "string" + example: "4.9.38-moby" + OperatingSystem: + description: | + Name of the host's operating system, for example: "Ubuntu 16.04.2 LTS" + or "Windows Server 2016 Datacenter" + type: "string" + example: "Alpine Linux v3.5" + OSVersion: + description: | + Version of the host's operating system + +
+ + > **Note**: The information returned in this field, including its + > very existence, and the formatting of values, should not be considered + > stable, and may change without notice. + type: "string" + example: "16.04" + OSType: + description: | + Generic type of the operating system of the host, as returned by the + Go runtime (`GOOS`). + + Currently returned values are "linux" and "windows". A full list of + possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + type: "string" + example: "linux" + Architecture: + description: | + Hardware architecture of the host, as returned by the Go runtime + (`GOARCH`). + + A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + type: "string" + example: "x86_64" + NCPU: + description: | + The number of logical CPUs usable by the daemon. + + The number of available CPUs is checked by querying the operating + system when the daemon starts. Changes to operating system CPU + allocation after the daemon is started are not reflected. + type: "integer" + example: 4 + MemTotal: + description: | + Total amount of physical memory available on the host, in bytes. + type: "integer" + format: "int64" + example: 2095882240 + + IndexServerAddress: + description: | + Address / URL of the index server that is used for image search, + and as a default for user authentication for Docker Hub and Docker Cloud. + default: "https://index.docker.io/v1/" + type: "string" + example: "https://index.docker.io/v1/" + RegistryConfig: + $ref: "#/definitions/RegistryServiceConfig" + GenericResources: + $ref: "#/definitions/GenericResources" + HttpProxy: + description: | + HTTP-proxy configured for the daemon. This value is obtained from the + [`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. + Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + are masked in the API response. + + Containers do not automatically inherit this configuration. + type: "string" + example: "http://xxxxx:xxxxx@proxy.corp.example.com:8080" + HttpsProxy: + description: | + HTTPS-proxy configured for the daemon. This value is obtained from the + [`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. + Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + are masked in the API response. + + Containers do not automatically inherit this configuration. + type: "string" + example: "https://xxxxx:xxxxx@proxy.corp.example.com:4443" + NoProxy: + description: | + Comma-separated list of domain extensions for which no proxy should be + used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) + environment variable. + + Containers do not automatically inherit this configuration. + type: "string" + example: "*.local, 169.254/16" + Name: + description: "Hostname of the host." + type: "string" + example: "node5.corp.example.com" + Labels: + description: | + User-defined labels (key/value metadata) as set on the daemon. + +
+ + > **Note**: When part of a Swarm, nodes can both have _daemon_ labels, + > set through the daemon configuration, and _node_ labels, set from a + > manager node in the Swarm. Node labels are not included in this + > field. Node labels can be retrieved using the `/nodes/(id)` endpoint + > on a manager node in the Swarm. + type: "array" + items: + type: "string" + example: ["storage=ssd", "production"] + ExperimentalBuild: + description: | + Indicates if experimental features are enabled on the daemon. + type: "boolean" + example: true + ServerVersion: + description: | + Version string of the daemon. + type: "string" + example: "24.0.2" + Runtimes: + description: | + List of [OCI compliant](https://github.com/opencontainers/runtime-spec) + runtimes configured on the daemon. Keys hold the "name" used to + reference the runtime. + + The Docker daemon relies on an OCI compliant runtime (invoked via the + `containerd` daemon) as its interface to the Linux kernel namespaces, + cgroups, and SELinux. + + The default runtime is `runc`, and automatically configured. Additional + runtimes can be configured by the user and will be listed here. + type: "object" + additionalProperties: + $ref: "#/definitions/Runtime" + default: + runc: + path: "runc" + example: + runc: + path: "runc" + runc-master: + path: "/go/bin/runc" + custom: + path: "/usr/local/bin/my-oci-runtime" + runtimeArgs: ["--debug", "--systemd-cgroup=false"] + DefaultRuntime: + description: | + Name of the default OCI runtime that is used when starting containers. + + The default can be overridden per-container at create time. + type: "string" + default: "runc" + example: "runc" + Swarm: + $ref: "#/definitions/SwarmInfo" + LiveRestoreEnabled: + description: | + Indicates if live restore is enabled. + + If enabled, containers are kept running when the daemon is shutdown + or upon daemon start if running containers are detected. + type: "boolean" + default: false + example: false + Isolation: + description: | + Represents the isolation technology to use as a default for containers. + The supported values are platform-specific. + + If no isolation value is specified on daemon start, on Windows client, + the default is `hyperv`, and on Windows server, the default is `process`. + + This option is currently not used on other platforms. + default: "default" + type: "string" + enum: + - "default" + - "hyperv" + - "process" + InitBinary: + description: | + Name and, optional, path of the `docker-init` binary. + + If the path is omitted, the daemon searches the host's `$PATH` for the + binary and uses the first result. + type: "string" + example: "docker-init" + ContainerdCommit: + $ref: "#/definitions/Commit" + RuncCommit: + $ref: "#/definitions/Commit" + InitCommit: + $ref: "#/definitions/Commit" + SecurityOptions: + description: | + List of security features that are enabled on the daemon, such as + apparmor, seccomp, SELinux, user-namespaces (userns), rootless and + no-new-privileges. + + Additional configuration options for each security feature may + be present, and are included as a comma-separated list of key/value + pairs. + type: "array" + items: + type: "string" + example: + - "name=apparmor" + - "name=seccomp,profile=default" + - "name=selinux" + - "name=userns" + - "name=rootless" + ProductLicense: + description: | + Reports a summary of the product license on the daemon. + + If a commercial license has been applied to the daemon, information + such as number of nodes, and expiration are included. 
+ type: "string" + example: "Community Engine" + DefaultAddressPools: + description: | + List of custom default address pools for local networks, which can be + specified in the daemon.json file or dockerd option. + + Example: a Base "10.10.0.0/16" with Size 24 will define the set of 256 + 10.10.[0-255].0/24 address pools. + type: "array" + items: + type: "object" + properties: + Base: + description: "The network address in CIDR format" + type: "string" + example: "10.10.0.0/16" + Size: + description: "The network pool size" + type: "integer" + example: "24" + Warnings: + description: | + List of warnings / informational messages about missing features, or + issues related to the daemon configuration. + + These messages can be printed by the client as information to the user. + type: "array" + items: + type: "string" + example: + - "WARNING: No memory limit support" + - "WARNING: bridge-nf-call-iptables is disabled" + - "WARNING: bridge-nf-call-ip6tables is disabled" + + + # PluginsInfo is a temp struct holding Plugins name + # registered with docker daemon. It is used by Info struct + PluginsInfo: + description: | + Available plugins per type. + +
+ + > **Note**: Only unmanaged (V1) plugins are included in this list. + > V1 plugins are "lazily" loaded, and are not returned in this list + > if there is no resource using the plugin. + type: "object" + properties: + Volume: + description: "Names of available volume-drivers, and volume-driver plugins." + type: "array" + items: + type: "string" + example: ["local"] + Network: + description: "Names of available network-drivers, and network-driver plugins." + type: "array" + items: + type: "string" + example: ["bridge", "host", "ipvlan", "macvlan", "null", "overlay"] + Authorization: + description: "Names of available authorization plugins." + type: "array" + items: + type: "string" + example: ["img-authz-plugin", "hbm"] + Log: + description: "Names of available logging-drivers, and logging-driver plugins." + type: "array" + items: + type: "string" + example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "logentries", "splunk", "syslog"] + + + RegistryServiceConfig: + description: | + RegistryServiceConfig stores daemon registry services configuration. + type: "object" + x-nullable: true + properties: + AllowNondistributableArtifactsCIDRs: + description: | + List of IP ranges to which nondistributable artifacts can be pushed, + using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/rfc4632). + + Some images (for example, Windows base images) contain artifacts + whose distribution is restricted by license. When these images are + pushed to a registry, restricted artifacts are not included. + + This configuration overrides this behavior, and enables the daemon to + push nondistributable artifacts to all registries whose resolved IP + address is within the subnet described by the CIDR syntax. + + This option is useful when pushing images containing + nondistributable artifacts to a registry on an air-gapped network so + hosts on that network can pull the images without connecting to + another server. + + > **Warning**: Nondistributable artifacts typically have restrictions + > on how and where they can be distributed and shared. Only use this + > feature to push artifacts to private registries and ensure that you + > are in compliance with any terms that cover redistributing + > nondistributable artifacts. + + type: "array" + items: + type: "string" + example: ["::1/128", "127.0.0.0/8"] + AllowNondistributableArtifactsHostnames: + description: | + List of registry hostnames to which nondistributable artifacts can be + pushed, using the format `<hostname>[:<port>]` or `<IP address>[:<port>]`. + + Some images (for example, Windows base images) contain artifacts + whose distribution is restricted by license. When these images are + pushed to a registry, restricted artifacts are not included. + + This configuration overrides this behavior for the specified + registries. + + This option is useful when pushing images containing + nondistributable artifacts to a registry on an air-gapped network so + hosts on that network can pull the images without connecting to + another server. + + > **Warning**: Nondistributable artifacts typically have restrictions + > on how and where they can be distributed and shared. Only use this + > feature to push artifacts to private registries and ensure that you + > are in compliance with any terms that cover redistributing + > nondistributable artifacts.
+ type: "array" + items: + type: "string" + example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"] + InsecureRegistryCIDRs: + description: | + List of IP ranges of insecure registries, using the CIDR syntax + ([RFC 4632](https://tools.ietf.org/html/4632)). Insecure registries + accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates + from unknown CAs) communication. + + By default, local registries (`127.0.0.0/8`) are configured as + insecure. All other registries are secure. Communicating with an + insecure registry is not possible if the daemon assumes that registry + is secure. + + This configuration override this behavior, insecure communication with + registries whose resolved IP address is within the subnet described by + the CIDR syntax. + + Registries can also be marked insecure by hostname. Those registries + are listed under `IndexConfigs` and have their `Secure` field set to + `false`. + + > **Warning**: Using this option can be useful when running a local + > registry, but introduces security vulnerabilities. This option + > should therefore ONLY be used for testing purposes. For increased + > security, users should add their CA to their system's list of trusted + > CAs instead of enabling this option. + type: "array" + items: + type: "string" + example: ["::1/128", "127.0.0.0/8"] + IndexConfigs: + type: "object" + additionalProperties: + $ref: "#/definitions/IndexInfo" + example: + "127.0.0.1:5000": + "Name": "127.0.0.1:5000" + "Mirrors": [] + "Secure": false + "Official": false + "[2001:db8:a0b:12f0::1]:80": + "Name": "[2001:db8:a0b:12f0::1]:80" + "Mirrors": [] + "Secure": false + "Official": false + "docker.io": + Name: "docker.io" + Mirrors: ["https://hub-mirror.corp.example.com:5000/"] + Secure: true + Official: true + "registry.internal.corp.example.com:3000": + Name: "registry.internal.corp.example.com:3000" + Mirrors: [] + Secure: false + Official: false + Mirrors: + description: | + List of registry URLs that act as a mirror for the official + (`docker.io`) registry. + + type: "array" + items: + type: "string" + example: + - "https://hub-mirror.corp.example.com:5000/" + - "https://[2001:db8:a0b:12f0::1]/" + + IndexInfo: + description: + IndexInfo contains information about a registry. + type: "object" + x-nullable: true + properties: + Name: + description: | + Name of the registry, such as "docker.io". + type: "string" + example: "docker.io" + Mirrors: + description: | + List of mirrors, expressed as URIs. + type: "array" + items: + type: "string" + example: + - "https://hub-mirror.corp.example.com:5000/" + - "https://registry-2.docker.io/" + - "https://registry-3.docker.io/" + Secure: + description: | + Indicates if the registry is part of the list of insecure + registries. + + If `false`, the registry is insecure. Insecure registries accept + un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from + unknown CAs) communication. + + > **Warning**: Insecure registries can be useful when running a local + > registry. However, because its use creates security vulnerabilities + > it should ONLY be enabled for testing purposes. For increased + > security, users should add their CA to their system's list of + > trusted CAs instead of enabling this option. 
+ type: "boolean" + example: true + Official: + description: | + Indicates whether this is an official registry (i.e., Docker Hub / docker.io) + type: "boolean" + example: true + + Runtime: + description: | + Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec) + runtime. + + The runtime is invoked by the daemon via the `containerd` daemon. OCI + runtimes act as an interface to the Linux kernel namespaces, cgroups, + and SELinux. + type: "object" + properties: + path: + description: | + Name and, optional, path, of the OCI executable binary. + + If the path is omitted, the daemon searches the host's `$PATH` for the + binary and uses the first result. + type: "string" + example: "/usr/local/bin/my-oci-runtime" + runtimeArgs: + description: | + List of command-line arguments to pass to the runtime when invoked. + type: "array" + x-nullable: true + items: + type: "string" + example: ["--debug", "--systemd-cgroup=false"] + + Commit: + description: | + Commit holds the Git-commit (SHA1) that a binary was built from, as + reported in the version-string of external tools, such as `containerd`, + or `runC`. + type: "object" + properties: + ID: + description: "Actual commit ID of external tool." + type: "string" + example: "cfb82a876ecc11b5ca0977d1733adbe58599088a" + Expected: + description: | + Commit ID of external tool expected by dockerd as set at build time. + type: "string" + example: "2d41c047c83e09a6d61d464906feb2a2f3c52aa4" + + SwarmInfo: + description: | + Represents generic information about swarm. + type: "object" + properties: + NodeID: + description: "Unique identifier of for this node in the swarm." + type: "string" + default: "" + example: "k67qz4598weg5unwwffg6z1m1" + NodeAddr: + description: | + IP address at which this node can be reached by other nodes in the + swarm. + type: "string" + default: "" + example: "10.0.0.46" + LocalNodeState: + $ref: "#/definitions/LocalNodeState" + ControlAvailable: + type: "boolean" + default: false + example: true + Error: + type: "string" + default: "" + RemoteManagers: + description: | + List of ID's and addresses of other managers in the swarm. + type: "array" + default: null + x-nullable: true + items: + $ref: "#/definitions/PeerNode" + example: + - NodeID: "71izy0goik036k48jg985xnds" + Addr: "10.0.0.158:2377" + - NodeID: "79y6h1o4gv8n120drcprv5nmc" + Addr: "10.0.0.159:2377" + - NodeID: "k67qz4598weg5unwwffg6z1m1" + Addr: "10.0.0.46:2377" + Nodes: + description: "Total number of nodes in the swarm." + type: "integer" + x-nullable: true + example: 4 + Managers: + description: "Total number of managers in the swarm." + type: "integer" + x-nullable: true + example: 3 + Cluster: + $ref: "#/definitions/ClusterInfo" + + LocalNodeState: + description: "Current local status of this node." + type: "string" + default: "" + enum: + - "" + - "inactive" + - "pending" + - "active" + - "error" + - "locked" + example: "active" + + PeerNode: + description: "Represents a peer-node in the swarm" + type: "object" + properties: + NodeID: + description: "Unique identifier of for this node in the swarm." + type: "string" + Addr: + description: | + IP address and ports at which this node can be reached. + type: "string" + + NetworkAttachmentConfig: + description: | + Specifies how a service should be attached to a particular network. + type: "object" + properties: + Target: + description: | + The target network for attachment. Must be a network name or ID. 
+ type: "string" + Aliases: + description: | + Discoverable alternate names for the service on this network. + type: "array" + items: + type: "string" + DriverOpts: + description: | + Driver attachment options for the network target. + type: "object" + additionalProperties: + type: "string" + + EventActor: + description: | + Actor describes something that generates events, like a container, network, + or a volume. + type: "object" + properties: + ID: + description: "The ID of the object emitting the event" + type: "string" + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Attributes: + description: | + Various key/value attributes of the object, depending on its type. + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-label-value" + image: "alpine:latest" + name: "my-container" + + EventMessage: + description: | + EventMessage represents the information an event contains. + type: "object" + title: "SystemEventsResponse" + properties: + Type: + description: "The type of object emitting the event" + type: "string" + enum: ["builder", "config", "container", "daemon", "image", "network", "node", "plugin", "secret", "service", "volume"] + example: "container" + Action: + description: "The type of event" + type: "string" + example: "create" + Actor: + $ref: "#/definitions/EventActor" + scope: + description: | + Scope of the event. Engine events are `local` scope. Cluster (Swarm) + events are `swarm` scope. + type: "string" + enum: ["local", "swarm"] + time: + description: "Timestamp of event" + type: "integer" + format: "int64" + example: 1629574695 + timeNano: + description: "Timestamp of event, with nanosecond accuracy" + type: "integer" + format: "int64" + example: 1629574695515050031 + + OCIDescriptor: + type: "object" + x-go-name: Descriptor + description: | + A descriptor struct containing digest, media type, and size, as defined in + the [OCI Content Descriptors Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/descriptor.md). + properties: + mediaType: + description: | + The media type of the object this schema refers to. + type: "string" + example: "application/vnd.docker.distribution.manifest.v2+json" + digest: + description: | + The digest of the targeted content. + type: "string" + example: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" + size: + description: | + The size in bytes of the blob. + type: "integer" + format: "int64" + example: 3987495 + # TODO Not yet including these fields for now, as they are nil / omitted in our response. + # urls: + # description: | + # List of URLs from which this object MAY be downloaded. + # type: "array" + # items: + # type: "string" + # format: "uri" + # annotations: + # description: | + # Arbitrary metadata relating to the targeted content. + # type: "object" + # additionalProperties: + # type: "string" + # platform: + # $ref: "#/definitions/OCIPlatform" + + OCIPlatform: + type: "object" + x-go-name: Platform + description: | + Describes the platform which the image in the manifest runs on, as defined + in the [OCI Image Index Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/image-index.md). + properties: + architecture: + description: | + The CPU architecture, for example `amd64` or `ppc64`. + type: "string" + example: "arm" + os: + description: | + The operating system, for example `linux` or `windows`. 
+ type: "string" + example: "windows" + os.version: + description: | + Optional field specifying the operating system version, for example on + Windows `10.0.19041.1165`. + type: "string" + example: "10.0.19041.1165" + os.features: + description: | + Optional field specifying an array of strings, each listing a required + OS feature (for example on Windows `win32k`). + type: "array" + items: + type: "string" + example: + - "win32k" + variant: + description: | + Optional field specifying a variant of the CPU, for example `v7` to + specify ARMv7 when architecture is `arm`. + type: "string" + example: "v7" + + DistributionInspect: + type: "object" + x-go-name: DistributionInspect + title: "DistributionInspectResponse" + required: [Descriptor, Platforms] + description: | + Describes the result obtained from contacting the registry to retrieve + image metadata. + properties: + Descriptor: + $ref: "#/definitions/OCIDescriptor" + Platforms: + type: "array" + description: | + An array containing all platforms supported by the image. + items: + $ref: "#/definitions/OCIPlatform" + + ClusterVolume: + type: "object" + description: | + Options and information specific to, and only present on, Swarm CSI + cluster volumes. + properties: + ID: + type: "string" + description: | + The Swarm ID of this volume. Because cluster volumes are Swarm + objects, they have an ID, unlike non-cluster volumes. This ID can + be used to refer to the Volume instead of the name. + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ClusterVolumeSpec" + Info: + type: "object" + description: | + Information about the global status of the volume. + properties: + CapacityBytes: + type: "integer" + format: "int64" + description: | + The capacity of the volume in bytes. A value of 0 indicates that + the capacity is unknown. + VolumeContext: + type: "object" + description: | + A map of strings to strings returned from the storage plugin when + the volume is created. + additionalProperties: + type: "string" + VolumeID: + type: "string" + description: | + The ID of the volume as returned by the CSI storage plugin. This + is distinct from the volume's ID as provided by Docker. This ID + is never used by the user when communicating with Docker to refer + to this volume. If the ID is blank, then the Volume has not been + successfully created in the plugin yet. + AccessibleTopology: + type: "array" + description: | + The topology this volume is actually accessible from. + items: + $ref: "#/definitions/Topology" + PublishStatus: + type: "array" + description: | + The status of the volume as it pertains to its publishing and use on + specific nodes + items: + type: "object" + properties: + NodeID: + type: "string" + description: | + The ID of the Swarm node the volume is published on. + State: + type: "string" + description: | + The published state of the volume. + * `pending-publish` The volume should be published to this node, but the call to the controller plugin to do so has not yet been successfully completed. + * `published` The volume is published successfully to the node. + * `pending-node-unpublish` The volume should be unpublished from the node, and the manager is awaiting confirmation from the worker that it has done so. + * `pending-controller-unpublish` The volume is successfully unpublished from the node, but has not yet been successfully unpublished on the controller. 
+ enum: + - "pending-publish" + - "published" + - "pending-node-unpublish" + - "pending-controller-unpublish" + PublishContext: + type: "object" + description: | + A map of strings to strings returned by the CSI controller + plugin when a volume is published. + additionalProperties: + type: "string" + + ClusterVolumeSpec: + type: "object" + description: | + Cluster-specific options used to create the volume. + properties: + Group: + type: "string" + description: | + Group defines the volume group of this volume. Volumes belonging to + the same group can be referred to by group name when creating + Services. Referring to a volume by group instructs Swarm to treat + volumes in that group interchangeably for the purpose of scheduling. + Volumes with an empty string for a group technically all belong to + the same, emptystring group. + AccessMode: + type: "object" + description: | + Defines how the volume is used by tasks. + properties: + Scope: + type: "string" + description: | + The set of nodes this volume can be used on at one time. + - `single` The volume may only be scheduled to one node at a time. + - `multi` the volume may be scheduled to any supported number of nodes at a time. + default: "single" + enum: ["single", "multi"] + x-nullable: false + Sharing: + type: "string" + description: | + The number and way that different tasks can use this volume + at one time. + - `none` The volume may only be used by one task at a time. + - `readonly` The volume may be used by any number of tasks, but they all must mount the volume as readonly + - `onewriter` The volume may be used by any number of tasks, but only one may mount it as read/write. + - `all` The volume may have any number of readers and writers. + default: "none" + enum: ["none", "readonly", "onewriter", "all"] + x-nullable: false + MountVolume: + type: "object" + description: | + Options for using this volume as a Mount-type volume. + + Either MountVolume or BlockVolume, but not both, must be + present. + properties: + FsType: + type: "string" + description: | + Specifies the filesystem type for the mount volume. + Optional. + MountFlags: + type: "array" + description: | + Flags to pass when mounting the volume. Optional. + items: + type: "string" + BlockVolume: + type: "object" + description: | + Options for using this volume as a Block-type volume. + Intentionally empty. + Secrets: + type: "array" + description: | + Swarm Secrets that are passed to the CSI storage plugin when + operating on this volume. + items: + type: "object" + description: | + One cluster volume secret entry. Defines a key-value pair that + is passed to the plugin. + properties: + Key: + type: "string" + description: | + Key is the name of the key of the key-value pair passed to + the plugin. + Secret: + type: "string" + description: | + Secret is the swarm Secret object from which to read data. + This can be a Secret name or ID. The Secret data is + retrieved by swarm and used as the value of the key-value + pair passed to the plugin. + AccessibilityRequirements: + type: "object" + description: | + Requirements for the accessible topology of the volume. These + fields are optional. For an in-depth description of what these + fields mean, see the CSI specification. + properties: + Requisite: + type: "array" + description: | + A list of required topologies, at least one of which the + volume must be accessible from. 
+ items: + $ref: "#/definitions/Topology" + Preferred: + type: "array" + description: | + A list of topologies that the volume should attempt to be + provisioned in. + items: + $ref: "#/definitions/Topology" + CapacityRange: + type: "object" + description: | + The desired capacity that the volume should be created with. If + empty, the plugin will decide the capacity. + properties: + RequiredBytes: + type: "integer" + format: "int64" + description: | + The volume must be at least this big. The value of 0 + indicates an unspecified minimum. + LimitBytes: + type: "integer" + format: "int64" + description: | + The volume must not be bigger than this. The value of 0 + indicates an unspecified maximum. + Availability: + type: "string" + description: | + The availability of the volume for use in tasks. + - `active` The volume is fully available for scheduling on the cluster. + - `pause` No new workloads should use the volume, but existing workloads are not stopped. + - `drain` All workloads using this volume should be stopped and rescheduled, and no new ones should be started. + default: "active" + x-nullable: false + enum: + - "active" + - "pause" + - "drain" + + Topology: + description: | + A map of topological domains to topological segments. For in-depth + details, see documentation for the Topology object in the CSI + specification. + type: "object" + additionalProperties: + type: "string" + +paths: + /containers/json: + get: + summary: "List containers" + description: | + Returns a list of containers. For details on the format, see the + [inspect endpoint](#operation/ContainerInspect). + + Note that it uses a different, smaller representation of a container + than inspecting a single container. For example, the list of linked + containers is not propagated. + operationId: "ContainerList" + produces: + - "application/json" + parameters: + - name: "all" + in: "query" + description: | + Return all containers. By default, only running containers are shown. + type: "boolean" + default: false + - name: "limit" + in: "query" + description: | + Return this number of most recently created containers, including + non-running ones. + type: "integer" + - name: "size" + in: "query" + description: | + Return the size of container as fields `SizeRw` and `SizeRootFs`. + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + Filters to process on the container list, encoded as JSON (a + `map[string][]string`). For example, `{"status": ["paused"]}` will + only return paused containers.
+ + Available filters: + + - `ancestor`=(`<image-name>[:<tag>]`, `<image id>`, or `<image@digest>`) + - `before`=(`<container id>` or `<container name>`) + - `expose`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) + - `exited=<int>` containers with exit code of `<int>` + - `health`=(`starting`|`healthy`|`unhealthy`|`none`) + - `id=<ID>` a container's ID + - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) + - `is-task=`(`true`|`false`) + - `label=key` or `label="key=value"` of a container label + - `name=<name>` a container's name + - `network`=(`<network id>` or `<network name>`) + - `publish`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) + - `since`=(`<container id>` or `<container name>`) + - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`) + - `volume`=(`<volume name>` or `<mount point destination>`) + type: "string" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/ContainerSummary" + examples: + application/json: + - Id: "8dfafdbc3a40" + Names: + - "/boring_feynman" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 1" + Created: 1367854155 + State: "Exited" + Status: "Exit 0" + Ports: + - PrivatePort: 2222 + PublicPort: 3333 + Type: "tcp" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:02" + Mounts: + - Name: "fac362...80535" + Source: "/data" + Destination: "/data" + Driver: "local" + Mode: "ro,Z" + RW: false + Propagation: "" + - Id: "9cd87474be90" + Names: + - "/coolName" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 222222" + Created: 1367854155 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.8" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:08" + Mounts: [] + - Id: "3176a2479c92" + Names: + - "/sleepy_dog" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 3333333333333333" + Created: 1367854154 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.6" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:06" + Mounts: [] + - Id: "4cb07b47f9fb" + Names: + - "/running_cat" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 444444444444444444444444444444444" + Created: 1367854152 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: +
NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.5" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:05" + Mounts: [] + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/create: + post: + summary: "Create a container" + operationId: "ContainerCreate" + consumes: + - "application/json" + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "name" + in: "query" + description: | + Assign the specified name to the container. Must match + `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`. + type: "string" + pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$" + - name: "platform" + in: "query" + description: | + Platform in the format `os[/arch[/variant]]` used for image lookup. + + When specified, the daemon checks if the requested image is present + in the local image cache with the given OS and Architecture, and + otherwise returns a `404` status. + + If the option is not set, the host's native OS and Architecture are + used to look up the image in the image cache. However, if no platform + is passed and the given image does exist in the local image cache, + but its OS or architecture does not match, the container is created + with the available image, and a warning is added to the `Warnings` + field in the response, for example; + + WARNING: The requested image's platform (linux/arm64/v8) does not + match the detected host platform (linux/amd64) and no + specific platform was requested + + type: "string" + default: "" + - name: "body" + in: "body" + description: "Container to create" + schema: + allOf: + - $ref: "#/definitions/ContainerConfig" + - type: "object" + properties: + HostConfig: + $ref: "#/definitions/HostConfig" + NetworkingConfig: + $ref: "#/definitions/NetworkingConfig" + example: + Hostname: "" + Domainname: "" + User: "" + AttachStdin: false + AttachStdout: true + AttachStderr: true + Tty: false + OpenStdin: false + StdinOnce: false + Env: + - "FOO=bar" + - "BAZ=quux" + Cmd: + - "date" + Entrypoint: "" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + Volumes: + /volumes/data: {} + WorkingDir: "" + NetworkDisabled: false + MacAddress: "12:34:56:78:9a:bc" + ExposedPorts: + 22/tcp: {} + StopSignal: "SIGTERM" + StopTimeout: 10 + HostConfig: + Binds: + - "/tmp:/tmp" + Links: + - "redis3:redis" + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + NanoCpus: 500000 + CpuPercent: 80 + CpuShares: 512 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpuQuota: 50000 + CpusetCpus: "0,1" + CpusetMems: "0,1" + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 300 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceWriteIOps: + - {} + DeviceRequests: + - Driver: "nvidia" + Count: -1 + DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] + Capabilities: [["gpu", "nvidia", "compute"]] + Options: + property1: "string" + property2: "string" + MemorySwappiness: 60 + OomKillDisable: false + OomScoreAdj: 500 + PidMode: "" + PidsLimit: 0 + PortBindings: + 22/tcp: + - 
HostPort: "11022" + PublishAllPorts: false + Privileged: false + ReadonlyRootfs: false + Dns: + - "8.8.8.8" + DnsOptions: + - "" + DnsSearch: + - "" + VolumesFrom: + - "parent" + - "other:ro" + CapAdd: + - "NET_ADMIN" + CapDrop: + - "MKNOD" + GroupAdd: + - "newgroup" + RestartPolicy: + Name: "" + MaximumRetryCount: 0 + AutoRemove: true + NetworkMode: "bridge" + Devices: [] + Ulimits: + - {} + LogConfig: + Type: "json-file" + Config: {} + SecurityOpt: [] + StorageOpt: {} + CgroupParent: "" + VolumeDriver: "" + ShmSize: 67108864 + NetworkingConfig: + EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + + required: true + responses: + 201: + description: "Container created successfully" + schema: + $ref: "#/definitions/ContainerCreateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such image" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: c2ada9df5af8" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/{id}/json: + get: + summary: "Inspect a container" + description: "Return low-level information about a container." + operationId: "ContainerInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "ContainerInspectResponse" + properties: + Id: + description: "The ID of the container" + type: "string" + Created: + description: "The time the container was created" + type: "string" + Path: + description: "The path to the command being run" + type: "string" + Args: + description: "The arguments to the command being run" + type: "array" + items: + type: "string" + State: + $ref: "#/definitions/ContainerState" + Image: + description: "The container's image ID" + type: "string" + ResolvConfPath: + type: "string" + HostnamePath: + type: "string" + HostsPath: + type: "string" + LogPath: + type: "string" + Name: + type: "string" + RestartCount: + type: "integer" + Driver: + type: "string" + Platform: + type: "string" + MountLabel: + type: "string" + ProcessLabel: + type: "string" + AppArmorProfile: + type: "string" + ExecIDs: + description: "IDs of exec instances that are running in the container." + type: "array" + items: + type: "string" + x-nullable: true + HostConfig: + $ref: "#/definitions/HostConfig" + GraphDriver: + $ref: "#/definitions/GraphDriverData" + SizeRw: + description: | + The size of files that have been created or changed by this + container. + type: "integer" + format: "int64" + SizeRootFs: + description: "The total size of all the files in this container." 
+ type: "integer" + format: "int64" + Mounts: + type: "array" + items: + $ref: "#/definitions/MountPoint" + Config: + $ref: "#/definitions/ContainerConfig" + NetworkSettings: + $ref: "#/definitions/NetworkSettings" + examples: + application/json: + AppArmorProfile: "" + Args: + - "-c" + - "exit 9" + Config: + AttachStderr: true + AttachStdin: false + AttachStdout: true + Cmd: + - "/bin/sh" + - "-c" + - "exit 9" + Domainname: "" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Healthcheck: + Test: ["CMD-SHELL", "exit 0"] + Hostname: "ba033ac44011" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + MacAddress: "" + NetworkDisabled: false + OpenStdin: false + StdinOnce: false + Tty: false + User: "" + Volumes: + /volumes/data: {} + WorkingDir: "" + StopSignal: "SIGTERM" + StopTimeout: 10 + Created: "2015-01-06T15:47:31.485331387Z" + Driver: "devicemapper" + ExecIDs: + - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" + - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" + HostConfig: + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 0 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteIOps: + - {} + ContainerIDFile: "" + CpusetCpus: "" + CpusetMems: "" + CpuPercent: 80 + CpuShares: 0 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + Devices: [] + DeviceRequests: + - Driver: "nvidia" + Count: -1 + DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] + Capabilities: [["gpu", "nvidia", "compute"]] + Options: + property1: "string" + property2: "string" + IpcMode: "" + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + OomKillDisable: false + OomScoreAdj: 500 + NetworkMode: "bridge" + PidMode: "" + PortBindings: {} + Privileged: false + ReadonlyRootfs: false + PublishAllPorts: false + RestartPolicy: + MaximumRetryCount: 2 + Name: "on-failure" + LogConfig: + Type: "json-file" + Sysctls: + net.ipv4.ip_forward: "1" + Ulimits: + - {} + VolumeDriver: "" + ShmSize: 67108864 + HostnamePath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname" + HostsPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts" + LogPath: "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log" + Id: "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39" + Image: "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2" + MountLabel: "" + Name: "/boring_euclid" + NetworkSettings: + Bridge: "" + SandboxID: "" + HairpinMode: false + LinkLocalIPv6Address: "" + LinkLocalIPv6PrefixLen: 0 + SandboxKey: "" + EndpointID: "" + Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + IPAddress: "" + IPPrefixLen: 0 + IPv6Gateway: "" + MacAddress: "" + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:12:00:02" + Path: "/bin/sh" + ProcessLabel: "" + ResolvConfPath: 
"/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf" + RestartCount: 1 + State: + Error: "" + ExitCode: 9 + FinishedAt: "2015-01-06T15:47:32.080254511Z" + Health: + Status: "healthy" + FailingStreak: 0 + Log: + - Start: "2019-12-22T10:59:05.6385933Z" + End: "2019-12-22T10:59:05.8078452Z" + ExitCode: 0 + Output: "" + OOMKilled: false + Dead: false + Paused: false + Pid: 0 + Restarting: false + Running: true + StartedAt: "2015-01-06T15:47:32.072697474Z" + Status: "running" + Mounts: + - Name: "fac362...80535" + Source: "/data" + Destination: "/data" + Driver: "local" + Mode: "ro,Z" + RW: false + Propagation: "" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "size" + in: "query" + type: "boolean" + default: false + description: "Return the size of container as fields `SizeRw` and `SizeRootFs`" + tags: ["Container"] + /containers/{id}/top: + get: + summary: "List processes running inside a container" + description: | + On Unix systems, this is done by running the `ps` command. This endpoint + is not supported on Windows. + operationId: "ContainerTop" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "ContainerTopResponse" + description: "OK response to ContainerTop operation" + properties: + Titles: + description: "The ps column titles" + type: "array" + items: + type: "string" + Processes: + description: | + Each process running in the container, where each is process + is an array of values corresponding to the titles. + type: "array" + items: + type: "array" + items: + type: "string" + examples: + application/json: + Titles: + - "UID" + - "PID" + - "PPID" + - "C" + - "STIME" + - "TTY" + - "TIME" + - "CMD" + Processes: + - + - "root" + - "13642" + - "882" + - "0" + - "17:03" + - "pts/0" + - "00:00:00" + - "/bin/bash" + - + - "root" + - "13735" + - "13642" + - "0" + - "17:06" + - "pts/0" + - "00:00:00" + - "sleep 10" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "ps_args" + in: "query" + description: "The arguments to pass to `ps`. For example, `aux`" + type: "string" + default: "-ef" + tags: ["Container"] + /containers/{id}/logs: + get: + summary: "Get container logs" + description: | + Get `stdout` and `stderr` logs from a container. + + Note: This endpoint works only for containers with the `json-file` or + `journald` logging driver. + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + operationId: "ContainerLogs" + responses: + 200: + description: | + logs returned as a stream in response body. + For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + Note that unlike the attach endpoint, the logs endpoint does not + upgrade the connection and does not set Content-Type. 
+ schema: + type: "string" + format: "binary" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "follow" + in: "query" + description: "Keep connection after returning logs." + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "until" + in: "query" + description: "Only return logs before this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Container"] + /containers/{id}/changes: + get: + summary: "Get changes on a container’s filesystem" + description: | + Returns which files in a container's filesystem have been added, deleted, + or modified. The `Kind` of modification can be one of: + + - `0`: Modified ("C") + - `1`: Added ("A") + - `2`: Deleted ("D") + operationId: "ContainerChanges" + produces: ["application/json"] + responses: + 200: + description: "The list of changes" + schema: + type: "array" + items: + $ref: "#/definitions/FilesystemChange" + examples: + application/json: + - Path: "/dev" + Kind: 0 + - Path: "/dev/kmsg" + Kind: 1 + - Path: "/test" + Kind: 1 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/export: + get: + summary: "Export a container" + description: "Export the contents of a container as a tarball." + operationId: "ContainerExport" + produces: + - "application/octet-stream" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/stats: + get: + summary: "Get container stats based on resource usage" + description: | + This endpoint returns a live stream of a container’s resource usage + statistics. + + The `precpu_stats` is the CPU statistic of the *previous* read, and is + used to calculate the CPU usage percentage. It is not an exact copy + of the `cpu_stats` field. 
+ + If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is + nil then for compatibility with older daemons the length of the + corresponding `cpu_usage.percpu_usage` array should be used. + + On a cgroup v2 host, the following fields are not set: + * `blkio_stats`: all fields other than `io_service_bytes_recursive` + * `cpu_stats`: `cpu_usage.percpu_usage` + * `memory_stats`: `max_usage` and `failcnt` + Also, `memory_stats.stats` fields are incompatible with cgroup v1. + + To calculate the values shown by the `stats` command of the Docker CLI, + the following formulas can be used: + * used_memory = `memory_stats.usage - memory_stats.stats.cache` + * available_memory = `memory_stats.limit` + * Memory usage % = `(used_memory / available_memory) * 100.0` + * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` + * system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage` + * number_cpus = `length(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus` + * CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0` + operationId: "ContainerStats" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + type: "object" + examples: + application/json: + read: "2015-01-08T22:57:31.547920715Z" + pids_stats: + current: 3 + networks: + eth0: + rx_bytes: 5338 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 36 + tx_bytes: 648 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 8 + eth5: + rx_bytes: 4641 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 26 + tx_bytes: 690 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 9 + memory_stats: + stats: + total_pgmajfault: 0 + cache: 0 + mapped_file: 0 + total_inactive_file: 0 + pgpgout: 414 + rss: 6537216 + total_mapped_file: 0 + writeback: 0 + unevictable: 0 + pgpgin: 477 + total_unevictable: 0 + pgmajfault: 0 + total_rss: 6537216 + total_rss_huge: 6291456 + total_writeback: 0 + total_inactive_anon: 0 + rss_huge: 6291456 + hierarchical_memory_limit: 67108864 + total_pgfault: 964 + total_active_file: 0 + active_anon: 6537216 + total_active_anon: 6537216 + total_pgpgout: 414 + total_cache: 0 + inactive_anon: 0 + active_file: 0 + pgfault: 964 + inactive_file: 0 + total_pgpgin: 477 + max_usage: 6651904 + usage: 6537216 + failcnt: 0 + limit: 67108864 + blkio_stats: {} + cpu_stats: + cpu_usage: + percpu_usage: + - 8646879 + - 24472255 + - 36438778 + - 30657443 + usage_in_usermode: 50000000 + total_usage: 100215355 + usage_in_kernelmode: 30000000 + system_cpu_usage: 739306590000000 + online_cpus: 4 + throttling_data: + periods: 0 + throttled_periods: 0 + throttled_time: 0 + precpu_stats: + cpu_usage: + percpu_usage: + - 8646879 + - 24350896 + - 36438778 + - 30657443 + usage_in_usermode: 50000000 + total_usage: 100093996 + usage_in_kernelmode: 30000000 + system_cpu_usage: 9492140000000 + online_cpus: 4 + throttling_data: + periods: 0 + throttled_periods: 0 + throttled_time: 0 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "stream" + in: "query" + description: | + Stream the output. If false, the stats will be output once and then + it will disconnect.
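As a worked version of the formulas above, here is a minimal Go sketch. The `statsSubset` struct is a hand-rolled subset of the stats JSON (field names follow the example response), not a type from any particular client library, and the `cache` key only exists on cgroup v1 hosts:

```go
package main

import "fmt"

// statsSubset models only the fields needed for the formulas above;
// the full /containers/{id}/stats response contains many more.
type statsSubset struct {
	CPUStats    cpuStats    `json:"cpu_stats"`
	PreCPUStats cpuStats    `json:"precpu_stats"`
	MemoryStats memoryStats `json:"memory_stats"`
}

type cpuStats struct {
	CPUUsage struct {
		TotalUsage  uint64   `json:"total_usage"`
		PercpuUsage []uint64 `json:"percpu_usage"`
	} `json:"cpu_usage"`
	SystemCPUUsage uint64 `json:"system_cpu_usage"`
	OnlineCPUs     uint32 `json:"online_cpus"`
}

type memoryStats struct {
	Usage uint64            `json:"usage"`
	Limit uint64            `json:"limit"`
	Stats map[string]uint64 `json:"stats"`
}

// cpuPercent applies: (cpu_delta / system_cpu_delta) * number_cpus * 100.0
func cpuPercent(s statsSubset) float64 {
	cpuDelta := float64(s.CPUStats.CPUUsage.TotalUsage - s.PreCPUStats.CPUUsage.TotalUsage)
	systemDelta := float64(s.CPUStats.SystemCPUUsage - s.PreCPUStats.SystemCPUUsage)
	numCPUs := float64(s.CPUStats.OnlineCPUs)
	if numCPUs == 0 { // older daemons: fall back to the percpu_usage length
		numCPUs = float64(len(s.CPUStats.CPUUsage.PercpuUsage))
	}
	if systemDelta == 0 {
		return 0
	}
	return (cpuDelta / systemDelta) * numCPUs * 100.0
}

// memPercent applies: ((usage - stats.cache) / limit) * 100.0
// (stats.cache is a cgroup v1 field; it is absent on cgroup v2 hosts).
func memPercent(s statsSubset) float64 {
	if s.MemoryStats.Limit == 0 {
		return 0
	}
	used := s.MemoryStats.Usage - s.MemoryStats.Stats["cache"]
	return float64(used) / float64(s.MemoryStats.Limit) * 100.0
}

func main() {
	// Values taken from the example response above.
	var s statsSubset
	s.CPUStats.CPUUsage.TotalUsage = 100215355
	s.PreCPUStats.CPUUsage.TotalUsage = 100093996
	s.CPUStats.SystemCPUUsage = 739306590000000
	s.PreCPUStats.SystemCPUUsage = 9492140000000
	s.CPUStats.OnlineCPUs = 4
	fmt.Printf("CPU usage %%: %f\n", cpuPercent(s))
}
```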
+ type: "boolean" + default: true + - name: "one-shot" + in: "query" + description: | + Only get a single stat instead of waiting for 2 cycles. Must be used + with `stream=false`. + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/resize: + post: + summary: "Resize a container TTY" + description: "Resize the TTY for a container." + operationId: "ContainerResize" + consumes: + - "application/octet-stream" + produces: + - "text/plain" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "cannot resize container" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "h" + in: "query" + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Container"] + /containers/{id}/start: + post: + summary: "Start a container" + operationId: "ContainerStart" + responses: + 204: + description: "no error" + 304: + description: "container already started" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container. Format is a + single character `[a-Z]` or `ctrl-` where `` is one + of: `a-z`, `@`, `^`, `[`, `,` or `_`. + type: "string" + tags: ["Container"] + /containers/{id}/stop: + post: + summary: "Stop a container" + operationId: "ContainerStop" + responses: + 204: + description: "no error" + 304: + description: "container already stopped" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/restart: + post: + summary: "Restart a container" + operationId: "ContainerRestart" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). 
+ type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/kill: + post: + summary: "Kill a container" + description: | + Send a POSIX signal to a container, defaulting to killing to the + container. + operationId: "ContainerKill" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is not running" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). + type: "string" + default: "SIGKILL" + tags: ["Container"] + /containers/{id}/update: + post: + summary: "Update a container" + description: | + Change various configuration options of a container without having to + recreate it. + operationId: "ContainerUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "The container has been updated." + schema: + type: "object" + title: "ContainerUpdateResponse" + description: "OK response to ContainerUpdate operation" + properties: + Warnings: + type: "array" + items: + type: "string" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "update" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + example: + BlkioWeight: 300 + CpuShares: 512 + CpuPeriod: 100000 + CpuQuota: 50000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpusetCpus: "0,1" + CpusetMems: "0" + Memory: 314572800 + MemorySwap: 514288000 + MemoryReservation: 209715200 + RestartPolicy: + MaximumRetryCount: 4 + Name: "on-failure" + tags: ["Container"] + /containers/{id}/rename: + post: + summary: "Rename a container" + operationId: "ContainerRename" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "name already in use" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "name" + in: "query" + required: true + description: "New name for the container" + type: "string" + tags: ["Container"] + /containers/{id}/pause: + post: + summary: "Pause a container" + description: | + Use the freezer cgroup to suspend all processes in a container. 
+ + Traditionally, when suspending a process the `SIGSTOP` signal is used, + which is observable by the process being suspended. With the freezer + cgroup the process is unaware of, and unable to trap, the suspension + and subsequent resumption. + operationId: "ContainerPause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/unpause: + post: + summary: "Unpause a container" + description: "Resume a container which has been paused." + operationId: "ContainerUnpause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/attach: + post: + summary: "Attach to a container" + description: | + Attach to a container to read its output or send it input. You can attach + to the same container multiple times and you can reattach to containers + that have been detached. + + Either the `stream` or `logs` parameter must be `true` for this endpoint + to do anything. + + See the [documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) + for more details. + + ### Hijacking + + This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, + and `stderr` on the same socket. + + This is the response from the daemon for an attach request: + + ``` + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + [STREAM] + ``` + + After the headers and two newlines, the TCP connection can now be used + for raw, bidirectional communication between the client and server. + + To hint potential proxies about connection hijacking, the Docker client + can also optionally send connection upgrade headers. + + For example, the client sends this request to upgrade the connection: + + ``` + POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 + Upgrade: tcp + Connection: Upgrade + ``` + + The Docker daemon will respond with a `101 UPGRADED` response, and will + similarly follow with the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + [STREAM] + ``` + + ### Stream format + + When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), + the HTTP Content-Type header is set to application/vnd.docker.multiplexed-stream + and the stream over the hijacked connection is multiplexed to separate out + `stdout` and `stderr`. The stream consists of a series of frames, each + containing a header and a payload. + + The header identifies the stream the frame belongs to (`stdout` or + `stderr`). It also contains the size of the associated frame encoded in + the last four bytes (`uint32`).
+ + It is encoded on the first eight bytes like this: + + ```go + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + ``` + + `STREAM_TYPE` can be: + + - 0: `stdin` (is written on `stdout`) + - 1: `stdout` + - 2: `stderr` + + `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size + encoded as big endian. + + Following the header is the payload, which is the specified number of + bytes of `STREAM_TYPE`. + + The simplest way to implement this protocol is the following: + + 1. Read 8 bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + + ### Stream format when using a TTY + + When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), + the stream is not multiplexed. The data exchanged over the hijacked + connection is simply the raw data from the process PTY and client's + `stdin`. + + operationId: "ContainerAttach" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container. Format is a single + character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, + `@`, `^`, `[`, `,` or `_`. + type: "string" + - name: "logs" + in: "query" + description: | + Replay previous logs from the container. + + This is useful for attaching to a container that has started and you + want to output everything since the container started. + + If `stream` is also enabled, once all the previous output has been + returned, it will seamlessly transition into streaming current + output. + type: "boolean" + default: false + - name: "stream" + in: "query" + description: | + Stream attached streams from the time the request was made onwards.
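The five-step loop above translates almost mechanically to code. Below is a minimal Go sketch of the demultiplexer, assuming `r` carries the raw multiplexed stream (the official Go client ships an equivalent helper, `stdcopy.StdCopy`):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"io"
	"os"
)

// demux reads frames from the multiplexed attach/logs stream and writes
// each payload to stdout or stderr according to the header's first byte.
func demux(r io.Reader, stdout, stderr io.Writer) error {
	var header [8]byte
	for {
		// 1. Read 8 bytes.
		if _, err := io.ReadFull(r, header[:]); err != nil {
			if err == io.EOF {
				return nil // clean end of stream
			}
			return err
		}
		// 2. Choose stdout or stderr depending on the first byte.
		var dst io.Writer
		switch header[0] {
		case 0, 1: // stdin (written on stdout) and stdout
			dst = stdout
		case 2: // stderr
			dst = stderr
		default:
			return fmt.Errorf("unknown stream type %d", header[0])
		}
		// 3. Extract the frame size from the last four bytes (big endian).
		size := binary.BigEndian.Uint32(header[4:])
		// 4. Read the payload and write it to the chosen output.
		if _, err := io.CopyN(dst, r, int64(size)); err != nil {
			return err
		}
		// 5. Goto 1 (loop).
	}
}

func main() {
	// In practice r would be the hijacked connection or the logs response body.
	_ = demux(os.Stdin, os.Stdout, os.Stderr)
}
```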
+ type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/attach/ws: + get: + summary: "Attach to a container via a websocket" + operationId: "ContainerAttachWebsocket" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container.Format is a single + character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, + `@`, `^`, `[`, `,`, or `_`. + type: "string" + - name: "logs" + in: "query" + description: "Return logs" + type: "boolean" + default: false + - name: "stream" + in: "query" + description: "Return stream" + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/wait: + post: + summary: "Wait for a container" + description: "Block until a container stops, then returns the exit code." + operationId: "ContainerWait" + produces: ["application/json"] + responses: + 200: + description: "The container has exit." + schema: + $ref: "#/definitions/ContainerWaitResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "condition" + in: "query" + description: | + Wait until a container state reaches the given condition. + + Defaults to `not-running` if omitted or empty. + type: "string" + enum: + - "not-running" + - "next-exit" + - "removed" + default: "not-running" + tags: ["Container"] + /containers/{id}: + delete: + summary: "Remove a container" + operationId: "ContainerDelete" + responses: + 204: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: | + You cannot remove a running container: c2ada9df5af8. 
Stop the + container before attempting removal or force remove + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "v" + in: "query" + description: "Remove anonymous volumes associated with the container." + type: "boolean" + default: false + - name: "force" + in: "query" + description: "If the container is running, kill it before removing it." + type: "boolean" + default: false + - name: "link" + in: "query" + description: "Remove the specified link associated with the container." + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/archive: + head: + summary: "Get information about files in a container" + description: | + A response header `X-Docker-Container-Path-Stat` is returned, containing + a base64-encoded JSON object with some filesystem header information + about the path. + operationId: "ContainerArchiveInfo" + responses: + 200: + description: "no error" + headers: + X-Docker-Container-Path-Stat: + type: "string" + description: | + A base64-encoded JSON object with some filesystem header + information about the path + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + get: + summary: "Get an archive of a filesystem resource in a container" + description: "Get a tar archive of a resource in the filesystem of container id." + operationId: "ContainerArchive" + produces: ["application/x-tar"] + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + put: + summary: "Extract an archive of files or folders to a directory in a container" + description: | + Upload a tar archive to be extracted to a path in the filesystem of container id. + `path` parameter is asserted to be a directory. If it exists as a file, a 400 error + is returned with the message "not a directory". + operationId: "PutContainerArchive" + consumes: ["application/x-tar", "application/octet-stream"] + responses: + 200: + description: "The content was extracted successfully" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "not a directory" + 403: + description: "Permission denied, the volume or container rootfs is marked as read-only."
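To make the `X-Docker-Container-Path-Stat` header concrete, here is a hedged Go sketch that issues the HEAD request and decodes the header. The stat field names used here (`name`, `size`, `mode`, `mtime`, `linkTarget`) are what current daemons are known to emit, but treat them as an assumption rather than a contract; the daemon address and container ID are placeholders:

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
	"time"
)

// pathStat mirrors the JSON carried in X-Docker-Container-Path-Stat.
type pathStat struct {
	Name       string    `json:"name"`
	Size       int64     `json:"size"`
	Mode       uint32    `json:"mode"`
	Mtime      time.Time `json:"mtime"`
	LinkTarget string    `json:"linkTarget"`
}

func statContainerPath(daemon, id, path string) (*pathStat, error) {
	endpoint := fmt.Sprintf("%s/containers/%s/archive?path=%s",
		daemon, id, url.QueryEscape(path))
	resp, err := http.Head(endpoint)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	raw := resp.Header.Get("X-Docker-Container-Path-Stat")
	decoded, err := base64.StdEncoding.DecodeString(raw)
	if err != nil {
		return nil, err
	}
	var st pathStat
	if err := json.Unmarshal(decoded, &st); err != nil {
		return nil, err
	}
	return &st, nil
}

func main() {
	// Assumes a daemon reachable over TCP; adjust host and container ID.
	st, err := statContainerPath("http://localhost:2375", "ba033ac44011", "/etc/hostname")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Printf("%s is %d bytes\n", st.Name, st.Size)
}
```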
+ schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such container or path does not exist inside the container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Path to a directory in the container to extract the archive’s contents into." + type: "string" + - name: "noOverwriteDirNonDir" + in: "query" + description: | + If `1`, `true`, or `True` then it will be an error if unpacking the + given content would cause an existing directory to be replaced with + a non-directory and vice versa. + type: "string" + - name: "copyUIDGID" + in: "query" + description: | + If `1` or `true`, then it will copy UID/GID maps to the destination + file or directory. + type: "string" + - name: "inputStream" + in: "body" + required: true + description: | + The input stream must be a tar archive compressed with one of the + following algorithms: `identity` (no compression), `gzip`, `bzip2`, + or `xz`. + schema: + type: "string" + format: "binary" + tags: ["Container"] + /containers/prune: + post: + summary: "Delete stopped containers" + produces: + - "application/json" + operationId: "ContainerPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=<timestamp>` Prune containers created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune containers with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ContainerPruneResponse" + properties: + ContainersDeleted: + description: "Container IDs that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /images/json: + get: + summary: "List Images" + description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image." + operationId: "ImageList" + produces: + - "application/json" + responses: + 200: + description: "Summary image data for the images matching the query" + schema: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "all" + in: "query" + description: "Show all images. Only images from a final layer (no children) are shown by default." + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the images list.
+ + Available filters: + + - `before`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) + - `dangling=true` + - `label=key` or `label="key=value"` of an image label + - `reference`=(`<image-name>[:<tag>]`) + - `since`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) + type: "string" + - name: "shared-size" + in: "query" + description: "Compute and show shared size as a `SharedSize` field on each image." + type: "boolean" + default: false + - name: "digests" + in: "query" + description: "Show digest information as a `RepoDigests` field on each image." + type: "boolean" + default: false + tags: ["Image"] + /build: + post: + summary: "Build an image" + description: | + Build an image from a tar archive with a `Dockerfile` in it. + + The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). + + The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output. + + The build is canceled if the client drops the connection by quitting or being killed. + operationId: "ImageBuild" + consumes: + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "inputStream" + in: "body" + description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." + schema: + type: "string" + format: "binary" + - name: "dockerfile" + in: "query" + description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." + type: "string" + default: "Dockerfile" + - name: "t" + in: "query" + description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters." + type: "string" + - name: "extrahosts" + in: "query" + description: "Extra hosts to add to /etc/hosts" + type: "string" + - name: "remote" + in: "query" + description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." + type: "string" + - name: "q" + in: "query" + description: "Suppress verbose build output." + type: "boolean" + default: false + - name: "nocache" + in: "query" + description: "Do not use the cache when building the image." + type: "boolean" + default: false + - name: "cachefrom" + in: "query" + description: "JSON array of images used for build cache resolution." + type: "string" + - name: "pull" + in: "query" + description: "Attempt to pull the image even if an older image exists locally." + type: "string" + - name: "rm" + in: "query" + description: "Remove intermediate containers after a successful build." + type: "boolean" + default: true + - name: "forcerm" + in: "query" + description: "Always remove intermediate containers, even upon failure."
+ type: "boolean" + default: false + - name: "memory" + in: "query" + description: "Set memory limit for build." + type: "integer" + - name: "memswap" + in: "query" + description: "Total memory (memory + swap). Set as `-1` to disable swap." + type: "integer" + - name: "cpushares" + in: "query" + description: "CPU shares (relative weight)." + type: "integer" + - name: "cpusetcpus" + in: "query" + description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." + type: "string" + - name: "cpuperiod" + in: "query" + description: "The length of a CPU period in microseconds." + type: "integer" + - name: "cpuquota" + in: "query" + description: "Microseconds of CPU time that the container can get in a CPU period." + type: "integer" + - name: "buildargs" + in: "query" + description: > + JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker + uses the buildargs as the environment context for commands run via the `Dockerfile` RUN + instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for + passing secret values. + + + For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in the + query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded. + + + [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg) + type: "string" + - name: "shmsize" + in: "query" + description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB." + type: "integer" + - name: "squash" + in: "query" + description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*" + type: "boolean" + - name: "labels" + in: "query" + description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs." + type: "string" + - name: "networkmode" + in: "query" + description: | + Sets the networking mode for the run commands during build. Supported + standard values are: `bridge`, `host`, `none`, and `container:`. + Any other value is taken as a custom network's name or ID to which this + container should connect to. + type: "string" + - name: "Content-type" + in: "header" + type: "string" + enum: + - "application/x-tar" + default: "application/x-tar" + - name: "X-Registry-Config" + in: "header" + description: | + This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to. + + The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example: + + ``` + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + ``` + + Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. 
+ type: "string" + - name: "platform" + in: "query" + description: "Platform in the format os[/arch[/variant]]" + type: "string" + default: "" + - name: "target" + in: "query" + description: "Target build stage" + type: "string" + default: "" + - name: "outputs" + in: "query" + description: "BuildKit output configuration" + type: "string" + default: "" + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /build/prune: + post: + summary: "Delete builder cache" + produces: + - "application/json" + operationId: "BuildPrune" + parameters: + - name: "keep-storage" + in: "query" + description: "Amount of disk space in bytes to keep for cache" + type: "integer" + format: "int64" + - name: "all" + in: "query" + type: "boolean" + description: "Remove all types of build cache" + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the list of build cache objects. + + Available filters: + + - `until=` remove cache older than ``. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon's local time. + - `id=` + - `parent=` + - `type=` + - `description=` + - `inuse` + - `shared` + - `private` + responses: + 200: + description: "No error" + schema: + type: "object" + title: "BuildPruneResponse" + properties: + CachesDeleted: + type: "array" + items: + description: "ID of build cache object" + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /images/create: + post: + summary: "Create an image" + description: "Create an image by either pulling it from a registry or importing it." + operationId: "ImageCreate" + consumes: + - "text/plain" + - "application/octet-stream" + produces: + - "application/json" + responses: + 200: + description: "no error" + 404: + description: "repository does not exist or no read access" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "fromImage" + in: "query" + description: "Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image. The pull is cancelled if the HTTP connection is closed." + type: "string" + - name: "fromSrc" + in: "query" + description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." + type: "string" + - name: "repo" + in: "query" + description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image." + type: "string" + - name: "tag" + in: "query" + description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." + type: "string" + - name: "message" + in: "query" + description: "Set commit message for imported image." 
+ type: "string" + - name: "inputImage" + in: "body" + description: "Image content if the value `-` has been specified in fromSrc query parameter" + schema: + type: "string" + required: false + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + - name: "changes" + in: "query" + description: | + Apply `Dockerfile` instructions to the image that is created, + for example: `changes=ENV DEBUG=true`. + Note that `ENV DEBUG=true` should be URI component encoded. + + Supported `Dockerfile` instructions: + `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` + type: "array" + items: + type: "string" + - name: "platform" + in: "query" + description: | + Platform in the format os[/arch[/variant]]. + + When used in combination with the `fromImage` option, the daemon checks + if the given image is present in the local image cache with the given + OS and Architecture, and otherwise attempts to pull the image. If the + option is not set, the host's native OS and Architecture are used. + If the given image does not exist in the local image cache, the daemon + attempts to pull the image with the host's native OS and Architecture. + If the given image does exists in the local image cache, but its OS or + architecture does not match, a warning is produced. + + When used with the `fromSrc` option to import an image from an archive, + this option sets the platform information for the imported image. If + the option is not set, the host's native OS and Architecture are used + for the imported image. + type: "string" + default: "" + tags: ["Image"] + /images/{name}/json: + get: + summary: "Inspect an image" + description: "Return low-level information about an image." + operationId: "ImageInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/ImageInspect" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or id" + type: "string" + required: true + tags: ["Image"] + /images/{name}/history: + get: + summary: "Get the history of an image" + description: "Return parent layers of an image." 
+ operationId: "ImageHistory" + produces: ["application/json"] + responses: + 200: + description: "List of image layers" + schema: + type: "array" + items: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false + examples: + application/json: + - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" + Created: 1398108230 + CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" + Tags: + - "ubuntu:lucid" + - "ubuntu:10.04" + Size: 182964289 + Comment: "" + - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" + Created: 1398108222 + CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" + Tags: [] + Size: 0 + Comment: "" + - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" + Created: 1371157430 + CreatedBy: "" + Tags: + - "scratch12:latest" + - "scratch:latest" + Size: 0 + Comment: "Imported from -" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + tags: ["Image"] + /images/{name}/push: + post: + summary: "Push an image" + description: | + Push an image to a registry. + + If you wish to push an image on to a private registry, that image must + already have a tag which references the registry. For example, + `registry.example.com/myimage:latest`. + + The push is cancelled if the HTTP connection is closed. + operationId: "ImagePush" + consumes: + - "application/octet-stream" + responses: + 200: + description: "No error" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID." + type: "string" + required: true + - name: "tag" + in: "query" + description: "The tag to associate with the image on the registry." + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + required: true + tags: ["Image"] + /images/{name}/tag: + post: + summary: "Tag an image" + description: "Tag an image so that it becomes part of a repository." 
+ operationId: "ImageTag" + responses: + 201: + description: "No error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID to tag." + type: "string" + required: true + - name: "repo" + in: "query" + description: "The repository to tag in. For example, `someuser/someimage`." + type: "string" + - name: "tag" + in: "query" + description: "The name of the new tag." + type: "string" + tags: ["Image"] + /images/{name}: + delete: + summary: "Remove an image" + description: | + Remove an image, along with any untagged parent images that were + referenced by that image. + + Images can't be removed if they have descendant images, are being + used by a running container or are being used by a build. + operationId: "ImageDelete" + produces: ["application/json"] + responses: + 200: + description: "The image was deleted successfully" + schema: + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + examples: + application/json: + - Untagged: "3e2f21a89f" + - Deleted: "3e2f21a89f" + - Deleted: "53b4f83ac9" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "force" + in: "query" + description: "Remove the image even if it is being used by stopped containers or has other tags" + type: "boolean" + default: false + - name: "noprune" + in: "query" + description: "Do not delete untagged parent images" + type: "boolean" + default: false + tags: ["Image"] + /images/search: + get: + summary: "Search images" + description: "Search for an image on Docker Hub." + operationId: "ImageSearch" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + type: "object" + title: "ImageSearchResponseItem" + properties: + description: + type: "string" + is_official: + type: "boolean" + is_automated: + type: "boolean" + name: + type: "string" + star_count: + type: "integer" + examples: + application/json: + - description: "" + is_official: false + is_automated: false + name: "wma55/u1210sshd" + star_count: 0 + - description: "" + is_official: false + is_automated: false + name: "jdswinbank/sshd" + star_count: 0 + - description: "" + is_official: false + is_automated: false + name: "vgauthier/sshd" + star_count: 0 + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "term" + in: "query" + description: "Term to search" + type: "string" + required: true + - name: "limit" + in: "query" + description: "Maximum number of results to return" + type: "integer" + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: + + - `is-automated=(true|false)` + - `is-official=(true|false)` + - `stars=` Matches images that has at least 'number' stars. 
+ type: "string" + tags: ["Image"] + /images/prune: + post: + summary: "Delete unused images" + produces: + - "application/json" + operationId: "ImagePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: + + - `dangling=` When set to `true` (or `1`), prune only + unused *and* untagged images. When set to `false` + (or `0`), all unused images are pruned. + - `until=` Prune images created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune images with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ImagePruneResponse" + properties: + ImagesDeleted: + description: "Images that were deleted" + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /auth: + post: + summary: "Check auth configuration" + description: | + Validate credentials for a registry and, if available, get an identity + token for accessing the registry without password. + operationId: "SystemAuth" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "An identity token was generated successfully." + schema: + type: "object" + title: "SystemAuthResponse" + required: [Status] + properties: + Status: + description: "The status of the authentication" + type: "string" + x-nullable: false + IdentityToken: + description: "An opaque token used to authenticate a user after a successful login" + type: "string" + x-nullable: false + examples: + application/json: + Status: "Login Succeeded" + IdentityToken: "9cbaf023786cd7..." + 204: + description: "No error" + 401: + description: "Auth error" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "authConfig" + in: "body" + description: "Authentication to check" + schema: + $ref: "#/definitions/AuthConfig" + tags: ["System"] + /info: + get: + summary: "Get system information" + operationId: "SystemInfo" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/SystemInfo" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /version: + get: + summary: "Get version" + description: "Returns the version of Docker that is running and various information about the system that Docker is running on." + operationId: "SystemVersion" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/SystemVersion" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /_ping: + get: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." 
+ operationId: "SystemPing" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "OK" + headers: + API-Version: + type: "string" + description: "Max API Version the server supports" + Builder-Version: + type: "string" + description: | + Default version of docker image builder + + The default on Linux is version "2" (BuildKit), but the daemon + can be configured to recommend version "1" (classic Builder). + Windows does not yet support BuildKit for native Windows images, + and uses "1" (classic builder) as a default. + + This value is a recommendation as advertised by the daemon, and + it is up to the client to choose which builder to use. + default: "2" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + Swarm: + type: "string" + enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] + description: | + Contains information about Swarm status of the daemon, + and if the daemon is acting as a manager or worker node. + default: "inactive" + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + headers: + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + tags: ["System"] + head: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." + operationId: "SystemPingHead" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "(empty)" + headers: + API-Version: + type: "string" + description: "Max API Version the server supports" + Builder-Version: + type: "string" + description: "Default version of docker image builder" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + Swarm: + type: "string" + enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] + description: | + Contains information about Swarm status of the daemon, + and if the daemon is acting as a manager or worker node. 
+ default: "inactive" + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /commit: + post: + summary: "Create a new image from a container" + operationId: "ImageCommit" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "containerConfig" + in: "body" + description: "The container configuration" + schema: + $ref: "#/definitions/ContainerConfig" + - name: "container" + in: "query" + description: "The ID or name of the container to commit" + type: "string" + - name: "repo" + in: "query" + description: "Repository name for the created image" + type: "string" + - name: "tag" + in: "query" + description: "Tag name for the create image" + type: "string" + - name: "comment" + in: "query" + description: "Commit message" + type: "string" + - name: "author" + in: "query" + description: "Author of the image (e.g., `John Hannibal Smith `)" + type: "string" + - name: "pause" + in: "query" + description: "Whether to pause the container before committing" + type: "boolean" + default: true + - name: "changes" + in: "query" + description: "`Dockerfile` instructions to apply while committing" + type: "string" + tags: ["Image"] + /events: + get: + summary: "Monitor events" + description: | + Stream real-time events from the server. + + Various objects within Docker report events when something happens to them. + + Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune` + + Images report these events: `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune` + + Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune` + + Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, `remove`, and `prune` + + The Docker daemon reports these events: `reload` + + Services report these events: `create`, `update`, and `remove` + + Nodes report these events: `create`, `update`, and `remove` + + Secrets report these events: `create`, `update`, and `remove` + + Configs report these events: `create`, `update`, and `remove` + + The Builder reports `prune` events + + operationId: "SystemEvents" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/EventMessage" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "since" + in: "query" + description: "Show events created since this timestamp then stream new events." + type: "string" + - name: "until" + in: "query" + description: "Show events created until this timestamp then stop streaming." 
+ type: "string" + - name: "filters" + in: "query" + description: | + A JSON encoded value of filters (a `map[string][]string`) to process on the event list. Available filters: + + - `config=` config name or ID + - `container=` container name or ID + - `daemon=` daemon name or ID + - `event=` event type + - `image=` image name or ID + - `label=` image or container label + - `network=` network name or ID + - `node=` node ID + - `plugin`= plugin name or ID + - `scope`= local or swarm + - `secret=` secret name or ID + - `service=` service name or ID + - `type=` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config` + - `volume=` volume name + type: "string" + tags: ["System"] + /system/df: + get: + summary: "Get data usage information" + operationId: "SystemDataUsage" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "SystemDataUsageResponse" + properties: + LayersSize: + type: "integer" + format: "int64" + Images: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + Containers: + type: "array" + items: + $ref: "#/definitions/ContainerSummary" + Volumes: + type: "array" + items: + $ref: "#/definitions/Volume" + BuildCache: + type: "array" + items: + $ref: "#/definitions/BuildCache" + example: + LayersSize: 1092588 + Images: + - + Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + ParentId: "" + RepoTags: + - "busybox:latest" + RepoDigests: + - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6" + Created: 1466724217 + Size: 1092588 + SharedSize: 0 + VirtualSize: 1092588 + Labels: {} + Containers: 1 + Containers: + - + Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148" + Names: + - "/top" + Image: "busybox" + ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + Command: "top" + Created: 1472592424 + Ports: [] + SizeRootFs: 1092588 + Labels: {} + State: "exited" + Status: "Exited (0) 56 minutes ago" + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + IPAMConfig: null + Links: null + Aliases: null + NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92" + EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a" + Gateway: "172.18.0.1" + IPAddress: "172.18.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:12:00:02" + Mounts: [] + Volumes: + - + Name: "my-volume" + Driver: "local" + Mountpoint: "/var/lib/docker/volumes/my-volume/_data" + Labels: null + Scope: "local" + Options: null + UsageData: + Size: 10920104 + RefCount: 2 + BuildCache: + - + ID: "hw53o5aio51xtltp5xjp8v7fx" + Parents: [] + Type: "regular" + Description: "pulled from docker.io/library/debian@sha256:234cb88d3020898631af0ccbbcca9a66ae7306ecd30c9720690858c1b007d2a0" + InUse: false + Shared: true + Size: 0 + CreatedAt: "2021-06-28T13:31:01.474619385Z" + LastUsedAt: "2021-07-07T22:02:32.738075951Z" + UsageCount: 26 + - + ID: "ndlpt0hhvkqcdfkputsk4cq9c" + Parents: ["ndlpt0hhvkqcdfkputsk4cq9c"] + Type: "regular" + Description: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" + InUse: false + Shared: true + Size: 51 + CreatedAt: "2021-06-28T13:31:03.002625487Z" + LastUsedAt: "2021-07-07T22:02:32.773909517Z" + UsageCount: 26 + 500: + description: "server error" + schema: + $ref: 
"#/definitions/ErrorResponse" + parameters: + - name: "type" + in: "query" + description: | + Object types, for which to compute and return data. + type: "array" + collectionFormat: multi + items: + type: "string" + enum: ["container", "image", "volume", "build-cache"] + tags: ["System"] + /images/{name}/get: + get: + summary: "Export an image" + description: | + Get a tarball containing all images and metadata for a repository. + + If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. + + ### Image tarball format + + An image tarball contains one directory per image layer (named using its long ID), each containing these files: + + - `VERSION`: currently `1.0` - the file format version + - `json`: detailed layer information, similar to `docker inspect layer_id` + - `layer.tar`: A tarfile containing the filesystem changes in this layer + + The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. + + If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. + + ```json + { + "hello-world": { + "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" + } + } + ``` + operationId: "ImageGet" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + tags: ["Image"] + /images/get: + get: + summary: "Export several images" + description: | + Get a tarball containing all images and metadata for several image + repositories. + + For each value of the `names` parameter: if it is a specific name and + tag (e.g. `ubuntu:latest`), then only that image (and its parents) are + returned; if it is an image ID, similarly only that image (and its parents) + are returned and there would be no names referenced in the 'repositories' + file for this image ID. + + For details on the format, see the [export image endpoint](#operation/ImageGet). + operationId: "ImageGetAll" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "names" + in: "query" + description: "Image names to filter by" + type: "array" + items: + type: "string" + tags: ["Image"] + /images/load: + post: + summary: "Import images" + description: | + Load a set of images and tags into a repository. + + For details on the format, see the [export image endpoint](#operation/ImageGet). + operationId: "ImageLoad" + consumes: + - "application/x-tar" + produces: + - "application/json" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "imagesTarball" + in: "body" + description: "Tar archive containing images" + schema: + type: "string" + format: "binary" + - name: "quiet" + in: "query" + description: "Suppress progress details during load." 
+ type: "boolean" + default: false + tags: ["Image"] + /containers/{id}/exec: + post: + summary: "Create an exec instance" + description: "Run a command inside a running container." + operationId: "ContainerExec" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is paused" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execConfig" + in: "body" + description: "Exec configuration" + schema: + type: "object" + title: "ExecConfig" + properties: + AttachStdin: + type: "boolean" + description: "Attach to `stdin` of the exec command." + AttachStdout: + type: "boolean" + description: "Attach to `stdout` of the exec command." + AttachStderr: + type: "boolean" + description: "Attach to `stderr` of the exec command." + ConsoleSize: + type: "array" + description: "Initial console size, as an `[height, width]` array." + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + DetachKeys: + type: "string" + description: | + Override the key sequence for detaching a container. Format is + a single character `[a-Z]` or `ctrl-` where `` + is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + Env: + description: | + A list of environment variables in the form `["VAR=value", ...]`. + type: "array" + items: + type: "string" + Cmd: + type: "array" + description: "Command to run, as a string or array of strings." + items: + type: "string" + Privileged: + type: "boolean" + description: "Runs the exec process with extended privileges." + default: false + User: + type: "string" + description: | + The user, and optionally, group to run the exec process inside + the container. Format is one of: `user`, `user:group`, `uid`, + or `uid:gid`. + WorkingDir: + type: "string" + description: | + The working directory for the exec process inside the container. + example: + AttachStdin: false + AttachStdout: true + AttachStderr: true + DetachKeys: "ctrl-p,ctrl-q" + Tty: false + Cmd: + - "date" + Env: + - "FOO=bar" + - "BAZ=quux" + required: true + - name: "id" + in: "path" + description: "ID or name of container" + type: "string" + required: true + tags: ["Exec"] + /exec/{id}/start: + post: + summary: "Start an exec instance" + description: | + Starts a previously set up exec instance. If detach is true, this endpoint + returns immediately after starting the command. Otherwise, it sets up an + interactive session with the command. + operationId: "ExecStart" + consumes: + - "application/json" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 200: + description: "No error" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Container is stopped or paused" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execStartConfig" + in: "body" + schema: + type: "object" + title: "ExecStartConfig" + properties: + Detach: + type: "boolean" + description: "Detach from the command." + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." 
+ ConsoleSize: + type: "array" + description: "Initial console size, as an `[height, width]` array." + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + example: + Detach: false + Tty: true + ConsoleSize: [80, 64] + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + /exec/{id}/resize: + post: + summary: "Resize an exec instance" + description: | + Resize the TTY session used by an exec instance. This endpoint only works + if `tty` was specified as part of creating and starting the exec instance. + operationId: "ExecResize" + responses: + 200: + description: "No error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + - name: "h" + in: "query" + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Exec"] + /exec/{id}/json: + get: + summary: "Inspect an exec instance" + description: "Return low-level information about an exec instance." + operationId: "ExecInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ExecInspectResponse" + properties: + CanRemove: + type: "boolean" + DetachKeys: + type: "string" + ID: + type: "string" + Running: + type: "boolean" + ExitCode: + type: "integer" + ProcessConfig: + $ref: "#/definitions/ProcessConfig" + OpenStdin: + type: "boolean" + OpenStderr: + type: "boolean" + OpenStdout: + type: "boolean" + ContainerID: + type: "string" + Pid: + type: "integer" + description: "The system process ID for the exec process." + examples: + application/json: + CanRemove: false + ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" + DetachKeys: "" + ExitCode: 2 + ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b" + OpenStderr: true + OpenStdin: true + OpenStdout: true + ProcessConfig: + arguments: + - "-c" + - "exit 2" + entrypoint: "sh" + privileged: false + tty: true + user: "1000" + Running: false + Pid: 42000 + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + + /volumes: + get: + summary: "List volumes" + operationId: "VolumeList" + produces: ["application/json"] + responses: + 200: + description: "Summary volume data that matches the query" + schema: + $ref: "#/definitions/VolumeListResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to + process on the volumes list. Available filters: + + - `dangling=<boolean>` When set to `true` (or `1`), returns all + volumes that are not in use by a container. When set to `false` + (or `0`), only volumes that are in use by one or more + containers are returned. + - `driver=<volume-driver-name>` Matches volumes based on their driver.
+ - `label=<key>` or `label=<key>:<value>` Matches volumes based on + the presence of a `label` alone or a `label` and a value. + - `name=<volume-name>` Matches all or part of a volume name. + type: "string" + format: "json" + tags: ["Volume"] + + /volumes/create: + post: + summary: "Create a volume" + operationId: "VolumeCreate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 201: + description: "The volume was created successfully" + schema: + $ref: "#/definitions/Volume" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "volumeConfig" + in: "body" + required: true + description: "Volume configuration" + schema: + $ref: "#/definitions/VolumeCreateOptions" + tags: ["Volume"] + + /volumes/{name}: + get: + summary: "Inspect a volume" + operationId: "VolumeInspect" + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Volume" + 404: + description: "No such volume" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + tags: ["Volume"] + + put: + summary: "Update a volume. Valid only for Swarm cluster volumes" + operationId: "VolumeUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such volume" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name or ID of the volume" + type: "string" + required: true + - name: "body" + in: "body" + schema: + # though the schema for this endpoint is an object that contains only a + # ClusterVolumeSpec, wrapping the ClusterVolumeSpec in this object + # means that if, later on, we support things like changing the + # labels, we can do so without duplicating that information to the + # ClusterVolumeSpec. + type: "object" + description: "Volume configuration" + properties: + Spec: + $ref: "#/definitions/ClusterVolumeSpec" + description: | + The spec of the volume to update. Currently, only Availability may + change. All other fields must remain unchanged. + - name: "version" + in: "query" + description: | + The version number of the volume being updated. This is required to + avoid conflicting writes. Found in the volume's `ClusterVolume` + field. + type: "integer" + format: "int64" + required: true + tags: ["Volume"] + + delete: + summary: "Remove a volume" + description: "Instruct the driver to remove the volume."
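+ # Illustrative sketch (assumed unix-socket `cli`; the volume name is an
+ # example): creating a volume via the `/volumes/create` endpoint above and
+ # then fetching it back.
+ #
+ #     spec := strings.NewReader(`{"Name": "my-volume", "Driver": "local"}`) // VolumeCreateOptions
+ #     resp, err := cli.Post("http://docker/volumes/create", "application/json", spec)
+ #     // 201 -> a Volume object; it can then be inspected with
+ #     //     cli.Get("http://docker/volumes/my-volume")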
+ operationId: "VolumeDelete" + responses: + 204: + description: "The volume was removed" + 404: + description: "No such volume or volume driver" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Volume is in use and cannot be removed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + - name: "force" + in: "query" + description: "Force the removal of the volume" + type: "boolean" + default: false + tags: ["Volume"] + + /volumes/prune: + post: + summary: "Delete unused volumes" + produces: + - "application/json" + operationId: "VolumePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune volumes with (or without, in case `label!=...` is used) the specified labels. + - `all` (`all=true`) - Consider all (local) volumes for pruning and not just anonymous volumes. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "VolumePruneResponse" + properties: + VolumesDeleted: + description: "Volumes that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Volume"] + /networks: + get: + summary: "List networks" + description: | + Returns a list of networks. For details on the format, see the + [network inspect endpoint](#operation/NetworkInspect). + + Note that it uses a different, smaller representation of a network than + inspecting a single network. For example, the list of containers attached + to the network is not propagated in API versions 1.28 and up. 
+ operationId: "NetworkList" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Network" + examples: + application/json: + - Name: "bridge" + Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" + Created: "2016-10-19T06:21:00.416543526Z" + Scope: "local" + Driver: "bridge" + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: + - + Subnet: "172.17.0.0/16" + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + - Name: "none" + Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "null" + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + - Name: "host" + Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "host" + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to process + on the networks list. + + Available filters: + + - `dangling=` When set to `true` (or `1`), returns all + networks that are not in use by a container. When set to `false` + (or `0`), only networks that are in use by one or more + containers are returned. + - `driver=` Matches a network's driver. + - `id=` Matches all or part of a network ID. + - `label=` or `label==` of a network label. + - `name=` Matches all or part of a network name. + - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`). + - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. 
+ type: "string" + tags: ["Network"] + + /networks/{id}: + get: + summary: "Inspect a network" + operationId: "NetworkInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Network" + 404: + description: "Network not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "verbose" + in: "query" + description: "Detailed inspect output for troubleshooting" + type: "boolean" + default: false + - name: "scope" + in: "query" + description: "Filter the network by scope (swarm, global, or local)" + type: "string" + tags: ["Network"] + + delete: + summary: "Remove a network" + operationId: "NetworkDelete" + responses: + 204: + description: "No error" + 403: + description: "operation not supported for pre-defined networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such network" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + tags: ["Network"] + + /networks/create: + post: + summary: "Create a network" + operationId: "NetworkCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "No error" + schema: + type: "object" + title: "NetworkCreateResponse" + properties: + Id: + description: "The ID of the created network." + type: "string" + Warning: + type: "string" + example: + Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" + Warning: "" + 403: + description: | + Forbidden operation. This happens when trying to create a network named after a pre-defined network, + or when trying to create an overlay network on a daemon which is not part of a Swarm cluster. + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "plugin not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "networkConfig" + in: "body" + description: "Network configuration" + required: true + schema: + type: "object" + title: "NetworkCreateRequest" + required: ["Name"] + properties: + Name: + description: "The network's name." + type: "string" + CheckDuplicate: + description: | + Check for networks with duplicate names. Since Network is + primarily keyed based on a random ID and not on the name, and + network name is strictly a user-friendly alias to the network + which is uniquely identified using ID, there is no guaranteed + way to check for duplicates. CheckDuplicate is there to provide + a best effort checking of any networks which has the same name + but it is not guaranteed to catch all name collisions. + type: "boolean" + Driver: + description: "Name of the network driver plugin to use." + type: "string" + default: "bridge" + Internal: + description: "Restrict external access to the network." + type: "boolean" + Attachable: + description: | + Globally scoped network is manually attachable by regular + containers from workers in swarm mode. + type: "boolean" + Ingress: + description: | + Ingress network is the network which provides the routing-mesh + in swarm mode. 
+ type: "boolean" + IPAM: + description: "Optional custom IP scheme for the network." + $ref: "#/definitions/IPAM" + EnableIPv6: + description: "Enable IPv6 on the network." + type: "boolean" + Options: + description: "Network specific options to be used by the drivers." + type: "object" + additionalProperties: + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + Name: "isolated_nw" + CheckDuplicate: false + Driver: "bridge" + EnableIPv6: true + IPAM: + Driver: "default" + Config: + - Subnet: "172.20.0.0/16" + IPRange: "172.20.10.0/24" + Gateway: "172.20.10.11" + - Subnet: "2001:db8:abcd::/64" + Gateway: "2001:db8:abcd::1011" + Options: + foo: "bar" + Internal: true + Attachable: false + Ingress: false + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + tags: ["Network"] + + /networks/{id}/connect: + post: + summary: "Connect a container to a network" + operationId: "NetworkConnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + title: "NetworkConnectRequest" + properties: + Container: + type: "string" + description: "The ID or name of the container to connect to the network." + EndpointConfig: + $ref: "#/definitions/EndpointSettings" + example: + Container: "3613f73ba0e4" + EndpointConfig: + IPAMConfig: + IPv4Address: "172.24.56.89" + IPv6Address: "2001:db8::5689" + tags: ["Network"] + + /networks/{id}/disconnect: + post: + summary: "Disconnect a container from a network" + operationId: "NetworkDisconnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + title: "NetworkDisconnectRequest" + properties: + Container: + type: "string" + description: | + The ID or name of the container to disconnect from the network. + Force: + type: "boolean" + description: | + Force the container to disconnect from the network. 
+ tags: ["Network"] + /networks/prune: + post: + summary: "Delete unused networks" + produces: + - "application/json" + operationId: "NetworkPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=` Prune networks created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune networks with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "NetworkPruneResponse" + properties: + NetworksDeleted: + description: "Networks that were deleted" + type: "array" + items: + type: "string" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Network"] + /plugins: + get: + summary: "List plugins" + operationId: "PluginList" + description: "Returns information about installed plugins." + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Plugin" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the plugin list. + + Available filters: + + - `capability=` + - `enable=|` + tags: ["Plugin"] + + /plugins/privileges: + get: + summary: "Get plugin privileges" + operationId: "GetPluginPrivileges" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + tags: + - "Plugin" + + /plugins/pull: + post: + summary: "Install a plugin" + operationId: "PluginPull" + description: | + Pulls and installs a plugin. After the plugin is installed, it can be + enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). + produces: + - "application/json" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + Remote reference for plugin to install. + + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "name" + in: "query" + description: | + Local name for the pulled plugin. + + The `:latest` tag is optional, and is used as the default if omitted. + required: false + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration to use when pulling a plugin + from a registry. + + Refer to the [authentication section](#section/Authentication) for + details. 
+ type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/{name}/json: + get: + summary: "Inspect a plugin" + operationId: "PluginInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + tags: ["Plugin"] + /plugins/{name}: + delete: + summary: "Remove a plugin" + operationId: "PluginDelete" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "force" + in: "query" + description: | + Disable the plugin before removing. This may result in issues if the + plugin is in use by a container. + type: "boolean" + default: false + tags: ["Plugin"] + /plugins/{name}/enable: + post: + summary: "Enable a plugin" + operationId: "PluginEnable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "timeout" + in: "query" + description: "Set the HTTP client timeout (in seconds)" + type: "integer" + default: 0 + tags: ["Plugin"] + /plugins/{name}/disable: + post: + summary: "Disable a plugin" + operationId: "PluginDisable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "force" + in: "query" + description: | + Force disable a plugin even if still in use. + required: false + type: "boolean" + tags: ["Plugin"] + /plugins/{name}/upgrade: + post: + summary: "Upgrade a plugin" + operationId: "PluginUpgrade" + responses: + 204: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "remote" + in: "query" + description: | + Remote reference to upgrade to. 
+ + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration to use when pulling a plugin + from a registry. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/create: + post: + summary: "Create a plugin" + operationId: "PluginCreate" + consumes: + - "application/x-tar" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "query" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "tarContext" + in: "body" + description: "Path to tar containing plugin rootfs and manifest" + schema: + type: "string" + format: "binary" + tags: ["Plugin"] + /plugins/{name}/push: + post: + summary: "Push a plugin" + operationId: "PluginPush" + description: | + Push a plugin to the registry. + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + responses: + 200: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /plugins/{name}/set: + post: + summary: "Configure a plugin" + operationId: "PluginSet" + consumes: + - "application/json" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + type: "string" + example: ["DEBUG=1"] + responses: + 204: + description: "No error" + 404: + description: "Plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /nodes: + get: + summary: "List nodes" + operationId: "NodeList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Node" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). 
+ + Available filters: + - `id=<node id>` + - `label=<engine label>` + - `membership=`(`accepted`|`pending`)` + - `name=<node name>` + - `node.label=<node label>` + - `role=`(`manager`|`worker`)` + type: "string" + tags: ["Node"] + /nodes/{id}: + get: + summary: "Inspect a node" + operationId: "NodeInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Node" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + tags: ["Node"] + delete: + summary: "Delete a node" + operationId: "NodeDelete" + responses: + 200: + description: "no error" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + - name: "force" + in: "query" + description: "Force remove a node from the swarm" + default: false + type: "boolean" + tags: ["Node"] + /nodes/{id}/update: + post: + summary: "Update a node" + operationId: "NodeUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID of the node" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/NodeSpec" + - name: "version" + in: "query" + description: | + The version number of the node object being updated. This is required + to avoid conflicting writes.
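+ # Illustrative sketch (assumed unix-socket `cli`): listing manager nodes with
+ # the JSON-encoded `filters` parameter of `/nodes` above. A 503 reply means
+ # the daemon is not part of a swarm.
+ #
+ #     q := url.Values{"filters": {`{"role": ["manager"]}`}}
+ #     resp, err := cli.Get("http://docker/nodes?" + q.Encode())
+ #     // 200 -> JSON array of Node objects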
+ type: "integer" + format: "int64" + required: true + tags: ["Node"] + /swarm: + get: + summary: "Inspect swarm" + operationId: "SwarmInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Swarm" + 404: + description: "no such swarm" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/init: + post: + summary: "Initialize a new swarm" + operationId: "SwarmInit" + produces: + - "application/json" + - "text/plain" + responses: + 200: + description: "no error" + schema: + description: "The node ID" + type: "string" + example: "7v2t30z9blmxuhnyo6s4cpenp" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmInitRequest" + properties: + ListenAddr: + description: | + Listen address used for inter-manager communication, as well + as determining the networking interface used for the VXLAN + Tunnel Endpoint (VTEP). This can either be an address/port + combination in the form `192.168.1.1:4567`, or an interface + followed by a port number, like `eth0:4567`. If the port number + is omitted, the default swarm listening port is used. + type: "string" + AdvertiseAddr: + description: | + Externally reachable address advertised to other nodes. This + can either be an address/port combination in the form + `192.168.1.1:4567`, or an interface followed by a port number, + like `eth0:4567`. If the port number is omitted, the port + number from the listen address is used. If `AdvertiseAddr` is + not specified, it will be automatically detected when possible. + type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: + ``), for example, `192.168.1.1`, or an interface, + like `eth0`. If `DataPathAddr` is unspecified, the same address + as `AdvertiseAddr` is used. + + The `DataPathAddr` specifies the address that global scope + network drivers will publish towards other nodes in order to + reach the containers running on this node. Using this parameter + it is possible to separate the container data traffic from the + management traffic of the cluster. + type: "string" + DataPathPort: + description: | + DataPathPort specifies the data path port number for data traffic. + Acceptable port range is 1024 to 49151. + if no port is set or is set to 0, default port 4789 will be used. + type: "integer" + format: "uint32" + DefaultAddrPool: + description: | + Default Address Pool specifies default subnet pools for global + scope networks. + type: "array" + items: + type: "string" + example: ["10.10.0.0/16", "20.20.0.0/16"] + ForceNewCluster: + description: "Force creation of a new swarm." + type: "boolean" + SubnetSize: + description: | + SubnetSize specifies the subnet size of the networks created + from the default subnet pool. 
+ type: "integer" + format: "uint32" + Spec: + $ref: "#/definitions/SwarmSpec" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + DataPathPort: 4789 + DefaultAddrPool: ["10.10.0.0/8", "20.20.0.0/8"] + SubnetSize: 24 + ForceNewCluster: false + Spec: + Orchestration: {} + Raft: {} + Dispatcher: {} + CAConfig: {} + EncryptionConfig: + AutoLockManagers: false + tags: ["Swarm"] + /swarm/join: + post: + summary: "Join an existing swarm" + operationId: "SwarmJoin" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmJoinRequest" + properties: + ListenAddr: + description: | + Listen address used for inter-manager communication if the node + gets promoted to manager, as well as determining the networking + interface used for the VXLAN Tunnel Endpoint (VTEP). + type: "string" + AdvertiseAddr: + description: | + Externally reachable address advertised to other nodes. This + can either be an address/port combination in the form + `192.168.1.1:4567`, or an interface followed by a port number, + like `eth0:4567`. If the port number is omitted, the port + number from the listen address is used. If `AdvertiseAddr` is + not specified, it will be automatically detected when possible. + type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: + ``), for example, `192.168.1.1`, or an interface, + like `eth0`. If `DataPathAddr` is unspecified, the same address + as `AdvertiseAddr` is used. + + The `DataPathAddr` specifies the address that global scope + network drivers will publish towards other nodes in order to + reach the containers running on this node. Using this parameter + it is possible to separate the container data traffic from the + management traffic of the cluster. + + type: "string" + RemoteAddrs: + description: | + Addresses of manager nodes already participating in the swarm. + type: "array" + items: + type: "string" + JoinToken: + description: "Secret token for joining this swarm." + type: "string" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + RemoteAddrs: + - "node1:2377" + JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + tags: ["Swarm"] + /swarm/leave: + post: + summary: "Leave a swarm" + operationId: "SwarmLeave" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "force" + description: | + Force leave swarm, even if this is the last manager or that it will + break the cluster. 
+ in: "query" + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/update: + post: + summary: "Update a swarm" + operationId: "SwarmUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + $ref: "#/definitions/SwarmSpec" + - name: "version" + in: "query" + description: | + The version number of the swarm object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + - name: "rotateWorkerToken" + in: "query" + description: "Rotate the worker join token." + type: "boolean" + default: false + - name: "rotateManagerToken" + in: "query" + description: "Rotate the manager join token." + type: "boolean" + default: false + - name: "rotateManagerUnlockKey" + in: "query" + description: "Rotate the manager unlock key." + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/unlockkey: + get: + summary: "Get the unlock key" + operationId: "SwarmUnlockkey" + consumes: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "UnlockKeyResponse" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/unlock: + post: + summary: "Unlock a locked manager" + operationId: "SwarmUnlock" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmUnlockRequest" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /services: + get: + summary: "List services" + operationId: "ServiceList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Service" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the services list. + + Available filters: + + - `id=` + - `label=` + - `mode=["replicated"|"global"]` + - `name=` + - name: "status" + in: "query" + type: "boolean" + description: | + Include service status, with count of running and desired tasks. 
+ tags: ["Service"] + /services/create: + post: + summary: "Create a service" + operationId: "ServiceCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + type: "object" + title: "ServiceCreateResponse" + properties: + ID: + description: "The ID of the created service." + type: "string" + Warning: + description: "Optional warning message" + type: "string" + example: + ID: "ak7w3gjqoa3kuz8xcpnyy0pvl" + Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "network is not eligible for services" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "name conflicts with an existing service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "web" + TaskTemplate: + ContainerSpec: + Image: "nginx:alpine" + Mounts: + - + ReadOnly: true + Source: "web-data" + Target: "/usr/share/nginx/html" + Type: "volume" + VolumeOptions: + DriverConfig: {} + Labels: + com.example.something: "something-value" + Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"] + User: "33" + DNSConfig: + Nameservers: ["8.8.8.8"] + Search: ["example.org"] + Options: ["timeout:3"] + Secrets: + - + File: + Name: "www.example.org.key" + UID: "33" + GID: "33" + Mode: 384 + SecretID: "fpjqlhnwb19zds35k8wn80lq9" + SecretName: "example_org_domain_key" + LogDriver: + Name: "json-file" + Options: + max-file: "3" + max-size: "10M" + Placement: {} + Resources: + Limits: + MemoryBytes: 104857600 + Reservations: {} + RestartPolicy: + Condition: "on-failure" + Delay: 10000000000 + MaxAttempts: 10 + Mode: + Replicated: + Replicas: 4 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Ports: + - + Protocol: "tcp" + PublishedPort: 8080 + TargetPort: 80 + Labels: + foo: "bar" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration for pulling from private + registries. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + tags: ["Service"] + /services/{id}: + get: + summary: "Inspect a service" + operationId: "ServiceInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Service" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "insertDefaults" + in: "query" + description: "Fill empty fields with default values." 
+ type: "boolean" + default: false + tags: ["Service"] + delete: + summary: "Delete a service" + operationId: "ServiceDelete" + responses: + 200: + description: "no error" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + tags: ["Service"] + /services/{id}/update: + post: + summary: "Update a service" + operationId: "ServiceUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ServiceUpdateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "top" + TaskTemplate: + ContainerSpec: + Image: "busybox" + Args: + - "top" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + + - name: "version" + in: "query" + description: | + The version number of the service object being updated. This is + required to avoid conflicting writes. + This version number should be the value as currently set on the + service *before* the update. You can find the current version by + calling `GET /services/{id}` + required: true + type: "integer" + - name: "registryAuthFrom" + in: "query" + description: | + If the `X-Registry-Auth` header is not specified, this parameter + indicates where to find registry authorization credentials. + type: "string" + enum: ["spec", "previous-spec"] + default: "spec" + - name: "rollback" + in: "query" + description: | + Set to this parameter to `previous` to cause a server-side rollback + to the previous service spec. The supplied spec will be ignored in + this case. + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration for pulling from private + registries. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + + tags: ["Service"] + /services/{id}/logs: + get: + summary: "Get service logs" + description: | + Get `stdout` and `stderr` logs from a service. See also + [`/containers/{id}/logs`](#operation/ContainerLogs). + + **Note**: This endpoint works only for services with the `local`, + `json-file` or `journald` logging drivers. 
+ produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + operationId: "ServiceLogs" + responses: + 200: + description: "logs returned as a stream in response body" + schema: + type: "string" + format: "binary" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such service: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the service" + type: "string" + - name: "details" + in: "query" + description: "Show service context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: "Keep connection after returning logs." + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Service"] + /tasks: + get: + summary: "List tasks" + operationId: "TaskList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Task" + example: + - ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + - ID: "1yljwbmlr8er2waf8orvqpwms" + Version: + Index: 30 + CreatedAt: "2016-06-07T21:07:30.019104782Z" + UpdatedAt: "2016-06-07T21:07:30.231958098Z" + Name: "hopeful_cori" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: 
"60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:30.202183143Z" + State: "shutdown" + Message: "shutdown" + ContainerStatus: + ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" + DesiredState: "shutdown" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.5/16" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the tasks list. + + Available filters: + + - `desired-state=(running | shutdown | accepted)` + - `id=` + - `label=key` or `label="key=value"` + - `name=` + - `node=` + - `service=` + tags: ["Task"] + /tasks/{id}: + get: + summary: "Inspect a task" + operationId: "TaskInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Task" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID of the task" + required: true + type: "string" + tags: ["Task"] + /tasks/{id}/logs: + get: + summary: "Get task logs" + description: | + Get `stdout` and `stderr` logs from a task. + See also [`/containers/{id}/logs`](#operation/ContainerLogs). + + **Note**: This endpoint works only for services with the `local`, + `json-file` or `journald` logging drivers. + operationId: "TaskLogs" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 200: + description: "logs returned as a stream in response body" + schema: + type: "string" + format: "binary" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such task: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID of the task" + type: "string" + - name: "details" + in: "query" + description: "Show task context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: "Keep connection after returning logs." 
+ type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Task"] + /secrets: + get: + summary: "List secrets" + operationId: "SecretList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Secret" + example: + - ID: "blt1owaxmitz71s9v5zh81zun" + Version: + Index: 85 + CreatedAt: "2017-07-20T13:55:28.678958722Z" + UpdatedAt: "2017-07-20T13:55:28.678958722Z" + Spec: + Name: "mysql-passwd" + Labels: + some.label: "some.value" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + Labels: + foo: "bar" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the secrets list. 
+ + Available filters: + + - `id=` + - `label= or label==value` + - `name=` + - `names=` + tags: ["Secret"] + /secrets/create: + post: + summary: "Create a secret" + operationId: "SecretCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/SecretSpec" + - type: "object" + example: + Name: "app-key.crt" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + tags: ["Secret"] + /secrets/{id}: + get: + summary: "Inspect a secret" + operationId: "SecretInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Secret" + examples: + application/json: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + Labels: + foo: "bar" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + delete: + summary: "Delete a secret" + operationId: "SecretDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + /secrets/{id}/update: + post: + summary: "Update a Secret" + operationId: "SecretUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such secret" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the secret" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/SecretSpec" + description: | + The spec of the secret to update. Currently, only the Labels field + can be updated. All other fields must remain unchanged from the + [SecretInspect endpoint](#operation/SecretInspect) response values. 
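Because only `Labels` may change, the usual update pattern is inspect, mutate the labels, and send the spec back together with the current version. A sketch (the secret ID is the one from the example above; the label value is illustrative):

```go
package main

import (
	"context"

	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}

	const id = "ktnbjxoalbkvbvedmg1urrz8h" // placeholder secret ID

	secret, _, err := cli.SecretInspectWithRaw(ctx, id)
	if err != nil {
		panic(err)
	}

	// Only Labels may be modified; everything else must round-trip unchanged.
	if secret.Spec.Labels == nil {
		secret.Spec.Labels = map[string]string{}
	}
	secret.Spec.Labels["rotated"] = "2023-12-07"

	if err := cli.SecretUpdate(ctx, id, secret.Version, secret.Spec); err != nil {
		panic(err)
	}
}
```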
+ - name: "version" + in: "query" + description: | + The version number of the secret object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + tags: ["Secret"] + /configs: + get: + summary: "List configs" + operationId: "ConfigList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Config" + example: + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "server.conf" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the configs list. + + Available filters: + + - `id=` + - `label= or label==value` + - `name=` + - `names=` + tags: ["Config"] + /configs/create: + post: + summary: "Create a config" + operationId: "ConfigCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/ConfigSpec" + - type: "object" + example: + Name: "server.conf" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + tags: ["Config"] + /configs/{id}: + get: + summary: "Inspect a config" + operationId: "ConfigInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Config" + examples: + application/json: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + 404: + description: "config not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the config" + tags: ["Config"] + delete: + summary: "Delete a config" + operationId: "ConfigDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "config not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the config" + tags: ["Config"] + /configs/{id}/update: + post: + summary: "Update a Config" + operationId: "ConfigUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such config" + schema: + 
$ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the config" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/ConfigSpec" + description: | + The spec of the config to update. Currently, only the Labels field + can be updated. All other fields must remain unchanged from the + [ConfigInspect endpoint](#operation/ConfigInspect) response values. + - name: "version" + in: "query" + description: | + The version number of the config object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + tags: ["Config"] + /distribution/{name}/json: + get: + summary: "Get image information from the registry" + description: | + Return image digest and platform information by contacting the registry. + operationId: "DistributionInspect" + produces: + - "application/json" + responses: + 200: + description: "descriptor and platform information" + schema: + $ref: "#/definitions/DistributionInspect" + 401: + description: "Failed authentication or no image found" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or id" + type: "string" + required: true + tags: ["Distribution"] + /session: + post: + summary: "Initialize interactive session" + description: | + Start a new interactive session with a server. Session allows server to + call back to the client for advanced capabilities. + + ### Hijacking + + This endpoint hijacks the HTTP connection to HTTP2 transport that allows + the client to expose gPRC services on that connection. + + For example, the client sends this request to upgrade the connection: + + ``` + POST /session HTTP/1.1 + Upgrade: h2c + Connection: Upgrade + ``` + + The Docker daemon responds with a `101 UPGRADED` response follow with + the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Connection: Upgrade + Upgrade: h2c + ``` + operationId: "Session" + produces: + - "application/vnd.docker.raw-stream" + responses: + 101: + description: "no error, hijacking successful" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Session"] diff --git a/vendor/github.com/docker/docker/api/types/auth.go b/vendor/github.com/docker/docker/api/types/auth.go new file mode 100644 index 0000000000..9ee329a2fb --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/auth.go @@ -0,0 +1,7 @@ +package types // import "github.com/docker/docker/api/types" +import "github.com/docker/docker/api/types/registry" + +// AuthConfig contains authorization information for connecting to a Registry. 
+// +// Deprecated: use github.com/docker/docker/api/types/registry.AuthConfig +type AuthConfig = registry.AuthConfig diff --git a/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go b/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go new file mode 100644 index 0000000000..bf3463b90e --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go @@ -0,0 +1,23 @@ +package blkiodev // import "github.com/docker/docker/api/types/blkiodev" + +import "fmt" + +// WeightDevice is a structure that holds device:weight pair +type WeightDevice struct { + Path string + Weight uint16 +} + +func (w *WeightDevice) String() string { + return fmt.Sprintf("%s:%d", w.Path, w.Weight) +} + +// ThrottleDevice is a structure that holds device:rate_per_second pair +type ThrottleDevice struct { + Path string + Rate uint64 +} + +func (t *ThrottleDevice) String() string { + return fmt.Sprintf("%s:%d", t.Path, t.Rate) +} diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go new file mode 100644 index 0000000000..d8cd306135 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/client.go @@ -0,0 +1,444 @@ +package types // import "github.com/docker/docker/api/types" + +import ( + "bufio" + "io" + "net" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/registry" + units "github.com/docker/go-units" +) + +// CheckpointCreateOptions holds parameters to create a checkpoint from a container +type CheckpointCreateOptions struct { + CheckpointID string + CheckpointDir string + Exit bool +} + +// CheckpointListOptions holds parameters to list checkpoints for a container +type CheckpointListOptions struct { + CheckpointDir string +} + +// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container +type CheckpointDeleteOptions struct { + CheckpointID string + CheckpointDir string +} + +// ContainerAttachOptions holds parameters to attach to a container. +type ContainerAttachOptions struct { + Stream bool + Stdin bool + Stdout bool + Stderr bool + DetachKeys string + Logs bool +} + +// ContainerCommitOptions holds parameters to commit changes into a container. +type ContainerCommitOptions struct { + Reference string + Comment string + Author string + Changes []string + Pause bool + Config *container.Config +} + +// ContainerExecInspect holds information returned by exec inspect. +type ContainerExecInspect struct { + ExecID string `json:"ID"` + ContainerID string + Running bool + ExitCode int + Pid int +} + +// ContainerListOptions holds parameters to list containers with. +type ContainerListOptions struct { + Size bool + All bool + Latest bool + Since string + Before string + Limit int + Filters filters.Args +} + +// ContainerLogsOptions holds parameters to filter logs with. +type ContainerLogsOptions struct { + ShowStdout bool + ShowStderr bool + Since string + Until string + Timestamps bool + Follow bool + Tail string + Details bool +} + +// ContainerRemoveOptions holds parameters to remove containers. +type ContainerRemoveOptions struct { + RemoveVolumes bool + RemoveLinks bool + Force bool +} + +// ContainerStartOptions holds parameters to start containers. 
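As a usage sketch for `ContainerLogsOptions`: tailing a container's logs and, since a non-TTY log stream is multiplexed, splitting it with the `stdcopy` package (upstream import paths; the container name is a placeholder):

```go
package main

import (
	"context"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}

	// Tail the last 100 lines; Tail is a string so "all" is also valid.
	rc, err := cli.ContainerLogs(ctx, "my-container", types.ContainerLogsOptions{
		ShowStdout: true,
		ShowStderr: true,
		Tail:       "100",
	})
	if err != nil {
		panic(err)
	}
	defer rc.Close()

	// For a container without a TTY the stream is multiplexed; stdcopy
	// splits it back into separate stdout and stderr writers.
	if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, rc); err != nil {
		panic(err)
	}
}
```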
+type ContainerStartOptions struct {
+	CheckpointID  string
+	CheckpointDir string
+}
+
+// CopyToContainerOptions holds information
+// about files to copy into a container
+type CopyToContainerOptions struct {
+	AllowOverwriteDirWithFile bool
+	CopyUIDGID                bool
+}
+
+// EventsOptions holds parameters to filter events with.
+type EventsOptions struct {
+	Since   string
+	Until   string
+	Filters filters.Args
+}
+
+// NetworkListOptions holds parameters to filter the list of networks with.
+type NetworkListOptions struct {
+	Filters filters.Args
+}
+
+// NewHijackedResponse initializes a HijackedResponse type
+func NewHijackedResponse(conn net.Conn, mediaType string) HijackedResponse {
+	return HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn), mediaType: mediaType}
+}
+
+// HijackedResponse holds connection information for a hijacked request.
+type HijackedResponse struct {
+	mediaType string
+	Conn      net.Conn
+	Reader    *bufio.Reader
+}
+
+// Close closes the hijacked connection and reader.
+func (h *HijackedResponse) Close() {
+	h.Conn.Close()
+}
+
+// MediaType lets the client know whether the HijackedResponse holds a raw or a multiplexed stream.
+// It returns false if the HTTP Content-Type is not relevant, and the container must be inspected instead.
+func (h *HijackedResponse) MediaType() (string, bool) {
+	if h.mediaType == "" {
+		return "", false
+	}
+	return h.mediaType, true
+}
+
+// CloseWriter is an interface implemented by structs
+// that can close the write side of a stream.
+type CloseWriter interface {
+	CloseWrite() error
+}
+
+// CloseWrite closes the hijacked connection for writing.
+func (h *HijackedResponse) CloseWrite() error {
+	if conn, ok := h.Conn.(CloseWriter); ok {
+		return conn.CloseWrite()
+	}
+	return nil
+}
+
+// ImageBuildOptions holds the information
+// necessary to build images.
+type ImageBuildOptions struct {
+	Tags           []string
+	SuppressOutput bool
+	RemoteContext  string
+	NoCache        bool
+	Remove         bool
+	ForceRemove    bool
+	PullParent     bool
+	Isolation      container.Isolation
+	CPUSetCPUs     string
+	CPUSetMems     string
+	CPUShares      int64
+	CPUQuota       int64
+	CPUPeriod      int64
+	Memory         int64
+	MemorySwap     int64
+	CgroupParent   string
+	NetworkMode    string
+	ShmSize        int64
+	Dockerfile     string
+	Ulimits        []*units.Ulimit
+	// BuildArgs needs to be a *string instead of just a string so that
+	// we can tell the difference between "" (empty string) and no value
+	// at all (nil). See the parsing of buildArgs in
+	// api/server/router/build/build_routes.go for even more info.
+	BuildArgs   map[string]*string
+	AuthConfigs map[string]registry.AuthConfig
+	Context     io.Reader
+	Labels      map[string]string
+	// Squash the resulting image's layers into the parent;
+	// preserves the original image and creates a new one from the parent with all
+	// the changes applied to a single layer
+	Squash bool
+	// CacheFrom specifies images that are used for matching cache. Images
+	// specified here do not need to have a valid parent chain to match cache.
+	CacheFrom   []string
+	SecurityOpt []string
+	ExtraHosts  []string // List of extra hosts
+	Target      string
+	SessionID   string
+	Platform    string
+	// Version specifies the version of the underlying builder to use
+	Version BuilderVersion
+	// BuildID is an optional identifier that can be passed together with the
+	// build request. The same identifier can be used to gracefully cancel the
+	// build with the cancel request.
+	BuildID string
+	// Outputs defines configurations for exporting build results.
Only supported + // in BuildKit mode + Outputs []ImageBuildOutput +} + +// ImageBuildOutput defines configuration for exporting a build result +type ImageBuildOutput struct { + Type string + Attrs map[string]string +} + +// BuilderVersion sets the version of underlying builder to use +type BuilderVersion string + +const ( + // BuilderV1 is the first generation builder in docker daemon + BuilderV1 BuilderVersion = "1" + // BuilderBuildKit is builder based on moby/buildkit project + BuilderBuildKit BuilderVersion = "2" +) + +// ImageBuildResponse holds information +// returned by a server after building +// an image. +type ImageBuildResponse struct { + Body io.ReadCloser + OSType string +} + +// ImageCreateOptions holds information to create images. +type ImageCreateOptions struct { + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry. + Platform string // Platform is the target platform of the image if it needs to be pulled from the registry. +} + +// ImageImportSource holds source information for ImageImport +type ImageImportSource struct { + Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this. + SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute. +} + +// ImageImportOptions holds information to import images from the client host. +type ImageImportOptions struct { + Tag string // Tag is the name to tag this image with. This attribute is deprecated. + Message string // Message is the message to tag the image with + Changes []string // Changes are the raw changes to apply to this image + Platform string // Platform is the target platform of the image +} + +// ImageListOptions holds parameters to list images with. +type ImageListOptions struct { + // All controls whether all images in the graph are filtered, or just + // the heads. + All bool + + // Filters is a JSON-encoded set of filter arguments. + Filters filters.Args + + // SharedSize indicates whether the shared size of images should be computed. + SharedSize bool + + // ContainerCount indicates whether container count should be computed. + ContainerCount bool +} + +// ImageLoadResponse returns information to the client about a load process. +type ImageLoadResponse struct { + // Body must be closed to avoid a resource leak + Body io.ReadCloser + JSON bool +} + +// ImagePullOptions holds information to pull images. +type ImagePullOptions struct { + All bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + PrivilegeFunc RequestPrivilegeFunc + Platform string +} + +// RequestPrivilegeFunc is a function interface that +// clients can supply to retry operations after +// getting an authorization error. +// This function returns the registry authentication +// header value in base 64 format, or an error +// if the privilege request fails. +type RequestPrivilegeFunc func() (string, error) + +// ImagePushOptions holds information to push images. +type ImagePushOptions ImagePullOptions + +// ImageRemoveOptions holds parameters to remove images. +type ImageRemoveOptions struct { + Force bool + PruneChildren bool +} + +// ImageSearchOptions holds parameters to search images with. +type ImageSearchOptions struct { + RegistryAuth string + PrivilegeFunc RequestPrivilegeFunc + Filters filters.Args + Limit int +} + +// ResizeOptions holds parameters to resize a tty. 
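A sketch of `ImagePullOptions` with a `RequestPrivilegeFunc`: the privilege function is only invoked after an authorization error, and must return a fresh `X-Registry-Auth` header value (the credentials helper here is hypothetical):

```go
package main

import (
	"context"
	"io"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}

	rc, err := cli.ImagePull(ctx, "docker.io/library/busybox:latest", types.ImagePullOptions{
		// Called only on an authorization error; must return a base64
		// X-Registry-Auth header value.
		PrivilegeFunc: func() (string, error) {
			return lookupEncodedAuth() // hypothetical helper
		},
	})
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	io.Copy(io.Discard, rc) // drain the progress stream so the pull completes
}

// lookupEncodedAuth is a placeholder for however the caller obtains a
// base64url-encoded registry auth configuration.
func lookupEncodedAuth() (string, error) { return "", nil }
```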
+// It can be used to resize container ttys and +// exec process ttys too. +type ResizeOptions struct { + Height uint + Width uint +} + +// NodeListOptions holds parameters to list nodes with. +type NodeListOptions struct { + Filters filters.Args +} + +// NodeRemoveOptions holds parameters to remove nodes with. +type NodeRemoveOptions struct { + Force bool +} + +// ServiceCreateOptions contains the options to use when creating a service. +type ServiceCreateOptions struct { + // EncodedRegistryAuth is the encoded registry authorization credentials to + // use when updating the service. + // + // This field follows the format of the X-Registry-Auth header. + EncodedRegistryAuth string + + // QueryRegistry indicates whether the service update requires + // contacting a registry. A registry may be contacted to retrieve + // the image digest and manifest, which in turn can be used to update + // platform or other information about the service. + QueryRegistry bool +} + +// ServiceCreateResponse contains the information returned to a client +// on the creation of a new service. +type ServiceCreateResponse struct { + // ID is the ID of the created service. + ID string + // Warnings is a set of non-fatal warning messages to pass on to the user. + Warnings []string `json:",omitempty"` +} + +// Values for RegistryAuthFrom in ServiceUpdateOptions +const ( + RegistryAuthFromSpec = "spec" + RegistryAuthFromPreviousSpec = "previous-spec" +) + +// ServiceUpdateOptions contains the options to be used for updating services. +type ServiceUpdateOptions struct { + // EncodedRegistryAuth is the encoded registry authorization credentials to + // use when updating the service. + // + // This field follows the format of the X-Registry-Auth header. + EncodedRegistryAuth string + + // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate + // into this field. While it does open API users up to racy writes, most + // users may not need that level of consistency in practice. + + // RegistryAuthFrom specifies where to find the registry authorization + // credentials if they are not given in EncodedRegistryAuth. Valid + // values are "spec" and "previous-spec". + RegistryAuthFrom string + + // Rollback indicates whether a server-side rollback should be + // performed. When this is set, the provided spec will be ignored. + // The valid values are "previous" and "none". An empty value is the + // same as "none". + Rollback string + + // QueryRegistry indicates whether the service update requires + // contacting a registry. A registry may be contacted to retrieve + // the image digest and manifest, which in turn can be used to update + // platform or other information about the service. + QueryRegistry bool +} + +// ServiceListOptions holds parameters to list services with. +type ServiceListOptions struct { + Filters filters.Args + + // Status indicates whether the server should include the service task + // count of running and desired tasks. + Status bool +} + +// ServiceInspectOptions holds parameters related to the "service inspect" +// operation. +type ServiceInspectOptions struct { + InsertDefaults bool +} + +// TaskListOptions holds parameters to list tasks with. +type TaskListOptions struct { + Filters filters.Args +} + +// PluginRemoveOptions holds parameters to remove plugins. +type PluginRemoveOptions struct { + Force bool +} + +// PluginEnableOptions holds parameters to enable plugins. 
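Tying `ServiceUpdateOptions` back to the `rollback` and `registryAuthFrom` query parameters in the swagger above, a server-side rollback might be wired up like this sketch (package and function names are illustrative):

```go
package swarmexample // hypothetical package name

import (
	"context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

// RollbackService asks the daemon to restore the service's previous spec.
// With Rollback set, the spec submitted alongside it is ignored.
func RollbackService(ctx context.Context, cli *client.Client, name string) ([]string, error) {
	svc, _, err := cli.ServiceInspectWithRaw(ctx, name, types.ServiceInspectOptions{})
	if err != nil {
		return nil, err
	}
	resp, err := cli.ServiceUpdate(ctx, svc.ID, svc.Version, svc.Spec, types.ServiceUpdateOptions{
		Rollback:         "previous",
		RegistryAuthFrom: types.RegistryAuthFromPreviousSpec,
	})
	if err != nil {
		return nil, err
	}
	return resp.Warnings, nil
}
```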
+type PluginEnableOptions struct { + Timeout int +} + +// PluginDisableOptions holds parameters to disable plugins. +type PluginDisableOptions struct { + Force bool +} + +// PluginInstallOptions holds parameters to install a plugin. +type PluginInstallOptions struct { + Disabled bool + AcceptAllPermissions bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + RemoteRef string // RemoteRef is the plugin name on the registry + PrivilegeFunc RequestPrivilegeFunc + AcceptPermissionsFunc func(PluginPrivileges) (bool, error) + Args []string +} + +// SwarmUnlockKeyResponse contains the response for Engine API: +// GET /swarm/unlockkey +type SwarmUnlockKeyResponse struct { + // UnlockKey is the unlock key in ASCII-armored format. + UnlockKey string +} + +// PluginCreateOptions hold all options to plugin create. +type PluginCreateOptions struct { + RepoName string +} diff --git a/vendor/github.com/docker/docker/api/types/configs.go b/vendor/github.com/docker/docker/api/types/configs.go new file mode 100644 index 0000000000..7d5930bbeb --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/configs.go @@ -0,0 +1,67 @@ +package types // import "github.com/docker/docker/api/types" + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// configs holds structs used for internal communication between the +// frontend (such as an http server) and the backend (such as the +// docker daemon). + +// ContainerCreateConfig is the parameter set to ContainerCreate() +type ContainerCreateConfig struct { + Name string + Config *container.Config + HostConfig *container.HostConfig + NetworkingConfig *network.NetworkingConfig + Platform *ocispec.Platform + AdjustCPUShares bool +} + +// ContainerRmConfig holds arguments for the container remove +// operation. This struct is used to tell the backend what operations +// to perform. +type ContainerRmConfig struct { + ForceRemove, RemoveVolume, RemoveLink bool +} + +// ExecConfig is a small subset of the Config struct that holds the configuration +// for the exec feature of docker. +type ExecConfig struct { + User string // User that will run the command + Privileged bool // Is the container in privileged mode + Tty bool // Attach standard streams to a tty. + ConsoleSize *[2]uint `json:",omitempty"` // Initial console size [height, width] + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStderr bool // Attach the standard error + AttachStdout bool // Attach the standard output + Detach bool // Execute in detach mode + DetachKeys string // Escape keys for detach + Env []string // Environment variables + WorkingDir string // Working directory + Cmd []string // Execution commands and args +} + +// PluginRmConfig holds arguments for plugin remove. +type PluginRmConfig struct { + ForceRemove bool +} + +// PluginEnableConfig holds arguments for plugin enable +type PluginEnableConfig struct { + Timeout int +} + +// PluginDisableConfig holds arguments for plugin disable. 
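`ExecConfig` above is the payload for creating an exec instance; a sketch of running a one-off command and reading its exit code back through `ContainerExecInspect` (the container name is a placeholder):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}

	created, err := cli.ContainerExecCreate(ctx, "my-container", types.ExecConfig{
		Cmd: []string{"ls", "-l", "/"},
	})
	if err != nil {
		panic(err)
	}

	if err := cli.ContainerExecStart(ctx, created.ID, types.ExecStartCheck{}); err != nil {
		panic(err)
	}

	// A real caller would poll until Running is false; one probe is enough
	// for a sketch.
	time.Sleep(time.Second)
	inspect, err := cli.ContainerExecInspect(ctx, created.ID)
	if err != nil {
		panic(err)
	}
	fmt.Printf("running=%v exit=%d\n", inspect.Running, inspect.ExitCode)
}
```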
+type PluginDisableConfig struct {
+	ForceDisable bool
+}
+
+// NetworkListConfig stores the options available for listing networks
+type NetworkListConfig struct {
+	// TODO(@cpuguy83): naming is hard, this is pulled from what was being used in the router before moving here
+	Detailed bool
+	Verbose  bool
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/change_response_deprecated.go b/vendor/github.com/docker/docker/api/types/container/change_response_deprecated.go
new file mode 100644
index 0000000000..6b4b47390d
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/change_response_deprecated.go
@@ -0,0 +1,6 @@
+package container
+
+// ContainerChangeResponseItem is a change item in response to a ContainerChanges operation
+//
+// Deprecated: use [FilesystemChange].
+type ContainerChangeResponseItem = FilesystemChange
diff --git a/vendor/github.com/docker/docker/api/types/container/change_type.go b/vendor/github.com/docker/docker/api/types/container/change_type.go
new file mode 100644
index 0000000000..fe8d6d3696
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/change_type.go
@@ -0,0 +1,15 @@
+package container
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// ChangeType Kind of change
+//
+// Can be one of:
+//
+// - `0`: Modified ("C")
+// - `1`: Added ("A")
+// - `2`: Deleted ("D")
+//
+// swagger:model ChangeType
+type ChangeType uint8
diff --git a/vendor/github.com/docker/docker/api/types/container/change_types.go b/vendor/github.com/docker/docker/api/types/container/change_types.go
new file mode 100644
index 0000000000..3a3a83866e
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/change_types.go
@@ -0,0 +1,23 @@
+package container
+
+const (
+	// ChangeModify represents the modify operation.
+	ChangeModify ChangeType = 0
+	// ChangeAdd represents the add operation.
+	ChangeAdd ChangeType = 1
+	// ChangeDelete represents the delete operation.
+	ChangeDelete ChangeType = 2
+)
+
+func (ct ChangeType) String() string {
+	switch ct {
+	case ChangeModify:
+		return "C"
+	case ChangeAdd:
+		return "A"
+	case ChangeDelete:
+		return "D"
+	default:
+		return ""
+	}
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/docker/docker/api/types/container/config.go
new file mode 100644
index 0000000000..077583e66c
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/config.go
@@ -0,0 +1,96 @@
+package container // import "github.com/docker/docker/api/types/container"
+
+import (
+	"io"
+	"time"
+
+	"github.com/docker/docker/api/types/strslice"
+	"github.com/docker/go-connections/nat"
+)
+
+// MinimumDuration puts a minimum on a user-configured duration.
+// This is to prevent API errors on time units. For example, the API may
+// set 3 as a healthcheck interval with the intention of 3 seconds, but
+// Docker interprets it as 3 nanoseconds.
+const MinimumDuration = 1 * time.Millisecond
+
+// StopOptions holds the options to stop or restart a container.
+type StopOptions struct {
+	// Signal (optional) is the signal to send to the container to (gracefully)
+	// stop it before forcibly terminating the container with SIGKILL after the
+	// timeout expires. If no value is set, the default (SIGTERM) is used.
+	Signal string `json:",omitempty"`
+
+	// Timeout (optional) is the timeout (in seconds) to wait for the container
+	// to stop gracefully before forcibly terminating it with SIGKILL.
+	//
+	// - Use nil to use the default timeout (10 seconds).
+	// - Use '-1' to wait indefinitely.
+	// - Use '0' to not wait for the container to exit gracefully, and
+	//   immediately proceed to forcibly terminating the container.
+	// - Other positive values are used as timeout (in seconds).
+	Timeout *int `json:",omitempty"`
+}
+
+// HealthConfig holds configuration settings for the HEALTHCHECK feature.
+type HealthConfig struct {
+	// Test is the test to perform to check that the container is healthy.
+	// An empty slice means to inherit the default.
+	// The options are:
+	// {}                      : inherit healthcheck
+	// {"NONE"}                : disable healthcheck
+	// {"CMD", args...}        : exec arguments directly
+	// {"CMD-SHELL", command}  : run command with system's default shell
+	Test []string `json:",omitempty"`
+
+	// Zero means to inherit. Durations are expressed as integer nanoseconds.
+	Interval    time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
+	Timeout     time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
+	StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries start to count down.
+
+	// Retries is the number of consecutive failures needed to consider a container as unhealthy.
+	// Zero means inherit.
+	Retries int `json:",omitempty"`
+}
+
+// ExecStartOptions holds the options to start a container's exec.
+type ExecStartOptions struct {
+	Stdin       io.Reader
+	Stdout      io.Writer
+	Stderr      io.Writer
+	ConsoleSize *[2]uint `json:",omitempty"`
+}
+
+// Config contains the configuration data about a container.
+// It should hold only portable information about the container.
+// Here, "portable" means "independent from the host we are running on".
+// Non-portable information *should* appear in HostConfig.
+// All fields added to this struct must be marked `omitempty` to keep getting
+// predictable hashes from the old `v1Compatibility` configuration.
+type Config struct {
+	Hostname        string              // Hostname
+	Domainname      string              // Domainname
+	User            string              // User that will run the command(s) inside the container; also supports user:group
+	AttachStdin     bool                // Attach the standard input, making user interaction possible
+	AttachStdout    bool                // Attach the standard output
+	AttachStderr    bool                // Attach the standard error
+	ExposedPorts    nat.PortSet         `json:",omitempty"` // List of exposed ports
+	Tty             bool                // Attach standard streams to a tty, including stdin if it is not closed.
+	OpenStdin       bool                // Open stdin
+	StdinOnce       bool                // If true, close stdin after the first attached client disconnects.
+	Env             []string            // List of environment variables to set in the container
+	Cmd             strslice.StrSlice   // Command to run when starting the container
+	Healthcheck     *HealthConfig       `json:",omitempty"` // Healthcheck describes how to check that the container is healthy
+	ArgsEscaped     bool                `json:",omitempty"` // True if command is already escaped (meaning treat as a command line) (Windows specific).
+	Image           string              // Name of the image as it was passed by the operator (e.g. could be symbolic)
+	Volumes         map[string]struct{} // List of volumes (mounts) used for the container
+	WorkingDir      string              // Current directory (PWD) in which the command will be launched
+	Entrypoint      strslice.StrSlice   // Entrypoint to run when starting the container
+	NetworkDisabled bool                `json:",omitempty"` // Is network disabled
+	MacAddress      string              `json:",omitempty"` // Mac Address of the container
+	OnBuild         []string            // ONBUILD metadata that was defined on the image's Dockerfile
+	Labels          map[string]string   // List of labels set on this container
+	StopSignal      string              `json:",omitempty"` // Signal to stop a container
+	StopTimeout     *int                `json:",omitempty"` // Timeout (in seconds) to stop a container
+	Shell           strslice.StrSlice   `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/container_top.go b/vendor/github.com/docker/docker/api/types/container/container_top.go
new file mode 100644
index 0000000000..63381da367
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/container_top.go
@@ -0,0 +1,22 @@
+package container // import "github.com/docker/docker/api/types/container"
+
+// ----------------------------------------------------------------------------
+// Code generated by `swagger generate operation`. DO NOT EDIT.
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+// ContainerTopOKBody OK response to ContainerTop operation
+// swagger:model ContainerTopOKBody
+type ContainerTopOKBody struct {
+
+	// Each process running in the container, where each process
+	// is an array of values corresponding to the titles.
+	//
+	// Required: true
+	Processes [][]string `json:"Processes"`
+
+	// The ps column titles
+	// Required: true
+	Titles []string `json:"Titles"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/container_update.go b/vendor/github.com/docker/docker/api/types/container/container_update.go
new file mode 100644
index 0000000000..c10f175ea8
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/container_update.go
@@ -0,0 +1,16 @@
+package container // import "github.com/docker/docker/api/types/container"
+
+// ----------------------------------------------------------------------------
+// Code generated by `swagger generate operation`. DO NOT EDIT.
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+// ContainerUpdateOKBody OK response to ContainerUpdate operation
+// swagger:model ContainerUpdateOKBody
+type ContainerUpdateOKBody struct {
+
+	// warnings
+	// Required: true
+	Warnings []string `json:"Warnings"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/create_response.go b/vendor/github.com/docker/docker/api/types/container/create_response.go
new file mode 100644
index 0000000000..aa0e7f7d07
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/create_response.go
@@ -0,0 +1,19 @@
+package container
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command + +// CreateResponse ContainerCreateResponse +// +// OK response to ContainerCreate operation +// swagger:model CreateResponse +type CreateResponse struct { + + // The ID of the created container + // Required: true + ID string `json:"Id"` + + // Warnings encountered when creating the container + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/filesystem_change.go b/vendor/github.com/docker/docker/api/types/container/filesystem_change.go new file mode 100644 index 0000000000..9e9c2ad1d5 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/filesystem_change.go @@ -0,0 +1,19 @@ +package container + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// FilesystemChange Change in the container's filesystem. +// +// swagger:model FilesystemChange +type FilesystemChange struct { + + // kind + // Required: true + Kind ChangeType `json:"Kind"` + + // Path to file or directory that has changed. + // + // Required: true + Path string `json:"Path"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig.go b/vendor/github.com/docker/docker/api/types/container/hostconfig.go new file mode 100644 index 0000000000..d4e6f55375 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig.go @@ -0,0 +1,456 @@ +package container // import "github.com/docker/docker/api/types/container" + +import ( + "strings" + + "github.com/docker/docker/api/types/blkiodev" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/go-connections/nat" + units "github.com/docker/go-units" +) + +// CgroupnsMode represents the cgroup namespace mode of the container +type CgroupnsMode string + +// cgroup namespace modes for containers +const ( + CgroupnsModeEmpty CgroupnsMode = "" + CgroupnsModePrivate CgroupnsMode = "private" + CgroupnsModeHost CgroupnsMode = "host" +) + +// IsPrivate indicates whether the container uses its own private cgroup namespace +func (c CgroupnsMode) IsPrivate() bool { + return c == CgroupnsModePrivate +} + +// IsHost indicates whether the container shares the host's cgroup namespace +func (c CgroupnsMode) IsHost() bool { + return c == CgroupnsModeHost +} + +// IsEmpty indicates whether the container cgroup namespace mode is unset +func (c CgroupnsMode) IsEmpty() bool { + return c == CgroupnsModeEmpty +} + +// Valid indicates whether the cgroup namespace mode is valid +func (c CgroupnsMode) Valid() bool { + return c.IsEmpty() || c.IsPrivate() || c.IsHost() +} + +// Isolation represents the isolation technology of a container. The supported +// values are platform specific +type Isolation string + +// Isolation modes for containers +const ( + IsolationEmpty Isolation = "" // IsolationEmpty is unspecified (same behavior as default) + IsolationDefault Isolation = "default" // IsolationDefault is the default isolation mode on current daemon + IsolationProcess Isolation = "process" // IsolationProcess is process isolation mode + IsolationHyperV Isolation = "hyperv" // IsolationHyperV is HyperV isolation mode +) + +// IsDefault indicates the default isolation technology of a container. On Linux this +// is the native driver. 
On Windows, this is a Windows Server Container. +func (i Isolation) IsDefault() bool { + // TODO consider making isolation-mode strict (case-sensitive) + v := Isolation(strings.ToLower(string(i))) + return v == IsolationDefault || v == IsolationEmpty +} + +// IsHyperV indicates the use of a Hyper-V partition for isolation +func (i Isolation) IsHyperV() bool { + // TODO consider making isolation-mode strict (case-sensitive) + return Isolation(strings.ToLower(string(i))) == IsolationHyperV +} + +// IsProcess indicates the use of process isolation +func (i Isolation) IsProcess() bool { + // TODO consider making isolation-mode strict (case-sensitive) + return Isolation(strings.ToLower(string(i))) == IsolationProcess +} + +// IpcMode represents the container ipc stack. +type IpcMode string + +// IpcMode constants +const ( + IPCModeNone IpcMode = "none" + IPCModeHost IpcMode = "host" + IPCModeContainer IpcMode = "container" + IPCModePrivate IpcMode = "private" + IPCModeShareable IpcMode = "shareable" +) + +// IsPrivate indicates whether the container uses its own private ipc namespace which can not be shared. +func (n IpcMode) IsPrivate() bool { + return n == IPCModePrivate +} + +// IsHost indicates whether the container shares the host's ipc namespace. +func (n IpcMode) IsHost() bool { + return n == IPCModeHost +} + +// IsShareable indicates whether the container's ipc namespace can be shared with another container. +func (n IpcMode) IsShareable() bool { + return n == IPCModeShareable +} + +// IsContainer indicates whether the container uses another container's ipc namespace. +func (n IpcMode) IsContainer() bool { + _, ok := containerID(string(n)) + return ok +} + +// IsNone indicates whether container IpcMode is set to "none". +func (n IpcMode) IsNone() bool { + return n == IPCModeNone +} + +// IsEmpty indicates whether container IpcMode is empty +func (n IpcMode) IsEmpty() bool { + return n == "" +} + +// Valid indicates whether the ipc mode is valid. +func (n IpcMode) Valid() bool { + // TODO(thaJeztah): align with PidMode, and consider container-mode without a container name/ID to be invalid. + return n.IsEmpty() || n.IsNone() || n.IsPrivate() || n.IsHost() || n.IsShareable() || n.IsContainer() +} + +// Container returns the name of the container ipc stack is going to be used. +func (n IpcMode) Container() (idOrName string) { + idOrName, _ = containerID(string(n)) + return idOrName +} + +// NetworkMode represents the container network stack. +type NetworkMode string + +// IsNone indicates whether container isn't using a network stack. +func (n NetworkMode) IsNone() bool { + return n == "none" +} + +// IsDefault indicates whether container uses the default network stack. +func (n NetworkMode) IsDefault() bool { + return n == "default" +} + +// IsPrivate indicates whether container uses its private network stack. +func (n NetworkMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsContainer indicates whether container uses a container network stack. +func (n NetworkMode) IsContainer() bool { + _, ok := containerID(string(n)) + return ok +} + +// ConnectedContainer is the id of the container which network this container is connected to. +func (n NetworkMode) ConnectedContainer() (idOrName string) { + idOrName, _ = containerID(string(n)) + return idOrName +} + +// UserDefined indicates user-created network +func (n NetworkMode) UserDefined() string { + if n.IsUserDefined() { + return string(n) + } + return "" +} + +// UsernsMode represents userns mode in the container. 
+type UsernsMode string
+
+// IsHost indicates whether the container uses the host's userns.
+func (n UsernsMode) IsHost() bool {
+	return n == "host"
+}
+
+// IsPrivate indicates whether the container uses a private userns.
+func (n UsernsMode) IsPrivate() bool {
+	return !n.IsHost()
+}
+
+// Valid indicates whether the userns is valid.
+func (n UsernsMode) Valid() bool {
+	return n == "" || n.IsHost()
+}
+
+// CgroupSpec represents the cgroup to use for the container.
+type CgroupSpec string
+
+// IsContainer indicates whether the container is using another container's cgroup
+func (c CgroupSpec) IsContainer() bool {
+	_, ok := containerID(string(c))
+	return ok
+}
+
+// Valid indicates whether the cgroup spec is valid.
+func (c CgroupSpec) Valid() bool {
+	// TODO(thaJeztah): align with PidMode, and consider container-mode without a container name/ID to be invalid.
+	return c == "" || c.IsContainer()
+}
+
+// Container returns the ID or name of the container whose cgroup will be used.
+func (c CgroupSpec) Container() (idOrName string) {
+	idOrName, _ = containerID(string(c))
+	return idOrName
+}
+
+// UTSMode represents the UTS namespace of the container.
+type UTSMode string
+
+// IsPrivate indicates whether the container uses its private UTS namespace.
+func (n UTSMode) IsPrivate() bool {
+	return !n.IsHost()
+}
+
+// IsHost indicates whether the container uses the host's UTS namespace.
+func (n UTSMode) IsHost() bool {
+	return n == "host"
+}
+
+// Valid indicates whether the UTS namespace is valid.
+func (n UTSMode) Valid() bool {
+	return n == "" || n.IsHost()
+}
+
+// PidMode represents the pid namespace of the container.
+type PidMode string
+
+// IsPrivate indicates whether the container uses its own new pid namespace.
+func (n PidMode) IsPrivate() bool {
+	return !(n.IsHost() || n.IsContainer())
+}
+
+// IsHost indicates whether the container uses the host's pid namespace.
+func (n PidMode) IsHost() bool {
+	return n == "host"
+}
+
+// IsContainer indicates whether the container uses a container's pid namespace.
+func (n PidMode) IsContainer() bool {
+	_, ok := containerID(string(n))
+	return ok
+}
+
+// Valid indicates whether the pid namespace is valid.
+func (n PidMode) Valid() bool {
+	return n == "" || n.IsHost() || validContainer(string(n))
+}
+
+// Container returns the name of the container whose pid namespace is going to be used.
+func (n PidMode) Container() (idOrName string) {
+	idOrName, _ = containerID(string(n))
+	return idOrName
+}
+
+// DeviceRequest represents a request for devices from a device driver.
+// Used by GPU device drivers.
+type DeviceRequest struct {
+	Driver       string            // Name of device driver
+	Count        int               // Number of devices to request (-1 = All)
+	DeviceIDs    []string          // List of device IDs as recognizable by the device driver
+	Capabilities [][]string        // An OR list of AND lists of device capabilities (e.g. "gpu")
+	Options      map[string]string // Options to pass onto the device driver
+}
+
+// DeviceMapping represents the device mapping between the host and the container.
+type DeviceMapping struct {
+	PathOnHost        string
+	PathInContainer   string
+	CgroupPermissions string
+}
+
+// RestartPolicy represents the restart policies of the container.
+type RestartPolicy struct {
+	Name              string
+	MaximumRetryCount int
+}
+
+// IsNone indicates whether the container has the "no" restart policy.
+// This means the container will not automatically restart when exiting.
+func (rp *RestartPolicy) IsNone() bool {
+	return rp.Name == "no" || rp.Name == ""
+}
+
+// IsAlways indicates whether the container has the "always" restart policy.
+// This means the container will automatically restart regardless of the exit status.
+func (rp *RestartPolicy) IsAlways() bool {
+	return rp.Name == "always"
+}
+
+// IsOnFailure indicates whether the container has the "on-failure" restart policy.
+// This means the container will automatically restart when exiting with a non-zero exit status.
+func (rp *RestartPolicy) IsOnFailure() bool {
+	return rp.Name == "on-failure"
+}
+
+// IsUnlessStopped indicates whether the container has the
+// "unless-stopped" restart policy. This means the container will
+// automatically restart unless the user has put it into a stopped state.
+func (rp *RestartPolicy) IsUnlessStopped() bool {
+	return rp.Name == "unless-stopped"
+}
+
+// IsSame compares two RestartPolicy values to see if they are the same
+func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool {
+	return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount
+}
+
+// LogMode is a type to define the available modes for logging.
+// These modes affect how logs are handled when log messages start piling up.
+type LogMode string
+
+// Available logging modes
+const (
+	LogModeUnset    LogMode = ""
+	LogModeBlocking LogMode = "blocking"
+	LogModeNonBlock LogMode = "non-blocking"
+)
+
+// LogConfig represents the logging configuration of the container.
+type LogConfig struct {
+	Type   string
+	Config map[string]string
+}
+
+// Resources contains a container's resources (cgroups config, ulimits...)
+type Resources struct {
+	// Applicable to all platforms
+	CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers)
+	Memory    int64 // Memory limit (in bytes)
+	NanoCPUs  int64 `json:"NanoCpus"` // CPU quota in units of 10^-9 CPUs.
+
+	// Applicable to UNIX platforms
+	CgroupParent         string // Parent cgroup.
+	BlkioWeight          uint16 // Block IO weight (relative weight vs. other containers)
+	BlkioWeightDevice    []*blkiodev.WeightDevice
+	BlkioDeviceReadBps   []*blkiodev.ThrottleDevice
+	BlkioDeviceWriteBps  []*blkiodev.ThrottleDevice
+	BlkioDeviceReadIOps  []*blkiodev.ThrottleDevice
+	BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice
+	CPUPeriod            int64           `json:"CpuPeriod"`          // CPU CFS (Completely Fair Scheduler) period
+	CPUQuota             int64           `json:"CpuQuota"`           // CPU CFS (Completely Fair Scheduler) quota
+	CPURealtimePeriod    int64           `json:"CpuRealtimePeriod"`  // CPU real-time period
+	CPURealtimeRuntime   int64           `json:"CpuRealtimeRuntime"` // CPU real-time runtime
+	CpusetCpus           string          // CpusetCpus 0-2, 0,1
+	CpusetMems           string          // CpusetMems 0-2, 0,1
+	Devices              []DeviceMapping // List of devices to map inside the container
+	DeviceCgroupRules    []string        // List of rules to be added to the device cgroup
+	DeviceRequests       []DeviceRequest // List of device requests for device drivers
+
+	// KernelMemory specifies the kernel memory limit (in bytes) for the container.
+	// Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes.
+	KernelMemory      int64  `json:",omitempty"`
+	KernelMemoryTCP   int64  `json:",omitempty"` // Hard limit for kernel TCP buffer memory (in bytes)
+	MemoryReservation int64  // Memory soft limit (in bytes)
+	MemorySwap        int64  // Total memory usage (memory + swap); set `-1` to enable unlimited swap
+	MemorySwappiness  *int64 // Tuning container memory swappiness behaviour
+	OomKillDisable    *bool  // Whether to disable OOM Killer or not
+	PidsLimit         *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change.
+	Ulimits           []*units.Ulimit // List of ulimits to be set in the container
+
+	// Applicable to Windows
+	CPUCount           int64  `json:"CpuCount"`   // CPU count
+	CPUPercent         int64  `json:"CpuPercent"` // CPU percent
+	IOMaximumIOps      uint64 // Maximum IOps for the container system drive
+	IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive
+}
+
+// UpdateConfig holds the mutable attributes of a Container.
+// Those attributes can be updated at runtime.
+type UpdateConfig struct {
+	// Contains container's resources (cgroups, ulimits)
+	Resources
+	RestartPolicy RestartPolicy
+}
+
+// HostConfig is the non-portable Config structure of a container.
+// Here, "non-portable" means "dependent on the host we are running on".
+// Portable information *should* appear in Config.
+type HostConfig struct {
+	// Applicable to all platforms
+	Binds           []string          // List of volume bindings for this container
+	ContainerIDFile string            // File (path) where the containerId is written
+	LogConfig       LogConfig         // Configuration of the logs for this container
+	NetworkMode     NetworkMode       // Network mode to use for the container
+	PortBindings    nat.PortMap       // Port mapping between the exposed port (container) and the host
+	RestartPolicy   RestartPolicy     // Restart policy to be used for the container
+	AutoRemove      bool              // Automatically remove the container when it exits
+	VolumeDriver    string            // Name of the volume driver used to mount volumes
+	VolumesFrom     []string          // List of volumes to take from other containers
+	ConsoleSize     [2]uint           // Initial console size (height,width)
+	Annotations     map[string]string `json:",omitempty"` // Arbitrary non-identifying metadata attached to the container and provided to the runtime
+
+	// Applicable to UNIX platforms
+	CapAdd          strslice.StrSlice // List of kernel capabilities to add to the container
+	CapDrop         strslice.StrSlice // List of kernel capabilities to remove from the container
+	CgroupnsMode    CgroupnsMode      // Cgroup namespace mode to use for the container
+	DNS             []string          `json:"Dns"`        // List of DNS servers to look up
+	DNSOptions      []string          `json:"DnsOptions"` // List of DNS options to look for
+	DNSSearch       []string          `json:"DnsSearch"`  // List of DNS search domains to look for
+	ExtraHosts      []string          // List of extra hosts
+	GroupAdd        []string          // List of additional groups that the container process will run as
+	IpcMode         IpcMode           // IPC namespace to use for the container
+	Cgroup          CgroupSpec        // Cgroup to use for the container
+	Links           []string          // List of links (in the name:alias form)
+	OomScoreAdj     int               // Container preference for OOM-killing
+	PidMode         PidMode           // PID namespace to use for the container
+	Privileged      bool              // Is the container in privileged mode
+	PublishAllPorts bool              // Should docker publish all exposed ports for the container
+	ReadonlyRootfs  bool              // Is the container root filesystem read-only
+	SecurityOpt     []string          // List of string values to customize labels for MLS systems, such as SELinux.
+	StorageOpt      map[string]string `json:",omitempty"` // Storage driver options per container.
+	Tmpfs           map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container
+	UTSMode         UTSMode           // UTS namespace to use for the container
+	UsernsMode      UsernsMode        // The user namespace to use for the container
+	ShmSize         int64             // Total shm memory usage
+	Sysctls         map[string]string `json:",omitempty"` // List of namespaced sysctls used for the container
+	Runtime         string            `json:",omitempty"` // Runtime to use with this container
+
+	// Applicable to Windows
+	Isolation Isolation // Isolation technology of the container (e.g. default, hyperv)
+
+	// Contains container's resources (cgroups, ulimits)
+	Resources
+
+	// Mounts specs used by the container
+	Mounts []mount.Mount `json:",omitempty"`
+
+	// MaskedPaths is the list of paths to be masked inside the container (this overrides the default set of paths)
+	MaskedPaths []string
+
+	// ReadonlyPaths is the list of paths to be set as read-only inside the container (this overrides the default set of paths)
+	ReadonlyPaths []string
+
+	// Run a custom init inside the container, if null, use the daemon's configured settings
+	Init *bool `json:",omitempty"`
+}
+
+// containerID splits "container:<id|name>" values. It returns the container
+// ID or name, and whether an ID/name was found. It returns an empty string and
+// false if the value does not have a "container:" prefix. Further validation
+// of the returned value, including checking if the value is empty, should be
+// handled by the caller.
+func containerID(val string) (idOrName string, ok bool) {
+	k, v, hasSep := strings.Cut(val, ":")
+	if !hasSep || k != "container" {
+		return "", false
+	}
+	return v, true
+}
+
+// validContainer checks if the given value is a "container:" mode with
+// a non-empty name/ID.
+func validContainer(val string) bool {
+	id, ok := containerID(val)
+	return ok && id != ""
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go
new file mode 100644
index 0000000000..24c4fa8d90
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go
@@ -0,0 +1,42 @@
+//go:build !windows
+// +build !windows
+
+package container // import "github.com/docker/docker/api/types/container"
+
+// IsValid indicates if an isolation technology is valid
+func (i Isolation) IsValid() bool {
+	return i.IsDefault()
+}
+
+// NetworkName returns the name of the network stack.
+func (n NetworkMode) NetworkName() string {
+	if n.IsBridge() {
+		return "bridge"
+	} else if n.IsHost() {
+		return "host"
+	} else if n.IsContainer() {
+		return "container"
+	} else if n.IsNone() {
+		return "none"
+	} else if n.IsDefault() {
+		return "default"
+	} else if n.IsUserDefined() {
+		return n.UserDefined()
+	}
+	return ""
+}
+
+// IsBridge indicates whether container uses the bridge network stack
+func (n NetworkMode) IsBridge() bool {
+	return n == "bridge"
+}
+
+// IsHost indicates whether container uses the host network stack.
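+// For example, NetworkMode("host").IsHost() reports true, while
+// NetworkMode("container:web").IsHost() reports false ("web" here is only an
+// illustrative container name).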
+func (n NetworkMode) IsHost() bool {
+	return n == "host"
+}
+
+// IsUserDefined indicates user-created network
+func (n NetworkMode) IsUserDefined() bool {
+	return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer()
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go
new file mode 100644
index 0000000000..99f803a5bb
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go
@@ -0,0 +1,40 @@
+package container // import "github.com/docker/docker/api/types/container"
+
+// IsBridge indicates whether container uses the bridge network stack;
+// on Windows this network stack is given the name "nat".
+func (n NetworkMode) IsBridge() bool {
+	return n == "nat"
+}
+
+// IsHost indicates whether container uses the host network stack.
+// It returns false, as this is not supported on Windows.
+func (n NetworkMode) IsHost() bool {
+	return false
+}
+
+// IsUserDefined indicates user-created network
+func (n NetworkMode) IsUserDefined() bool {
+	return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer()
+}
+
+// IsValid indicates if an isolation technology is valid
+func (i Isolation) IsValid() bool {
+	return i.IsDefault() || i.IsHyperV() || i.IsProcess()
+}
+
+// NetworkName returns the name of the network stack.
+func (n NetworkMode) NetworkName() string {
+	if n.IsDefault() {
+		return "default"
+	} else if n.IsBridge() {
+		return "nat"
+	} else if n.IsNone() {
+		return "none"
+	} else if n.IsContainer() {
+		return "container"
+	} else if n.IsUserDefined() {
+		return n.UserDefined()
+	}
+
+	return ""
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/wait_exit_error.go b/vendor/github.com/docker/docker/api/types/container/wait_exit_error.go
new file mode 100644
index 0000000000..ab56d4eed8
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/wait_exit_error.go
@@ -0,0 +1,12 @@
+package container
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// WaitExitError container waiting error, if any
+// swagger:model WaitExitError
+type WaitExitError struct {
+
+	// Details of an error
+	Message string `json:"Message,omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/wait_response.go b/vendor/github.com/docker/docker/api/types/container/wait_response.go
new file mode 100644
index 0000000000..84fc6afddc
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/wait_response.go
@@ -0,0 +1,18 @@
+package container
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command + +// WaitResponse ContainerWaitResponse +// +// OK response to ContainerWait operation +// swagger:model WaitResponse +type WaitResponse struct { + + // error + Error *WaitExitError `json:"Error,omitempty"` + + // Exit code of the container + // Required: true + StatusCode int64 `json:"StatusCode"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/waitcondition.go b/vendor/github.com/docker/docker/api/types/container/waitcondition.go new file mode 100644 index 0000000000..cd8311f99c --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/waitcondition.go @@ -0,0 +1,22 @@ +package container // import "github.com/docker/docker/api/types/container" + +// WaitCondition is a type used to specify a container state for which +// to wait. +type WaitCondition string + +// Possible WaitCondition Values. +// +// WaitConditionNotRunning (default) is used to wait for any of the non-running +// states: "created", "exited", "dead", "removing", or "removed". +// +// WaitConditionNextExit is used to wait for the next time the state changes +// to a non-running state. If the state is currently "created" or "exited", +// this would cause Wait() to block until either the container runs and exits +// or is removed. +// +// WaitConditionRemoved is used to wait for the container to be removed. +const ( + WaitConditionNotRunning WaitCondition = "not-running" + WaitConditionNextExit WaitCondition = "next-exit" + WaitConditionRemoved WaitCondition = "removed" +) diff --git a/vendor/github.com/docker/docker/api/types/error_response.go b/vendor/github.com/docker/docker/api/types/error_response.go new file mode 100644 index 0000000000..dc942d9d9e --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/error_response.go @@ -0,0 +1,13 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ErrorResponse Represents an error. +// swagger:model ErrorResponse +type ErrorResponse struct { + + // The error message. + // Required: true + Message string `json:"message"` +} diff --git a/vendor/github.com/docker/docker/api/types/error_response_ext.go b/vendor/github.com/docker/docker/api/types/error_response_ext.go new file mode 100644 index 0000000000..f84f034cd5 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/error_response_ext.go @@ -0,0 +1,6 @@ +package types + +// Error returns the error message +func (e ErrorResponse) Error() string { + return e.Message +} diff --git a/vendor/github.com/docker/docker/api/types/events/events.go b/vendor/github.com/docker/docker/api/types/events/events.go new file mode 100644 index 0000000000..9fe07e26fd --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/events/events.go @@ -0,0 +1,47 @@ +package events // import "github.com/docker/docker/api/types/events" + +// Type is used for event-types. +type Type = string + +// List of known event types. +const ( + BuilderEventType Type = "builder" // BuilderEventType is the event type that the builder generates. + ConfigEventType Type = "config" // ConfigEventType is the event type that configs generate. + ContainerEventType Type = "container" // ContainerEventType is the event type that containers generate. 
+	DaemonEventType    Type = "daemon"    // DaemonEventType is the event type that the daemon generates.
+	ImageEventType     Type = "image"     // ImageEventType is the event type that images generate.
+	NetworkEventType   Type = "network"   // NetworkEventType is the event type that networks generate.
+	NodeEventType      Type = "node"      // NodeEventType is the event type that nodes generate.
+	PluginEventType    Type = "plugin"    // PluginEventType is the event type that plugins generate.
+	SecretEventType    Type = "secret"    // SecretEventType is the event type that secrets generate.
+	ServiceEventType   Type = "service"   // ServiceEventType is the event type that services generate.
+	VolumeEventType    Type = "volume"    // VolumeEventType is the event type that volumes generate.
+)
+
+// Actor describes something that generates events,
+// like a container, or a network, or a volume.
+// It has a defined name and a set of attributes.
+// The container attributes are its labels, other actors
+// can generate these attributes from other properties.
+type Actor struct {
+	ID         string
+	Attributes map[string]string
+}
+
+// Message represents the information an event contains
+type Message struct {
+	// Deprecated information from JSONMessage.
+	// With data only in container events.
+	Status string `json:"status,omitempty"` // Deprecated: use Action instead.
+	ID     string `json:"id,omitempty"`     // Deprecated: use Actor.ID instead.
+	From   string `json:"from,omitempty"`   // Deprecated: use Actor.Attributes["image"] instead.
+
+	Type   Type
+	Action string
+	Actor  Actor
+	// Engine events are local scope. Cluster events are swarm scope.
+	Scope string `json:"scope,omitempty"`
+
+	Time     int64 `json:"time,omitempty"`
+	TimeNano int64 `json:"timeNano,omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/filters/errors.go b/vendor/github.com/docker/docker/api/types/filters/errors.go
new file mode 100644
index 0000000000..f52f694408
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/filters/errors.go
@@ -0,0 +1,37 @@
+package filters
+
+import "fmt"
+
+// invalidFilter indicates that the provided filter or its value is invalid
+type invalidFilter struct {
+	Filter string
+	Value  []string
+}
+
+func (e invalidFilter) Error() string {
+	msg := "invalid filter"
+	if e.Filter != "" {
+		msg += " '" + e.Filter
+		if e.Value != nil {
+			msg = fmt.Sprintf("%s=%s", msg, e.Value)
+		}
+		msg += "'"
+	}
+	return msg
+}
+
+// InvalidParameter marks this error as ErrInvalidParameter
+func (e invalidFilter) InvalidParameter() {}
+
+// unreachableCode is an error indicating that the code path was not expected to be reached.
+type unreachableCode struct {
+	Filter string
+	Value  []string
+}
+
+// System marks this error as ErrSystem
+func (e unreachableCode) System() {}
+
+func (e unreachableCode) Error() string {
+	return fmt.Sprintf("unreachable code reached for filter: %q with values: %s", e.Filter, e.Value)
+}
diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go
new file mode 100644
index 0000000000..0c39ab5f18
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/filters/parse.go
@@ -0,0 +1,346 @@
+/*
+Package filters provides tools for encoding a mapping of keys to a set of
+multiple values.
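+
+A brief usage sketch (all identifiers below are defined in this package):
+
+	args := NewArgs(Arg("label", "env=prod"))
+	args.Add("dangling", "true")
+	encoded, _ := ToJSON(args) // JSON-encoded filter set for the Engine API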
+*/
+package filters // import "github.com/docker/docker/api/types/filters"
+
+import (
+	"encoding/json"
+	"regexp"
+	"strings"
+
+	"github.com/docker/docker/api/types/versions"
+)
+
+// Args stores a mapping of keys to a set of multiple values.
+type Args struct {
+	fields map[string]map[string]bool
+}
+
+// KeyValuePair is used to initialize a new Args
+type KeyValuePair struct {
+	Key   string
+	Value string
+}
+
+// Arg creates a new KeyValuePair for initializing Args
+func Arg(key, value string) KeyValuePair {
+	return KeyValuePair{Key: key, Value: value}
+}
+
+// NewArgs returns a new Args populated with the initial args
+func NewArgs(initialArgs ...KeyValuePair) Args {
+	args := Args{fields: map[string]map[string]bool{}}
+	for _, arg := range initialArgs {
+		args.Add(arg.Key, arg.Value)
+	}
+	return args
+}
+
+// Keys returns all the keys in list of Args
+func (args Args) Keys() []string {
+	keys := make([]string, 0, len(args.fields))
+	for k := range args.fields {
+		keys = append(keys, k)
+	}
+	return keys
+}
+
+// MarshalJSON returns a JSON byte representation of the Args
+func (args Args) MarshalJSON() ([]byte, error) {
+	if len(args.fields) == 0 {
+		return []byte("{}"), nil
+	}
+	return json.Marshal(args.fields)
+}
+
+// ToJSON returns the Args as a JSON encoded string
+func ToJSON(a Args) (string, error) {
+	if a.Len() == 0 {
+		return "", nil
+	}
+	buf, err := json.Marshal(a)
+	return string(buf), err
+}
+
+// ToParamWithVersion encodes Args as a JSON string. If version is less than 1.22
+// then the encoded format will use an older legacy format where the values are a
+// list of strings, instead of a set.
+//
+// Deprecated: do not use in any new code; use ToJSON instead
+func ToParamWithVersion(version string, a Args) (string, error) {
+	if a.Len() == 0 {
+		return "", nil
+	}
+
+	if version != "" && versions.LessThan(version, "1.22") {
+		buf, err := json.Marshal(convertArgsToSlice(a.fields))
+		return string(buf), err
+	}
+
+	return ToJSON(a)
+}
+
+// FromJSON decodes a JSON encoded string into Args
+func FromJSON(p string) (Args, error) {
+	args := NewArgs()
+
+	if p == "" {
+		return args, nil
+	}
+
+	raw := []byte(p)
+	err := json.Unmarshal(raw, &args)
+	if err == nil {
+		return args, nil
+	}
+
+	// Fallback to parsing arguments in the legacy slice format
+	deprecated := map[string][]string{}
+	if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil {
+		return args, &invalidFilter{}
+	}
+
+	args.fields = deprecatedArgs(deprecated)
+	return args, nil
+}
+
+// UnmarshalJSON populates the Args from JSON-encoded bytes
+func (args Args) UnmarshalJSON(raw []byte) error {
+	return json.Unmarshal(raw, &args.fields)
+}
+
+// Get returns the list of values associated with the key
+func (args Args) Get(key string) []string {
+	values := args.fields[key]
+	if values == nil {
+		return make([]string, 0)
+	}
+	slice := make([]string, 0, len(values))
+	for key := range values {
+		slice = append(slice, key)
+	}
+	return slice
+}
+
+// Add a new value to the set of values
+func (args Args) Add(key, value string) {
+	if _, ok := args.fields[key]; ok {
+		args.fields[key][value] = true
+	} else {
+		args.fields[key] = map[string]bool{value: true}
+	}
+}
+
+// Del removes a value from the set
+func (args Args) Del(key, value string) {
+	if _, ok := args.fields[key]; ok {
+		delete(args.fields[key], value)
+		if len(args.fields[key]) == 0 {
+			delete(args.fields, key)
+		}
+	}
+}
+
+// Len returns the number of keys in the mapping
+func (args Args) Len() int {
+	return len(args.fields)
+}
+
+// MatchKVList returns true if all the pairs in sources exist as key=value
+// pairs in the mapping at key, or if there are no values at key.
+func (args Args) MatchKVList(key string, sources map[string]string) bool {
+	fieldValues := args.fields[key]
+
+	// do not filter if there is no filter set or cannot determine filter
+	if len(fieldValues) == 0 {
+		return true
+	}
+
+	if len(sources) == 0 {
+		return false
+	}
+
+	for value := range fieldValues {
+		testK, testV, hasValue := strings.Cut(value, "=")
+
+		v, ok := sources[testK]
+		if !ok {
+			return false
+		}
+		if hasValue && testV != v {
+			return false
+		}
+	}
+
+	return true
+}
+
+// Match returns true if any of the values at key match the source string
+func (args Args) Match(field, source string) bool {
+	if args.ExactMatch(field, source) {
+		return true
+	}
+
+	fieldValues := args.fields[field]
+	for name2match := range fieldValues {
+		match, err := regexp.MatchString(name2match, source)
+		if err != nil {
+			continue
+		}
+		if match {
+			return true
+		}
+	}
+	return false
+}
+
+// GetBoolOrDefault returns a boolean value of the key if the key is present
+// and is interpretable as a boolean value. Otherwise the default value is returned.
+// Error is not nil only if the filter values are not valid booleans or are conflicting.
+func (args Args) GetBoolOrDefault(key string, defaultValue bool) (bool, error) {
+	fieldValues, ok := args.fields[key]
+
+	if !ok {
+		return defaultValue, nil
+	}
+
+	if len(fieldValues) == 0 {
+		return defaultValue, &invalidFilter{key, nil}
+	}
+
+	isFalse := fieldValues["0"] || fieldValues["false"]
+	isTrue := fieldValues["1"] || fieldValues["true"]
+
+	conflicting := isFalse && isTrue
+	invalid := !isFalse && !isTrue
+
+	if conflicting || invalid {
+		return defaultValue, &invalidFilter{key, args.Get(key)}
+	} else if isFalse {
+		return false, nil
+	} else if isTrue {
+		return true, nil
+	}
+
+	// This code shouldn't be reached.
+	return defaultValue, &unreachableCode{Filter: key, Value: args.Get(key)}
+}
+
+// ExactMatch returns true if the source matches exactly one of the values.
+func (args Args) ExactMatch(key, source string) bool {
+	fieldValues, ok := args.fields[key]
+	// do not filter if there is no filter set or cannot determine filter
+	if !ok || len(fieldValues) == 0 {
+		return true
+	}
+
+	// try to match full name value to avoid O(N) regular expression matching
+	return fieldValues[source]
+}
+
+// UniqueExactMatch returns true if there is only one value and the source
+// matches exactly the value.
+func (args Args) UniqueExactMatch(key, source string) bool {
+	fieldValues := args.fields[key]
+	// do not filter if there is no filter set or cannot determine filter
+	if len(fieldValues) == 0 {
+		return true
+	}
+	if len(args.fields[key]) != 1 {
+		return false
+	}
+
+	// try to match full name value to avoid O(N) regular expression matching
+	return fieldValues[source]
+}
+
+// FuzzyMatch returns true if the source matches exactly one value, or the
+// source has one of the values as a prefix.
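+//
+// For example, with a filter built from helpers in this package ("web" and
+// "webserver" are placeholder values):
+//
+//	args := NewArgs(Arg("name", "web"))
+//	args.FuzzyMatch("name", "webserver") // true: "web" is a prefix of the source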
+func (args Args) FuzzyMatch(key, source string) bool {
+	if args.ExactMatch(key, source) {
+		return true
+	}
+
+	fieldValues := args.fields[key]
+	for prefix := range fieldValues {
+		if strings.HasPrefix(source, prefix) {
+			return true
+		}
+	}
+	return false
+}
+
+// Contains returns true if the key exists in the mapping
+func (args Args) Contains(field string) bool {
+	_, ok := args.fields[field]
+	return ok
+}
+
+// Validate compares the set of accepted keys against the keys in the mapping.
+// An error is returned if any mapping keys are not in the accepted set.
+func (args Args) Validate(accepted map[string]bool) error {
+	for name := range args.fields {
+		if !accepted[name] {
+			return &invalidFilter{name, nil}
+		}
+	}
+	return nil
+}
+
+// WalkValues iterates over the list of values for a key in the mapping and calls
+// op() for each value. If op returns an error the iteration stops and the
+// error is returned.
+func (args Args) WalkValues(field string, op func(value string) error) error {
+	if _, ok := args.fields[field]; !ok {
+		return nil
+	}
+	for v := range args.fields[field] {
+		if err := op(v); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Clone returns a copy of args.
+func (args Args) Clone() (newArgs Args) {
+	newArgs.fields = make(map[string]map[string]bool, len(args.fields))
+	for k, m := range args.fields {
+		var mm map[string]bool
+		if m != nil {
+			mm = make(map[string]bool, len(m))
+			for kk, v := range m {
+				mm[kk] = v
+			}
+		}
+		newArgs.fields[k] = mm
+	}
+	return newArgs
+}
+
+func deprecatedArgs(d map[string][]string) map[string]map[string]bool {
+	m := map[string]map[string]bool{}
+	for k, v := range d {
+		values := map[string]bool{}
+		for _, vv := range v {
+			values[vv] = true
+		}
+		m[k] = values
+	}
+	return m
+}
+
+func convertArgsToSlice(f map[string]map[string]bool) map[string][]string {
+	m := map[string][]string{}
+	for k, v := range f {
+		values := []string{}
+		for kk := range v {
+			if v[kk] {
+				values = append(values, kk)
+			}
+		}
+		m[k] = values
+	}
+	return m
+}
diff --git a/vendor/github.com/docker/docker/api/types/graph_driver_data.go b/vendor/github.com/docker/docker/api/types/graph_driver_data.go
new file mode 100644
index 0000000000..ce3deb331c
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/graph_driver_data.go
@@ -0,0 +1,23 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// GraphDriverData Information about the storage driver used to store the container's and
+// image's filesystem.
+//
+// swagger:model GraphDriverData
+type GraphDriverData struct {
+
+	// Low-level storage metadata, provided as key/value pairs.
+	//
+	// This information is driver-specific, and depends on the storage-driver
+	// in use, and should be used for informational purposes only.
+	//
+	// Required: true
+	Data map[string]string `json:"Data"`
+
+	// Name of the storage driver.
+	// Required: true
+	Name string `json:"Name"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/id_response.go b/vendor/github.com/docker/docker/api/types/id_response.go
new file mode 100644
index 0000000000..7592d2f8b1
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/id_response.go
@@ -0,0 +1,13 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command + +// IDResponse Response to an API call that returns just an Id +// swagger:model IdResponse +type IDResponse struct { + + // The id of the newly created object. + // Required: true + ID string `json:"Id"` +} diff --git a/vendor/github.com/docker/docker/api/types/image/image_history.go b/vendor/github.com/docker/docker/api/types/image/image_history.go new file mode 100644 index 0000000000..e302bb0aeb --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/image/image_history.go @@ -0,0 +1,36 @@ +package image // import "github.com/docker/docker/api/types/image" + +// ---------------------------------------------------------------------------- +// Code generated by `swagger generate operation`. DO NOT EDIT. +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// HistoryResponseItem individual image layer information in response to ImageHistory operation +// swagger:model HistoryResponseItem +type HistoryResponseItem struct { + + // comment + // Required: true + Comment string `json:"Comment"` + + // created + // Required: true + Created int64 `json:"Created"` + + // created by + // Required: true + CreatedBy string `json:"CreatedBy"` + + // Id + // Required: true + ID string `json:"Id"` + + // size + // Required: true + Size int64 `json:"Size"` + + // tags + // Required: true + Tags []string `json:"Tags"` +} diff --git a/vendor/github.com/docker/docker/api/types/image/opts.go b/vendor/github.com/docker/docker/api/types/image/opts.go new file mode 100644 index 0000000000..3cefecb0da --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/image/opts.go @@ -0,0 +1,9 @@ +package image + +import ocispec "github.com/opencontainers/image-spec/specs-go/v1" + +// GetImageOpts holds parameters to inspect an image. +type GetImageOpts struct { + Platform *ocispec.Platform + Details bool +} diff --git a/vendor/github.com/docker/docker/api/types/image_delete_response_item.go b/vendor/github.com/docker/docker/api/types/image_delete_response_item.go new file mode 100644 index 0000000000..b9a65a0d8e --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/image_delete_response_item.go @@ -0,0 +1,15 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ImageDeleteResponseItem image delete response item +// swagger:model ImageDeleteResponseItem +type ImageDeleteResponseItem struct { + + // The image ID of an image that was deleted + Deleted string `json:"Deleted,omitempty"` + + // The image ID of an image that was untagged + Untagged string `json:"Untagged,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/image_summary.go b/vendor/github.com/docker/docker/api/types/image_summary.go new file mode 100644 index 0000000000..0f6f144840 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/image_summary.go @@ -0,0 +1,94 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ImageSummary image summary +// swagger:model ImageSummary +type ImageSummary struct { + + // Number of containers using this image. Includes both stopped and running + // containers. 
+	//
+	// This count is not calculated by default, and depends on which API endpoint
+	// is used. `-1` indicates that the value has not been set / calculated.
+	//
+	// Required: true
+	Containers int64 `json:"Containers"`
+
+	// Date and time at which the image was created as a Unix timestamp
+	// (number of seconds since EPOCH).
+	//
+	// Required: true
+	Created int64 `json:"Created"`
+
+	// ID is the content-addressable ID of an image.
+	//
+	// This identifier is a content-addressable digest calculated from the
+	// image's configuration (which includes the digests of layers used by
+	// the image).
+	//
+	// Note that this digest differs from the `RepoDigests` below, which
+	// holds digests of image manifests that reference the image.
+	//
+	// Required: true
+	ID string `json:"Id"`
+
+	// User-defined key/value metadata.
+	// Required: true
+	Labels map[string]string `json:"Labels"`
+
+	// ID of the parent image.
+	//
+	// Depending on how the image was created, this field may be empty and
+	// is only set for images that were built/created locally. This field
+	// is empty if the image was pulled from an image registry.
+	//
+	// Required: true
+	ParentID string `json:"ParentId"`
+
+	// List of content-addressable digests of locally available image manifests
+	// that the image is referenced from. Multiple manifests can refer to the
+	// same image.
+	//
+	// These digests are usually only available if the image was either pulled
+	// from a registry, or if the image was pushed to a registry, which is when
+	// the manifest is generated and its digest calculated.
+	//
+	// Required: true
+	RepoDigests []string `json:"RepoDigests"`
+
+	// List of image names/tags in the local image cache that reference this
+	// image.
+	//
+	// Multiple image tags can refer to the same image, and this list may be
+	// empty if no tags reference the image, in which case the image is
+	// "untagged"; an untagged image can still be referenced by its ID.
+	//
+	// Required: true
+	RepoTags []string `json:"RepoTags"`
+
+	// Total size of image layers that are shared between this image and other
+	// images.
+	//
+	// This size is not calculated by default. `-1` indicates that the value
+	// has not been set / calculated.
+	//
+	// Required: true
+	SharedSize int64 `json:"SharedSize"`
+
+	// Total size of the image including all layers it is composed of.
+	//
+	// Required: true
+	Size int64 `json:"Size"`
+
+	// Total size of the image including all layers it is composed of.
+	//
+	// In versions of Docker before v1.10, this field was calculated from
+	// the image itself and all of its parent images. Images are now stored
+	// self-contained, and no longer use a parent-chain, making this field
+	// an equivalent of the Size field.
+	//
+	// Deprecated: this field is kept for backward compatibility, and will be removed in API v1.44.
+	VirtualSize int64 `json:"VirtualSize,omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go
new file mode 100644
index 0000000000..ac4ce62231
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/mount/mount.go
@@ -0,0 +1,140 @@
+package mount // import "github.com/docker/docker/api/types/mount"
+
+import (
+	"os"
+)
+
+// Type represents the type of a mount.
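+// The recognized values ("bind", "volume", "tmpfs", "npipe", "cluster") are
+// enumerated in the Type constants below.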
+type Type string + +// Type constants +const ( + // TypeBind is the type for mounting host dir + TypeBind Type = "bind" + // TypeVolume is the type for remote storage volumes + TypeVolume Type = "volume" + // TypeTmpfs is the type for mounting tmpfs + TypeTmpfs Type = "tmpfs" + // TypeNamedPipe is the type for mounting Windows named pipes + TypeNamedPipe Type = "npipe" + // TypeCluster is the type for Swarm Cluster Volumes. + TypeCluster Type = "cluster" +) + +// Mount represents a mount (volume). +type Mount struct { + Type Type `json:",omitempty"` + // Source specifies the name of the mount. Depending on mount type, this + // may be a volume name or a host path, or even ignored. + // Source is not supported for tmpfs (must be an empty value) + Source string `json:",omitempty"` + Target string `json:",omitempty"` + ReadOnly bool `json:",omitempty"` + Consistency Consistency `json:",omitempty"` + + BindOptions *BindOptions `json:",omitempty"` + VolumeOptions *VolumeOptions `json:",omitempty"` + TmpfsOptions *TmpfsOptions `json:",omitempty"` + ClusterOptions *ClusterOptions `json:",omitempty"` +} + +// Propagation represents the propagation of a mount. +type Propagation string + +const ( + // PropagationRPrivate RPRIVATE + PropagationRPrivate Propagation = "rprivate" + // PropagationPrivate PRIVATE + PropagationPrivate Propagation = "private" + // PropagationRShared RSHARED + PropagationRShared Propagation = "rshared" + // PropagationShared SHARED + PropagationShared Propagation = "shared" + // PropagationRSlave RSLAVE + PropagationRSlave Propagation = "rslave" + // PropagationSlave SLAVE + PropagationSlave Propagation = "slave" +) + +// Propagations is the list of all valid mount propagations +var Propagations = []Propagation{ + PropagationRPrivate, + PropagationPrivate, + PropagationRShared, + PropagationShared, + PropagationRSlave, + PropagationSlave, +} + +// Consistency represents the consistency requirements of a mount. +type Consistency string + +const ( + // ConsistencyFull guarantees bind mount-like consistency + ConsistencyFull Consistency = "consistent" + // ConsistencyCached mounts can cache read data and FS structure + ConsistencyCached Consistency = "cached" + // ConsistencyDelegated mounts can cache read and written data and structure + ConsistencyDelegated Consistency = "delegated" + // ConsistencyDefault provides "consistent" behavior unless overridden + ConsistencyDefault Consistency = "default" +) + +// BindOptions defines options specific to mounts of type "bind". +type BindOptions struct { + Propagation Propagation `json:",omitempty"` + NonRecursive bool `json:",omitempty"` + CreateMountpoint bool `json:",omitempty"` +} + +// VolumeOptions represents the options for a mount of type volume. +type VolumeOptions struct { + NoCopy bool `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + DriverConfig *Driver `json:",omitempty"` +} + +// Driver represents a volume driver. +type Driver struct { + Name string `json:",omitempty"` + Options map[string]string `json:",omitempty"` +} + +// TmpfsOptions defines options specific to mounts of type "tmpfs". +type TmpfsOptions struct { + // Size sets the size of the tmpfs, in bytes. + // + // This will be converted to an operating system specific value + // depending on the host. For example, on linux, it will be converted to + // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with + // docker, uses a straight byte value. + // + // Percentages are not supported. 
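+	//
+	// For example, SizeBytes: 64 * 1024 * 1024 requests a 64 MiB tmpfs.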
+ SizeBytes int64 `json:",omitempty"` + // Mode of the tmpfs upon creation + Mode os.FileMode `json:",omitempty"` + + // TODO(stevvooe): There are several more tmpfs flags, specified in the + // daemon, that are accepted. Only the most basic are added for now. + // + // From https://github.com/moby/sys/blob/mount/v0.1.1/mount/flags.go#L47-L56 + // + // var validFlags = map[string]bool{ + // "": true, + // "size": true, X + // "mode": true, X + // "uid": true, + // "gid": true, + // "nr_inodes": true, + // "nr_blocks": true, + // "mpol": true, + // } + // + // Some of these may be straightforward to add, but others, such as + // uid/gid have implications in a clustered system. +} + +// ClusterOptions specifies options for a Cluster volume. +type ClusterOptions struct { + // intentionally empty +} diff --git a/vendor/github.com/docker/docker/api/types/network/network.go b/vendor/github.com/docker/docker/api/types/network/network.go new file mode 100644 index 0000000000..437b184c67 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/network/network.go @@ -0,0 +1,126 @@ +package network // import "github.com/docker/docker/api/types/network" +import ( + "github.com/docker/docker/api/types/filters" +) + +// Address represents an IP address +type Address struct { + Addr string + PrefixLen int +} + +// IPAM represents IP Address Management +type IPAM struct { + Driver string + Options map[string]string // Per network IPAM driver options + Config []IPAMConfig +} + +// IPAMConfig represents IPAM configurations +type IPAMConfig struct { + Subnet string `json:",omitempty"` + IPRange string `json:",omitempty"` + Gateway string `json:",omitempty"` + AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` +} + +// EndpointIPAMConfig represents IPAM configurations for the endpoint +type EndpointIPAMConfig struct { + IPv4Address string `json:",omitempty"` + IPv6Address string `json:",omitempty"` + LinkLocalIPs []string `json:",omitempty"` +} + +// Copy makes a copy of the endpoint ipam config +func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig { + cfgCopy := *cfg + cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs)) + cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...) + return &cfgCopy +} + +// PeerInfo represents one peer of an overlay network +type PeerInfo struct { + Name string + IP string +} + +// EndpointSettings stores the network endpoint details +type EndpointSettings struct { + // Configurations + IPAMConfig *EndpointIPAMConfig + Links []string + Aliases []string + // Operational data + NetworkID string + EndpointID string + Gateway string + IPAddress string + IPPrefixLen int + IPv6Gateway string + GlobalIPv6Address string + GlobalIPv6PrefixLen int + MacAddress string + DriverOpts map[string]string +} + +// Task carries the information about one backend task +type Task struct { + Name string + EndpointID string + EndpointIP string + Info map[string]string +} + +// ServiceInfo represents service parameters with the list of service's tasks +type ServiceInfo struct { + VIP string + Ports []string + LocalLBIndex int + Tasks []Task +} + +// Copy makes a deep copy of `EndpointSettings` +func (es *EndpointSettings) Copy() *EndpointSettings { + epCopy := *es + if es.IPAMConfig != nil { + epCopy.IPAMConfig = es.IPAMConfig.Copy() + } + + if es.Links != nil { + links := make([]string, 0, len(es.Links)) + epCopy.Links = append(links, es.Links...) 
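+		// (the fresh backing slice above keeps this copy independent of es.Links)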
+ } + + if es.Aliases != nil { + aliases := make([]string, 0, len(es.Aliases)) + epCopy.Aliases = append(aliases, es.Aliases...) + } + return &epCopy +} + +// NetworkingConfig represents the container's networking configuration for each of its interfaces +// Carries the networking configs specified in the `docker run` and `docker network connect` commands +type NetworkingConfig struct { + EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network +} + +// ConfigReference specifies the source which provides a network's configuration +type ConfigReference struct { + Network string +} + +var acceptedFilters = map[string]bool{ + "dangling": true, + "driver": true, + "id": true, + "label": true, + "name": true, + "scope": true, + "type": true, +} + +// ValidateFilters validates the list of filter args with the available filters. +func ValidateFilters(filter filters.Args) error { + return filter.Validate(acceptedFilters) +} diff --git a/vendor/github.com/docker/docker/api/types/plugin.go b/vendor/github.com/docker/docker/api/types/plugin.go new file mode 100644 index 0000000000..abae48b9ab --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin.go @@ -0,0 +1,203 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Plugin A plugin for the Engine API +// swagger:model Plugin +type Plugin struct { + + // config + // Required: true + Config PluginConfig `json:"Config"` + + // True if the plugin is running. False if the plugin is not running, only installed. + // Required: true + Enabled bool `json:"Enabled"` + + // Id + ID string `json:"Id,omitempty"` + + // name + // Required: true + Name string `json:"Name"` + + // plugin remote reference used to push/pull the plugin + PluginReference string `json:"PluginReference,omitempty"` + + // settings + // Required: true + Settings PluginSettings `json:"Settings"` +} + +// PluginConfig The config of a plugin. 
+// swagger:model PluginConfig +type PluginConfig struct { + + // args + // Required: true + Args PluginConfigArgs `json:"Args"` + + // description + // Required: true + Description string `json:"Description"` + + // Docker Version used to create the plugin + DockerVersion string `json:"DockerVersion,omitempty"` + + // documentation + // Required: true + Documentation string `json:"Documentation"` + + // entrypoint + // Required: true + Entrypoint []string `json:"Entrypoint"` + + // env + // Required: true + Env []PluginEnv `json:"Env"` + + // interface + // Required: true + Interface PluginConfigInterface `json:"Interface"` + + // ipc host + // Required: true + IpcHost bool `json:"IpcHost"` + + // linux + // Required: true + Linux PluginConfigLinux `json:"Linux"` + + // mounts + // Required: true + Mounts []PluginMount `json:"Mounts"` + + // network + // Required: true + Network PluginConfigNetwork `json:"Network"` + + // pid host + // Required: true + PidHost bool `json:"PidHost"` + + // propagated mount + // Required: true + PropagatedMount string `json:"PropagatedMount"` + + // user + User PluginConfigUser `json:"User,omitempty"` + + // work dir + // Required: true + WorkDir string `json:"WorkDir"` + + // rootfs + Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"` +} + +// PluginConfigArgs plugin config args +// swagger:model PluginConfigArgs +type PluginConfigArgs struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // value + // Required: true + Value []string `json:"Value"` +} + +// PluginConfigInterface The interface between Docker and the plugin +// swagger:model PluginConfigInterface +type PluginConfigInterface struct { + + // Protocol to use for clients connecting to the plugin. + ProtocolScheme string `json:"ProtocolScheme,omitempty"` + + // socket + // Required: true + Socket string `json:"Socket"` + + // types + // Required: true + Types []PluginInterfaceType `json:"Types"` +} + +// PluginConfigLinux plugin config linux +// swagger:model PluginConfigLinux +type PluginConfigLinux struct { + + // allow all devices + // Required: true + AllowAllDevices bool `json:"AllowAllDevices"` + + // capabilities + // Required: true + Capabilities []string `json:"Capabilities"` + + // devices + // Required: true + Devices []PluginDevice `json:"Devices"` +} + +// PluginConfigNetwork plugin config network +// swagger:model PluginConfigNetwork +type PluginConfigNetwork struct { + + // type + // Required: true + Type string `json:"Type"` +} + +// PluginConfigRootfs plugin config rootfs +// swagger:model PluginConfigRootfs +type PluginConfigRootfs struct { + + // diff ids + DiffIds []string `json:"diff_ids"` + + // type + Type string `json:"type,omitempty"` +} + +// PluginConfigUser plugin config user +// swagger:model PluginConfigUser +type PluginConfigUser struct { + + // g ID + GID uint32 `json:"GID,omitempty"` + + // UID + UID uint32 `json:"UID,omitempty"` +} + +// PluginSettings Settings that can be modified by users. 
+// swagger:model PluginSettings +type PluginSettings struct { + + // args + // Required: true + Args []string `json:"Args"` + + // devices + // Required: true + Devices []PluginDevice `json:"Devices"` + + // env + // Required: true + Env []string `json:"Env"` + + // mounts + // Required: true + Mounts []PluginMount `json:"Mounts"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_device.go b/vendor/github.com/docker/docker/api/types/plugin_device.go new file mode 100644 index 0000000000..5699010675 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_device.go @@ -0,0 +1,25 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginDevice plugin device +// swagger:model PluginDevice +type PluginDevice struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // path + // Required: true + Path *string `json:"Path"` + + // settable + // Required: true + Settable []string `json:"Settable"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_env.go b/vendor/github.com/docker/docker/api/types/plugin_env.go new file mode 100644 index 0000000000..32962dc2eb --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_env.go @@ -0,0 +1,25 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginEnv plugin env +// swagger:model PluginEnv +type PluginEnv struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // value + // Required: true + Value *string `json:"Value"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_interface_type.go b/vendor/github.com/docker/docker/api/types/plugin_interface_type.go new file mode 100644 index 0000000000..c82f204e87 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_interface_type.go @@ -0,0 +1,21 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginInterfaceType plugin interface type +// swagger:model PluginInterfaceType +type PluginInterfaceType struct { + + // capability + // Required: true + Capability string `json:"Capability"` + + // prefix + // Required: true + Prefix string `json:"Prefix"` + + // version + // Required: true + Version string `json:"Version"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_mount.go b/vendor/github.com/docker/docker/api/types/plugin_mount.go new file mode 100644 index 0000000000..5c031cf8b5 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_mount.go @@ -0,0 +1,37 @@ +package types + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// PluginMount plugin mount +// swagger:model PluginMount +type PluginMount struct { + + // description + // Required: true + Description string `json:"Description"` + + // destination + // Required: true + Destination string `json:"Destination"` + + // name + // Required: true + Name string `json:"Name"` + + // options + // Required: true + Options []string `json:"Options"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // source + // Required: true + Source *string `json:"Source"` + + // type + // Required: true + Type string `json:"Type"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_responses.go b/vendor/github.com/docker/docker/api/types/plugin_responses.go new file mode 100644 index 0000000000..60d1fb5ad8 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_responses.go @@ -0,0 +1,71 @@ +package types // import "github.com/docker/docker/api/types" + +import ( + "encoding/json" + "fmt" + "sort" +) + +// PluginsListResponse contains the response for the Engine API +type PluginsListResponse []*Plugin + +// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType +func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error { + versionIndex := len(p) + prefixIndex := 0 + if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' { + return fmt.Errorf("%q is not a plugin interface type", p) + } + p = p[1 : len(p)-1] +loop: + for i, b := range p { + switch b { + case '.': + prefixIndex = i + case '/': + versionIndex = i + break loop + } + } + t.Prefix = string(p[:prefixIndex]) + t.Capability = string(p[prefixIndex+1 : versionIndex]) + if versionIndex < len(p) { + t.Version = string(p[versionIndex+1:]) + } + return nil +} + +// MarshalJSON implements json.Marshaler for PluginInterfaceType +func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) { + return json.Marshal(t.String()) +} + +// String implements fmt.Stringer for PluginInterfaceType +func (t PluginInterfaceType) String() string { + return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version) +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. +type PluginPrivilege struct { + Name string + Description string + Value []string +} + +// PluginPrivileges is a list of PluginPrivilege +type PluginPrivileges []PluginPrivilege + +func (s PluginPrivileges) Len() int { + return len(s) +} + +func (s PluginPrivileges) Less(i, j int) bool { + return s[i].Name < s[j].Name +} + +func (s PluginPrivileges) Swap(i, j int) { + sort.Strings(s[i].Value) + sort.Strings(s[j].Value) + s[i], s[j] = s[j], s[i] +} diff --git a/vendor/github.com/docker/docker/api/types/port.go b/vendor/github.com/docker/docker/api/types/port.go new file mode 100644 index 0000000000..d91234744c --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/port.go @@ -0,0 +1,23 @@ +package types + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// Port An open port on a container
+// swagger:model Port
+type Port struct {
+
+	// Host IP address that the container's port is mapped to
+	IP string `json:"IP,omitempty"`
+
+	// Port on the container
+	// Required: true
+	PrivatePort uint16 `json:"PrivatePort"`
+
+	// Port exposed on the host
+	PublicPort uint16 `json:"PublicPort,omitempty"`
+
+	// type
+	// Required: true
+	Type string `json:"Type"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/registry/authconfig.go b/vendor/github.com/docker/docker/api/types/registry/authconfig.go
new file mode 100644
index 0000000000..97a924e374
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/registry/authconfig.go
@@ -0,0 +1,99 @@
+package registry // import "github.com/docker/docker/api/types/registry"
+import (
+	"encoding/base64"
+	"encoding/json"
+	"io"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+// AuthHeader is the name of the header used to send encoded registry
+// authorization credentials for registry operations (push/pull).
+const AuthHeader = "X-Registry-Auth"
+
+// AuthConfig contains authorization information for connecting to a Registry.
+type AuthConfig struct {
+	Username string `json:"username,omitempty"`
+	Password string `json:"password,omitempty"`
+	Auth     string `json:"auth,omitempty"`
+
+	// Email is an optional value associated with the username.
+	// This field is deprecated and will be removed in a later
+	// version of docker.
+	Email string `json:"email,omitempty"`
+
+	ServerAddress string `json:"serveraddress,omitempty"`
+
+	// IdentityToken is used to authenticate the user and get
+	// an access token for the registry.
+	IdentityToken string `json:"identitytoken,omitempty"`
+
+	// RegistryToken is a bearer token to be sent to a registry
+	RegistryToken string `json:"registrytoken,omitempty"`
+}
+
+// EncodeAuthConfig serializes the auth configuration as a base64url encoded
+// (RFC4648, section 5) JSON string for sending through the X-Registry-Auth header.
+//
+// For details on base64url encoding, see:
+// - RFC4648, section 5: https://tools.ietf.org/html/rfc4648#section-5
+func EncodeAuthConfig(authConfig AuthConfig) (string, error) {
+	buf, err := json.Marshal(authConfig)
+	if err != nil {
+		return "", errInvalidParameter{err}
+	}
+	return base64.URLEncoding.EncodeToString(buf), nil
+}
+
+// DecodeAuthConfig decodes base64url encoded (RFC4648, section 5) JSON
+// authentication information as sent through the X-Registry-Auth header.
+//
+// This function always returns an AuthConfig, even if an error occurs. It is up
+// to the caller to decide if authentication is required, and if the error can
+// be ignored.
+//
+// For details on base64url encoding, see:
+// - RFC4648, section 5: https://tools.ietf.org/html/rfc4648#section-5
+func DecodeAuthConfig(authEncoded string) (*AuthConfig, error) {
+	if authEncoded == "" {
+		return &AuthConfig{}, nil
+	}
+
+	authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
+	return decodeAuthConfigFromReader(authJSON)
+}
+
+// DecodeAuthConfigBody decodes authentication information as sent as JSON in the
+// body of a request. This function is to provide backward compatibility with old
+// clients and API versions. Current clients and API versions expect authentication
+// to be provided through the X-Registry-Auth header.
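+//
+// A round-trip sketch using the helpers defined in this file (the credential
+// values are placeholders):
+//
+//	enc, _ := EncodeAuthConfig(AuthConfig{Username: "user", Password: "secret"})
+//	cfg, _ := DecodeAuthConfig(enc) // cfg.Username == "user"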
+// +// Like DecodeAuthConfig, this function always returns an AuthConfig, even if an +// error occurs. It is up to the caller to decide if authentication is required, +// and if the error can be ignored. +func DecodeAuthConfigBody(rdr io.ReadCloser) (*AuthConfig, error) { + return decodeAuthConfigFromReader(rdr) +} + +func decodeAuthConfigFromReader(rdr io.Reader) (*AuthConfig, error) { + authConfig := &AuthConfig{} + if err := json.NewDecoder(rdr).Decode(authConfig); err != nil { + // always return an (empty) AuthConfig to increase compatibility with + // the existing API. + return &AuthConfig{}, invalid(err) + } + return authConfig, nil +} + +func invalid(err error) error { + return errInvalidParameter{errors.Wrap(err, "invalid X-Registry-Auth header")} +} + +type errInvalidParameter struct{ error } + +func (errInvalidParameter) InvalidParameter() {} + +func (e errInvalidParameter) Cause() error { return e.error } + +func (e errInvalidParameter) Unwrap() error { return e.error } diff --git a/vendor/github.com/docker/docker/api/types/registry/authenticate.go b/vendor/github.com/docker/docker/api/types/registry/authenticate.go new file mode 100644 index 0000000000..f0a2113e40 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/registry/authenticate.go @@ -0,0 +1,21 @@ +package registry // import "github.com/docker/docker/api/types/registry" + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// AuthenticateOKBody authenticate o k body +// swagger:model AuthenticateOKBody +type AuthenticateOKBody struct { + + // An opaque token used to authenticate a user after a successful login + // Required: true + IdentityToken string `json:"IdentityToken"` + + // The status of the authentication + // Required: true + Status string `json:"Status"` +} diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go new file mode 100644 index 0000000000..b83f5d7b2e --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/registry/registry.go @@ -0,0 +1,120 @@ +package registry // import "github.com/docker/docker/api/types/registry" + +import ( + "encoding/json" + "net" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ServiceConfig stores daemon registry services configuration. 
+type ServiceConfig struct { + AllowNondistributableArtifactsCIDRs []*NetIPNet + AllowNondistributableArtifactsHostnames []string + InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` + IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` + Mirrors []string +} + +// NetIPNet is the net.IPNet type, which can be marshalled and +// unmarshalled to JSON +type NetIPNet net.IPNet + +// String returns the CIDR notation of ipnet +func (ipnet *NetIPNet) String() string { + return (*net.IPNet)(ipnet).String() +} + +// MarshalJSON returns the JSON representation of the IPNet +func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { + return json.Marshal((*net.IPNet)(ipnet).String()) +} + +// UnmarshalJSON sets the IPNet from a byte array of JSON +func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { + var ipnetStr string + if err = json.Unmarshal(b, &ipnetStr); err == nil { + var cidr *net.IPNet + if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { + *ipnet = NetIPNet(*cidr) + } + } + return +} + +// IndexInfo contains information about a registry +// +// RepositoryInfo Examples: +// +// { +// "Index" : { +// "Name" : "docker.io", +// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], +// "Secure" : true, +// "Official" : true, +// }, +// "RemoteName" : "library/debian", +// "LocalName" : "debian", +// "CanonicalName" : "docker.io/debian" +// "Official" : true, +// } +// +// { +// "Index" : { +// "Name" : "127.0.0.1:5000", +// "Mirrors" : [], +// "Secure" : false, +// "Official" : false, +// }, +// "RemoteName" : "user/repo", +// "LocalName" : "127.0.0.1:5000/user/repo", +// "CanonicalName" : "127.0.0.1:5000/user/repo", +// "Official" : false, +// } +type IndexInfo struct { + // Name is the name of the registry, such as "docker.io" + Name string + // Mirrors is a list of mirrors, expressed as URIs + Mirrors []string + // Secure is set to false if the registry is part of the list of + // insecure registries. Insecure registries accept HTTP and/or accept + // HTTPS with certificates from unknown CAs. + Secure bool + // Official indicates whether this is an official registry + Official bool +} + +// SearchResult describes a search result returned from a registry +type SearchResult struct { + // StarCount indicates the number of stars this repository has + StarCount int `json:"star_count"` + // IsOfficial is true if the result is from an official repository. 
+	IsOfficial bool `json:"is_official"`
+	// Name is the name of the repository
+	Name string `json:"name"`
+	// IsAutomated indicates whether the result is automated
+	IsAutomated bool `json:"is_automated"`
+	// Description is a textual description of the repository
+	Description string `json:"description"`
+}
+
+// SearchResults lists a collection of search results returned from a registry
+type SearchResults struct {
+	// Query contains the query string that generated the search results
+	Query string `json:"query"`
+	// NumResults indicates the number of results the query returned
+	NumResults int `json:"num_results"`
+	// Results is a slice containing the actual results for the search
+	Results []SearchResult `json:"results"`
+}
+
+// DistributionInspect describes the result obtained from contacting the
+// registry to retrieve image metadata
+type DistributionInspect struct {
+	// Descriptor contains information about the manifest, including
+	// the content addressable digest
+	Descriptor ocispec.Descriptor
+	// Platforms contains the list of platforms supported by the image,
+	// obtained by parsing the manifest
+	Platforms []ocispec.Platform
+}
diff --git a/vendor/github.com/docker/docker/api/types/service_update_response.go b/vendor/github.com/docker/docker/api/types/service_update_response.go
new file mode 100644
index 0000000000..74ea64b1bb
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/service_update_response.go
@@ -0,0 +1,12 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// ServiceUpdateResponse service update response
+// swagger:model ServiceUpdateResponse
+type ServiceUpdateResponse struct {
+
+	// Optional warning messages
+	Warnings []string `json:"Warnings"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/stats.go b/vendor/github.com/docker/docker/api/types/stats.go
new file mode 100644
index 0000000000..20daebed14
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/stats.go
@@ -0,0 +1,181 @@
+// Package types is used for API stability in the types and response to the
+// consumers of the API stats endpoint.
+package types // import "github.com/docker/docker/api/types"
+
+import "time"
+
+// ThrottlingData stores CPU throttling stats of one running container.
+// Not used on Windows.
+type ThrottlingData struct {
+	// Number of periods with throttling active
+	Periods uint64 `json:"periods"`
+	// Number of periods when the container hits its throttling limit.
+	ThrottledPeriods uint64 `json:"throttled_periods"`
+	// Aggregate time the container was throttled for in nanoseconds.
+	ThrottledTime uint64 `json:"throttled_time"`
+}
+
+// CPUUsage stores all CPU stats aggregated since container inception.
+type CPUUsage struct {
+	// Total CPU time consumed.
+	// Units: nanoseconds (Linux)
+	// Units: 100's of nanoseconds (Windows)
+	TotalUsage uint64 `json:"total_usage"`
+
+	// Total CPU time consumed per core (Linux). Not used on Windows.
+	// Units: nanoseconds.
+	PercpuUsage []uint64 `json:"percpu_usage,omitempty"`
+
+	// Time spent by tasks of the cgroup in kernel mode (Linux).
+	// Time spent by all container processes in kernel mode (Windows).
+	// Units: nanoseconds (Linux).
+	// Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers.
+	UsageInKernelmode uint64 `json:"usage_in_kernelmode"`
+
+	// Time spent by tasks of the cgroup in user mode (Linux).
+ // Time spent by all container processes in user mode (Windows). + // Units: nanoseconds (Linux). + // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers + UsageInUsermode uint64 `json:"usage_in_usermode"` +} + +// CPUStats aggregates and wraps all CPU related info of container +type CPUStats struct { + // CPU Usage. Linux and Windows. + CPUUsage CPUUsage `json:"cpu_usage"` + + // System Usage. Linux only. + SystemUsage uint64 `json:"system_cpu_usage,omitempty"` + + // Online CPUs. Linux only. + OnlineCPUs uint32 `json:"online_cpus,omitempty"` + + // Throttling Data. Linux only. + ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` +} + +// MemoryStats aggregates all memory stats since container inception on Linux. +// Windows returns stats for commit and private working set only. +type MemoryStats struct { + // Linux Memory Stats + + // current res_counter usage for memory + Usage uint64 `json:"usage,omitempty"` + // maximum usage ever recorded. + MaxUsage uint64 `json:"max_usage,omitempty"` + // TODO(vishh): Export these as stronger types. + // all the stats exported via memory.stat. + Stats map[string]uint64 `json:"stats,omitempty"` + // number of times memory usage hits limits. + Failcnt uint64 `json:"failcnt,omitempty"` + Limit uint64 `json:"limit,omitempty"` + + // Windows Memory Stats + // See https://technet.microsoft.com/en-us/magazine/ff382715.aspx + + // committed bytes + Commit uint64 `json:"commitbytes,omitempty"` + // peak committed bytes + CommitPeak uint64 `json:"commitpeakbytes,omitempty"` + // private working set + PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"` +} + +// BlkioStatEntry is one small entity to store a piece of Blkio stats +// Not used on Windows. +type BlkioStatEntry struct { + Major uint64 `json:"major"` + Minor uint64 `json:"minor"` + Op string `json:"op"` + Value uint64 `json:"value"` +} + +// BlkioStats stores All IO service stats for data read and write. +// This is a Linux specific structure as the differences between expressing +// block I/O on Windows and Linux are sufficiently significant to make +// little sense attempting to morph into a combined structure. +type BlkioStats struct { + // number of bytes transferred to and from the block device + IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"` + IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"` + IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"` + IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"` + IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"` + IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"` + IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"` + SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"` +} + +// StorageStats is the disk I/O stats for read/write on Windows. +type StorageStats struct { + ReadCountNormalized uint64 `json:"read_count_normalized,omitempty"` + ReadSizeBytes uint64 `json:"read_size_bytes,omitempty"` + WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"` + WriteSizeBytes uint64 `json:"write_size_bytes,omitempty"` +} + +// NetworkStats aggregates the network stats of one container +type NetworkStats struct { + // Bytes received. Windows and Linux. + RxBytes uint64 `json:"rx_bytes"` + // Packets received. Windows and Linux. + RxPackets uint64 `json:"rx_packets"` + // Received errors. Not used on Windows. 
+ // field as it is expected in the >=v1.21 API stats structure.
+ RxErrors uint64 `json:"rx_errors"`
+ // Incoming packets dropped. Windows and Linux.
+ RxDropped uint64 `json:"rx_dropped"`
+ // Bytes sent. Windows and Linux.
+ TxBytes uint64 `json:"tx_bytes"`
+ // Packets sent. Windows and Linux.
+ TxPackets uint64 `json:"tx_packets"`
+ // Sent errors. Not used on Windows. Note that we don't `omitempty` this
+ // field as it is expected in the >=v1.21 API stats structure.
+ TxErrors uint64 `json:"tx_errors"`
+ // Outgoing packets dropped. Windows and Linux.
+ TxDropped uint64 `json:"tx_dropped"`
+ // Endpoint ID. Not used on Linux.
+ EndpointID string `json:"endpoint_id,omitempty"`
+ // Instance ID. Not used on Linux.
+ InstanceID string `json:"instance_id,omitempty"`
+}
+
+// PidsStats contains the stats of a container's pids
+type PidsStats struct {
+ // Current is the number of pids in the cgroup
+ Current uint64 `json:"current,omitempty"`
+ // Limit is the hard limit on the number of pids in the cgroup.
+ // A "Limit" of 0 means that there is no limit.
+ Limit uint64 `json:"limit,omitempty"`
+}
+
+// Stats is the top-level struct aggregating all types of stats of one container
+type Stats struct {
+ // Common stats
+ Read time.Time `json:"read"`
+ PreRead time.Time `json:"preread"`
+
+ // Linux specific stats, not populated on Windows.
+ PidsStats PidsStats `json:"pids_stats,omitempty"`
+ BlkioStats BlkioStats `json:"blkio_stats,omitempty"`
+
+ // Windows specific stats, not populated on Linux.
+ NumProcs uint32 `json:"num_procs"`
+ StorageStats StorageStats `json:"storage_stats,omitempty"`
+
+ // Shared stats
+ CPUStats CPUStats `json:"cpu_stats,omitempty"`
+ PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous"
+ MemoryStats MemoryStats `json:"memory_stats,omitempty"`
+}
+
+// StatsJSON extends Stats with the container name and ID, plus the
+// per-network stats added in API version 1.21.
+type StatsJSON struct {
+ Stats
+
+ Name string `json:"name,omitempty"`
+ ID string `json:"id,omitempty"`
+
+ // Networks request version >=1.21
+ Networks map[string]NetworkStats `json:"networks,omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/strslice/strslice.go b/vendor/github.com/docker/docker/api/types/strslice/strslice.go
new file mode 100644
index 0000000000..82921cebc1
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/strslice/strslice.go
@@ -0,0 +1,30 @@
+package strslice // import "github.com/docker/docker/api/types/strslice"
+
+import "encoding/json"
+
+// StrSlice represents a string or an array of strings.
+// We need to override the json decoder to accept both options.
+type StrSlice []string
+
+// UnmarshalJSON decodes the byte slice whether it's a string or an array of
+// strings. This method is needed to implement json.Unmarshaler.
+func (e *StrSlice) UnmarshalJSON(b []byte) error {
+ if len(b) == 0 {
+ // With no input, we preserve the existing value by returning nil and
+ // leaving the target alone. This allows defining default values for
+ // the type.
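+ // As an illustrative sketch (not part of the upstream source), both
+ // input shapes are accepted:
+ //
+ //	var s StrSlice
+ //	_ = json.Unmarshal([]byte(`"echo hello"`), &s)      // StrSlice{"echo hello"}
+ //	_ = json.Unmarshal([]byte(`["echo", "hello"]`), &s) // StrSlice{"echo", "hello"}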
+ return nil
+ }
+
+ p := make([]string, 0, 1)
+ if err := json.Unmarshal(b, &p); err != nil {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ p = append(p, s)
+ }
+
+ *e = p
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/common.go b/vendor/github.com/docker/docker/api/types/swarm/common.go
new file mode 100644
index 0000000000..5ded7dba8a
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/common.go
@@ -0,0 +1,48 @@
+package swarm // import "github.com/docker/docker/api/types/swarm"
+
+import (
+ "strconv"
+ "time"
+)
+
+// Version represents the internal object version.
+type Version struct {
+ Index uint64 `json:",omitempty"`
+}
+
+// String implements fmt.Stringer interface.
+func (v Version) String() string {
+ return strconv.FormatUint(v.Index, 10)
+}
+
+// Meta is a base object inherited by most of the other ones.
+type Meta struct {
+ Version Version `json:",omitempty"`
+ CreatedAt time.Time `json:",omitempty"`
+ UpdatedAt time.Time `json:",omitempty"`
+}
+
+// Annotations represents how to describe an object.
+type Annotations struct {
+ Name string `json:",omitempty"`
+ Labels map[string]string `json:"Labels"`
+}
+
+// Driver represents a driver (network, logging, secrets backend).
+type Driver struct {
+ Name string `json:",omitempty"`
+ Options map[string]string `json:",omitempty"`
+}
+
+// TLSInfo represents the TLS information about what CA certificate is trusted,
+// and who the issuer for a TLS certificate is
+type TLSInfo struct {
+ // TrustRoot is the trusted CA root certificate in PEM format
+ TrustRoot string `json:",omitempty"`
+
+ // CertIssuerSubject is the raw subject bytes of the issuer
+ CertIssuerSubject []byte `json:",omitempty"`
+
+ // CertIssuerPublicKey is the raw public key bytes of the issuer
+ CertIssuerPublicKey []byte `json:",omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/config.go b/vendor/github.com/docker/docker/api/types/swarm/config.go
new file mode 100644
index 0000000000..16202ccce6
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/config.go
@@ -0,0 +1,40 @@
+package swarm // import "github.com/docker/docker/api/types/swarm"
+
+import "os"
+
+// Config represents a config.
+type Config struct {
+ ID string
+ Meta
+ Spec ConfigSpec
+}
+
+// ConfigSpec represents a config specification from a config in swarm
+type ConfigSpec struct {
+ Annotations
+ Data []byte `json:",omitempty"`
+
+ // Templating controls whether and how to evaluate the config payload as
+ // a template. If it is not set, no templating is used.
+ Templating *Driver `json:",omitempty"`
+}
+
+// ConfigReferenceFileTarget is a file target in a config reference
+type ConfigReferenceFileTarget struct {
+ Name string
+ UID string
+ GID string
+ Mode os.FileMode
+}
+
+// ConfigReferenceRuntimeTarget is a target for a config specifying that it
+// isn't mounted into the container but instead has some other purpose.
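+// A hypothetical runtime-targeted reference (illustrative values only, not
+// taken from the upstream source):
+//
+//	ConfigReference{
+//		Runtime:    &ConfigReferenceRuntimeTarget{},
+//		ConfigID:   "cfg-123",
+//		ConfigName: "my-credential-spec",
+//	}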
+type ConfigReferenceRuntimeTarget struct{} + +// ConfigReference is a reference to a config in swarm +type ConfigReference struct { + File *ConfigReferenceFileTarget `json:",omitempty"` + Runtime *ConfigReferenceRuntimeTarget `json:",omitempty"` + ConfigID string + ConfigName string +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go new file mode 100644 index 0000000000..af5e1c0bc2 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/container.go @@ -0,0 +1,80 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import ( + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/mount" + "github.com/docker/go-units" +) + +// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf) +// Detailed documentation is available in: +// http://man7.org/linux/man-pages/man5/resolv.conf.5.html +// `nameserver`, `search`, `options` have been supported. +// TODO: `domain` is not supported yet. +type DNSConfig struct { + // Nameservers specifies the IP addresses of the name servers + Nameservers []string `json:",omitempty"` + // Search specifies the search list for host-name lookup + Search []string `json:",omitempty"` + // Options allows certain internal resolver variables to be modified + Options []string `json:",omitempty"` +} + +// SELinuxContext contains the SELinux labels of the container. +type SELinuxContext struct { + Disable bool + + User string + Role string + Type string + Level string +} + +// CredentialSpec for managed service account (Windows only) +type CredentialSpec struct { + Config string + File string + Registry string +} + +// Privileges defines the security options for the container. +type Privileges struct { + CredentialSpec *CredentialSpec + SELinuxContext *SELinuxContext +} + +// ContainerSpec represents the spec of a container. +type ContainerSpec struct { + Image string `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + Command []string `json:",omitempty"` + Args []string `json:",omitempty"` + Hostname string `json:",omitempty"` + Env []string `json:",omitempty"` + Dir string `json:",omitempty"` + User string `json:",omitempty"` + Groups []string `json:",omitempty"` + Privileges *Privileges `json:",omitempty"` + Init *bool `json:",omitempty"` + StopSignal string `json:",omitempty"` + TTY bool `json:",omitempty"` + OpenStdin bool `json:",omitempty"` + ReadOnly bool `json:",omitempty"` + Mounts []mount.Mount `json:",omitempty"` + StopGracePeriod *time.Duration `json:",omitempty"` + Healthcheck *container.HealthConfig `json:",omitempty"` + // The format of extra hosts on swarmkit is specified in: + // http://man7.org/linux/man-pages/man5/hosts.5.html + // IP_address canonical_hostname [aliases...] 
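+ // For instance (illustrative value, not from the upstream source):
+ //
+ //	Hosts: []string{"10.0.0.5 db.internal db"}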
+ Hosts []string `json:",omitempty"` + DNSConfig *DNSConfig `json:",omitempty"` + Secrets []*SecretReference `json:",omitempty"` + Configs []*ConfigReference `json:",omitempty"` + Isolation container.Isolation `json:",omitempty"` + Sysctls map[string]string `json:",omitempty"` + CapabilityAdd []string `json:",omitempty"` + CapabilityDrop []string `json:",omitempty"` + Ulimits []*units.Ulimit `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/network.go b/vendor/github.com/docker/docker/api/types/swarm/network.go new file mode 100644 index 0000000000..98ef3284d1 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/network.go @@ -0,0 +1,121 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import ( + "github.com/docker/docker/api/types/network" +) + +// Endpoint represents an endpoint. +type Endpoint struct { + Spec EndpointSpec `json:",omitempty"` + Ports []PortConfig `json:",omitempty"` + VirtualIPs []EndpointVirtualIP `json:",omitempty"` +} + +// EndpointSpec represents the spec of an endpoint. +type EndpointSpec struct { + Mode ResolutionMode `json:",omitempty"` + Ports []PortConfig `json:",omitempty"` +} + +// ResolutionMode represents a resolution mode. +type ResolutionMode string + +const ( + // ResolutionModeVIP VIP + ResolutionModeVIP ResolutionMode = "vip" + // ResolutionModeDNSRR DNSRR + ResolutionModeDNSRR ResolutionMode = "dnsrr" +) + +// PortConfig represents the config of a port. +type PortConfig struct { + Name string `json:",omitempty"` + Protocol PortConfigProtocol `json:",omitempty"` + // TargetPort is the port inside the container + TargetPort uint32 `json:",omitempty"` + // PublishedPort is the port on the swarm hosts + PublishedPort uint32 `json:",omitempty"` + // PublishMode is the mode in which port is published + PublishMode PortConfigPublishMode `json:",omitempty"` +} + +// PortConfigPublishMode represents the mode in which the port is to +// be published. +type PortConfigPublishMode string + +const ( + // PortConfigPublishModeIngress is used for ports published + // for ingress load balancing using routing mesh. + PortConfigPublishModeIngress PortConfigPublishMode = "ingress" + // PortConfigPublishModeHost is used for ports published + // for direct host level access on the host where the task is running. + PortConfigPublishModeHost PortConfigPublishMode = "host" +) + +// PortConfigProtocol represents the protocol of a port. +type PortConfigProtocol string + +const ( + // TODO(stevvooe): These should be used generally, not just for PortConfig. + + // PortConfigProtocolTCP TCP + PortConfigProtocolTCP PortConfigProtocol = "tcp" + // PortConfigProtocolUDP UDP + PortConfigProtocolUDP PortConfigProtocol = "udp" + // PortConfigProtocolSCTP SCTP + PortConfigProtocolSCTP PortConfigProtocol = "sctp" +) + +// EndpointVirtualIP represents the virtual ip of a port. +type EndpointVirtualIP struct { + NetworkID string `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// Network represents a network. +type Network struct { + ID string + Meta + Spec NetworkSpec `json:",omitempty"` + DriverState Driver `json:",omitempty"` + IPAMOptions *IPAMOptions `json:",omitempty"` +} + +// NetworkSpec represents the spec of a network. 
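+//
+// A minimal sketch of an attachable overlay network (assumed values):
+//
+//	NetworkSpec{
+//		Annotations:         Annotations{Name: "app-net"},
+//		DriverConfiguration: &Driver{Name: "overlay"},
+//		Attachable:          true,
+//	}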
+type NetworkSpec struct { + Annotations + DriverConfiguration *Driver `json:",omitempty"` + IPv6Enabled bool `json:",omitempty"` + Internal bool `json:",omitempty"` + Attachable bool `json:",omitempty"` + Ingress bool `json:",omitempty"` + IPAMOptions *IPAMOptions `json:",omitempty"` + ConfigFrom *network.ConfigReference `json:",omitempty"` + Scope string `json:",omitempty"` +} + +// NetworkAttachmentConfig represents the configuration of a network attachment. +type NetworkAttachmentConfig struct { + Target string `json:",omitempty"` + Aliases []string `json:",omitempty"` + DriverOpts map[string]string `json:",omitempty"` +} + +// NetworkAttachment represents a network attachment. +type NetworkAttachment struct { + Network Network `json:",omitempty"` + Addresses []string `json:",omitempty"` +} + +// IPAMOptions represents ipam options. +type IPAMOptions struct { + Driver Driver `json:",omitempty"` + Configs []IPAMConfig `json:",omitempty"` +} + +// IPAMConfig represents ipam configuration. +type IPAMConfig struct { + Subnet string `json:",omitempty"` + Range string `json:",omitempty"` + Gateway string `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/node.go b/vendor/github.com/docker/docker/api/types/swarm/node.go new file mode 100644 index 0000000000..bb98d5eedc --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/node.go @@ -0,0 +1,139 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +// Node represents a node. +type Node struct { + ID string + Meta + // Spec defines the desired state of the node as specified by the user. + // The system will honor this and will *never* modify it. + Spec NodeSpec `json:",omitempty"` + // Description encapsulates the properties of the Node as reported by the + // agent. + Description NodeDescription `json:",omitempty"` + // Status provides the current status of the node, as seen by the manager. + Status NodeStatus `json:",omitempty"` + // ManagerStatus provides the current status of the node's manager + // component, if the node is a manager. + ManagerStatus *ManagerStatus `json:",omitempty"` +} + +// NodeSpec represents the spec of a node. +type NodeSpec struct { + Annotations + Role NodeRole `json:",omitempty"` + Availability NodeAvailability `json:",omitempty"` +} + +// NodeRole represents the role of a node. +type NodeRole string + +const ( + // NodeRoleWorker WORKER + NodeRoleWorker NodeRole = "worker" + // NodeRoleManager MANAGER + NodeRoleManager NodeRole = "manager" +) + +// NodeAvailability represents the availability of a node. +type NodeAvailability string + +const ( + // NodeAvailabilityActive ACTIVE + NodeAvailabilityActive NodeAvailability = "active" + // NodeAvailabilityPause PAUSE + NodeAvailabilityPause NodeAvailability = "pause" + // NodeAvailabilityDrain DRAIN + NodeAvailabilityDrain NodeAvailability = "drain" +) + +// NodeDescription represents the description of a node. +type NodeDescription struct { + Hostname string `json:",omitempty"` + Platform Platform `json:",omitempty"` + Resources Resources `json:",omitempty"` + Engine EngineDescription `json:",omitempty"` + TLSInfo TLSInfo `json:",omitempty"` + CSIInfo []NodeCSIInfo `json:",omitempty"` +} + +// Platform represents the platform (Arch/OS). +type Platform struct { + Architecture string `json:",omitempty"` + OS string `json:",omitempty"` +} + +// EngineDescription represents the description of an engine. 
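+//
+// A hypothetical description (values assumed, not taken from a real node):
+//
+//	EngineDescription{
+//		EngineVersion: "24.0.7",
+//		Plugins:       []PluginDescription{{Type: "Volume", Name: "local"}},
+//	}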
+type EngineDescription struct { + EngineVersion string `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + Plugins []PluginDescription `json:",omitempty"` +} + +// NodeCSIInfo represents information about a CSI plugin available on the node +type NodeCSIInfo struct { + // PluginName is the name of the CSI plugin. + PluginName string `json:",omitempty"` + // NodeID is the ID of the node as reported by the CSI plugin. This is + // different from the swarm node ID. + NodeID string `json:",omitempty"` + // MaxVolumesPerNode is the maximum number of volumes that may be published + // to this node + MaxVolumesPerNode int64 `json:",omitempty"` + // AccessibleTopology indicates the location of this node in the CSI + // plugin's topology + AccessibleTopology *Topology `json:",omitempty"` +} + +// PluginDescription represents the description of an engine plugin. +type PluginDescription struct { + Type string `json:",omitempty"` + Name string `json:",omitempty"` +} + +// NodeStatus represents the status of a node. +type NodeStatus struct { + State NodeState `json:",omitempty"` + Message string `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// Reachability represents the reachability of a node. +type Reachability string + +const ( + // ReachabilityUnknown UNKNOWN + ReachabilityUnknown Reachability = "unknown" + // ReachabilityUnreachable UNREACHABLE + ReachabilityUnreachable Reachability = "unreachable" + // ReachabilityReachable REACHABLE + ReachabilityReachable Reachability = "reachable" +) + +// ManagerStatus represents the status of a manager. +type ManagerStatus struct { + Leader bool `json:",omitempty"` + Reachability Reachability `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// NodeState represents the state of a node. +type NodeState string + +const ( + // NodeStateUnknown UNKNOWN + NodeStateUnknown NodeState = "unknown" + // NodeStateDown DOWN + NodeStateDown NodeState = "down" + // NodeStateReady READY + NodeStateReady NodeState = "ready" + // NodeStateDisconnected DISCONNECTED + NodeStateDisconnected NodeState = "disconnected" +) + +// Topology defines the CSI topology of this node. This type is a duplicate of +// github.com/docker/docker/api/types.Topology. Because the type definition +// is so simple and to avoid complicated structure or circular imports, we just +// duplicate it here. 
See that type for full documentation +type Topology struct { + Segments map[string]string `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime.go b/vendor/github.com/docker/docker/api/types/swarm/runtime.go new file mode 100644 index 0000000000..0c77403ccf --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime.go @@ -0,0 +1,27 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +// RuntimeType is the type of runtime used for the TaskSpec +type RuntimeType string + +// RuntimeURL is the proto type url +type RuntimeURL string + +const ( + // RuntimeContainer is the container based runtime + RuntimeContainer RuntimeType = "container" + // RuntimePlugin is the plugin based runtime + RuntimePlugin RuntimeType = "plugin" + // RuntimeNetworkAttachment is the network attachment runtime + RuntimeNetworkAttachment RuntimeType = "attachment" + + // RuntimeURLContainer is the proto url for the container type + RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer" + // RuntimeURLPlugin is the proto url for the plugin type + RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin" +) + +// NetworkAttachmentSpec represents the runtime spec type for network +// attachment tasks +type NetworkAttachmentSpec struct { + ContainerID string +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go new file mode 100644 index 0000000000..98c2806c31 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go @@ -0,0 +1,3 @@ +//go:generate protoc -I . --gogofast_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto + +package runtime // import "github.com/docker/docker/api/types/swarm/runtime" diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go new file mode 100644 index 0000000000..e45045866a --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go @@ -0,0 +1,754 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: plugin.proto + +/* + Package runtime is a generated protocol buffer package. + + It is generated from these files: + plugin.proto + + It has these top-level messages: + PluginSpec + PluginPrivilege +*/ +package runtime + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// PluginSpec defines the base payload which clients can specify for creating +// a service with the plugin runtime. 
+type PluginSpec struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"` + Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges" json:"privileges,omitempty"` + Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"` + Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"` +} + +func (m *PluginSpec) Reset() { *m = PluginSpec{} } +func (m *PluginSpec) String() string { return proto.CompactTextString(m) } +func (*PluginSpec) ProtoMessage() {} +func (*PluginSpec) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} } + +func (m *PluginSpec) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PluginSpec) GetRemote() string { + if m != nil { + return m.Remote + } + return "" +} + +func (m *PluginSpec) GetPrivileges() []*PluginPrivilege { + if m != nil { + return m.Privileges + } + return nil +} + +func (m *PluginSpec) GetDisabled() bool { + if m != nil { + return m.Disabled + } + return false +} + +func (m *PluginSpec) GetEnv() []string { + if m != nil { + return m.Env + } + return nil +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. +type PluginPrivilege struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + Value []string `protobuf:"bytes,3,rep,name=value" json:"value,omitempty"` +} + +func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} } +func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) } +func (*PluginPrivilege) ProtoMessage() {} +func (*PluginPrivilege) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} } + +func (m *PluginPrivilege) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PluginPrivilege) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *PluginPrivilege) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*PluginSpec)(nil), "PluginSpec") + proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege") +} +func (m *PluginSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Remote) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote))) + i += copy(dAtA[i:], m.Remote) + } + if len(m.Privileges) > 0 { + for _, msg := range m.Privileges { + dAtA[i] = 0x1a + i++ + i = encodeVarintPlugin(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Disabled { + dAtA[i] = 0x20 + i++ + if m.Disabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Env) > 0 { + for _, s := range m.Env { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *PluginPrivilege) Marshal() 
(dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Description) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description))) + i += copy(dAtA[i:], m.Description) + } + if len(m.Value) > 0 { + for _, s := range m.Value { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *PluginSpec) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + l = len(m.Remote) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + if len(m.Privileges) > 0 { + for _, e := range m.Privileges { + l = e.Size() + n += 1 + l + sovPlugin(uint64(l)) + } + } + if m.Disabled { + n += 2 + } + if len(m.Env) > 0 { + for _, s := range m.Env { + l = len(s) + n += 1 + l + sovPlugin(uint64(l)) + } + } + return n +} + +func (m *PluginPrivilege) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + l = len(m.Description) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + if len(m.Value) > 0 { + for _, s := range m.Value { + l = len(s) + n += 1 + l + sovPlugin(uint64(l)) + } + } + return n +} + +func sovPlugin(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozPlugin(x uint64) (n int) { + return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *PluginSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PluginSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PluginSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Remote = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Privileges = append(m.Privileges, &PluginPrivilege{}) + if err := m.Privileges[len(m.Privileges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Disabled = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PluginPrivilege: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PluginPrivilege: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen 
:= int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPlugin(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthPlugin + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipPlugin(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) 
+ } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) } + +var fileDescriptorPlugin = []byte{ + // 256 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x4d, 0x4b, 0xc3, 0x30, + 0x18, 0xc7, 0x89, 0xdd, 0xc6, 0xfa, 0x4c, 0x70, 0x04, 0x91, 0xe2, 0xa1, 0x94, 0x9d, 0x7a, 0x6a, + 0x45, 0x2f, 0x82, 0x37, 0x0f, 0x9e, 0x47, 0xbc, 0x09, 0x1e, 0xd2, 0xf6, 0xa1, 0x06, 0x9b, 0x17, + 0x92, 0xb4, 0xe2, 0x37, 0xf1, 0x23, 0x79, 0xf4, 0x23, 0x48, 0x3f, 0x89, 0x98, 0x75, 0x32, 0x64, + 0xa7, 0xff, 0x4b, 0xc2, 0x9f, 0x1f, 0x0f, 0x9c, 0x9a, 0xae, 0x6f, 0x85, 0x2a, 0x8c, 0xd5, 0x5e, + 0x6f, 0x3e, 0x08, 0xc0, 0x36, 0x14, 0x8f, 0x06, 0x6b, 0x4a, 0x61, 0xa6, 0xb8, 0xc4, 0x84, 0x64, + 0x24, 0x8f, 0x59, 0xf0, 0xf4, 0x02, 0x16, 0x16, 0xa5, 0xf6, 0x98, 0x9c, 0x84, 0x76, 0x4a, 0xf4, + 0x0a, 0xc0, 0x58, 0x31, 0x88, 0x0e, 0x5b, 0x74, 0x49, 0x94, 0x45, 0xf9, 0xea, 0x7a, 0x5d, 0xec, + 0xc6, 0xb6, 0xfb, 0x07, 0x76, 0xf0, 0x87, 0x5e, 0xc2, 0xb2, 0x11, 0x8e, 0x57, 0x1d, 0x36, 0xc9, + 0x2c, 0x23, 0xf9, 0x92, 0xfd, 0x65, 0xba, 0x86, 0x08, 0xd5, 0x90, 0xcc, 0xb3, 0x28, 0x8f, 0xd9, + 0xaf, 0xdd, 0x3c, 0xc3, 0xd9, 0xbf, 0xb1, 0xa3, 0x78, 0x19, 0xac, 0x1a, 0x74, 0xb5, 0x15, 0xc6, + 0x0b, 0xad, 0x26, 0xc6, 0xc3, 0x8a, 0x9e, 0xc3, 0x7c, 0xe0, 0x5d, 0x8f, 0x81, 0x31, 0x66, 0xbb, + 0x70, 0xff, 0xf0, 0x39, 0xa6, 0xe4, 0x6b, 0x4c, 0xc9, 0xf7, 0x98, 0x92, 0xa7, 0xdb, 0x56, 0xf8, + 0x97, 0xbe, 0x2a, 0x6a, 0x2d, 0xcb, 0x46, 0xd7, 0xaf, 0x68, 0xf7, 0xc2, 0x8d, 0x28, 0xfd, 0xbb, + 0x41, 0x57, 0xba, 0x37, 0x6e, 0x65, 0x69, 0x7b, 0xe5, 0x85, 0xc4, 0xbb, 0x49, 0xab, 0x45, 0x38, + 0xe4, 0xcd, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x99, 0xa8, 0xd9, 0x9b, 0x58, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto new file mode 100644 index 0000000000..9ef169046b --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; + +option go_package = "github.com/docker/docker/api/types/swarm/runtime;runtime"; + +// PluginSpec defines the base payload which clients can specify for creating +// a service with the plugin runtime. +message PluginSpec { + string name = 1; + string remote = 2; + repeated PluginPrivilege privileges = 3; + bool disabled = 4; + repeated string env = 5; +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. +message PluginPrivilege { + string name = 1; + string description = 2; + repeated string value = 3; +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/secret.go b/vendor/github.com/docker/docker/api/types/swarm/secret.go new file mode 100644 index 0000000000..d5213ec981 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/secret.go @@ -0,0 +1,36 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import "os" + +// Secret represents a secret. 
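+// A minimal sketch (assumed values; note that secret data is not returned
+// when inspecting a secret):
+//
+//	Secret{
+//		ID: "secret-123",
+//		Spec: SecretSpec{
+//			Annotations: Annotations{Name: "site-key"},
+//			Data:        []byte("top-secret-value"),
+//		},
+//	}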
+type Secret struct {
+ ID string
+ Meta
+ Spec SecretSpec
+}
+
+// SecretSpec represents a secret specification from a secret in swarm
+type SecretSpec struct {
+ Annotations
+ Data []byte `json:",omitempty"`
+ Driver *Driver `json:",omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store
+
+ // Templating controls whether and how to evaluate the secret payload as
+ // a template. If it is not set, no templating is used.
+ Templating *Driver `json:",omitempty"`
+}
+
+// SecretReferenceFileTarget is a file target in a secret reference
+type SecretReferenceFileTarget struct {
+ Name string
+ UID string
+ GID string
+ Mode os.FileMode
+}
+
+// SecretReference is a reference to a secret in swarm
+type SecretReference struct {
+ File *SecretReferenceFileTarget
+ SecretID string
+ SecretName string
+}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/service.go b/vendor/github.com/docker/docker/api/types/swarm/service.go
new file mode 100644
index 0000000000..6eb452d24d
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/service.go
@@ -0,0 +1,202 @@
+package swarm // import "github.com/docker/docker/api/types/swarm"
+
+import "time"
+
+// Service represents a service.
+type Service struct {
+ ID string
+ Meta
+ Spec ServiceSpec `json:",omitempty"`
+ PreviousSpec *ServiceSpec `json:",omitempty"`
+ Endpoint Endpoint `json:",omitempty"`
+ UpdateStatus *UpdateStatus `json:",omitempty"`
+
+ // ServiceStatus is an optional, extra field indicating the number of
+ // desired and running tasks. It is provided primarily as a shortcut to
+ // calculating these values client-side, which otherwise would require
+ // listing all tasks for a service, an operation that could be
+ // computationally and network expensive.
+ ServiceStatus *ServiceStatus `json:",omitempty"`
+
+ // JobStatus is the status of a Service which is in one of ReplicatedJob or
+ // GlobalJob modes. It is absent on Replicated and Global services.
+ JobStatus *JobStatus `json:",omitempty"`
+}
+
+// ServiceSpec represents the spec of a service.
+type ServiceSpec struct {
+ Annotations
+
+ // TaskTemplate defines how the service should construct new tasks when
+ // orchestrating this service.
+ TaskTemplate TaskSpec `json:",omitempty"`
+ Mode ServiceMode `json:",omitempty"`
+ UpdateConfig *UpdateConfig `json:",omitempty"`
+ RollbackConfig *UpdateConfig `json:",omitempty"`
+
+ // Networks field in ServiceSpec is deprecated. The
+ // same field in TaskSpec should be used instead.
+ // This field will be removed in a future release.
+ Networks []NetworkAttachmentConfig `json:",omitempty"`
+ EndpointSpec *EndpointSpec `json:",omitempty"`
+}
+
+// ServiceMode represents the mode of a service.
+type ServiceMode struct {
+ Replicated *ReplicatedService `json:",omitempty"`
+ Global *GlobalService `json:",omitempty"`
+ ReplicatedJob *ReplicatedJob `json:",omitempty"`
+ GlobalJob *GlobalJob `json:",omitempty"`
+}
+
+// UpdateState is the state of a service update.
+type UpdateState string
+
+const (
+ // UpdateStateUpdating is the updating state.
+ UpdateStateUpdating UpdateState = "updating"
+ // UpdateStatePaused is the paused state.
+ UpdateStatePaused UpdateState = "paused"
+ // UpdateStateCompleted is the completed state.
+ UpdateStateCompleted UpdateState = "completed"
+ // UpdateStateRollbackStarted is the state with a rollback in progress.
+ UpdateStateRollbackStarted UpdateState = "rollback_started"
+ // UpdateStateRollbackPaused is the state with a rollback paused.
+ UpdateStateRollbackPaused UpdateState = "rollback_paused"
+ // UpdateStateRollbackCompleted is the state with a rollback completed.
+ UpdateStateRollbackCompleted UpdateState = "rollback_completed"
+)
+
+// UpdateStatus reports the status of a service update.
+type UpdateStatus struct {
+ State UpdateState `json:",omitempty"`
+ StartedAt *time.Time `json:",omitempty"`
+ CompletedAt *time.Time `json:",omitempty"`
+ Message string `json:",omitempty"`
+}
+
+// ReplicatedService is a kind of ServiceMode.
+type ReplicatedService struct {
+ Replicas *uint64 `json:",omitempty"`
+}
+
+// GlobalService is a kind of ServiceMode.
+type GlobalService struct{}
+
+// ReplicatedJob is a type of Service which executes Tasks in parallel
+// until the specified number of Tasks have succeeded.
+type ReplicatedJob struct {
+ // MaxConcurrent indicates the maximum number of Tasks that should be
+ // executing simultaneously for this job at any given time. There may be
+ // fewer Tasks than MaxConcurrent executing simultaneously; for example, if
+ // there are fewer than MaxConcurrent tasks needed to reach
+ // TotalCompletions.
+ //
+ // If this field is empty, it will default to a max concurrency of 1.
+ MaxConcurrent *uint64 `json:",omitempty"`
+
+ // TotalCompletions is the total number of Tasks desired to run to
+ // completion.
+ //
+ // If this field is empty, the value of MaxConcurrent will be used.
+ TotalCompletions *uint64 `json:",omitempty"`
+}
+
+// GlobalJob is the type of a Service which executes a Task on every Node
+// matching the Service's placement constraints. These tasks run to completion
+// and then exit.
+//
+// This type is deliberately empty.
+type GlobalJob struct{}
+
+const (
+ // UpdateFailureActionPause PAUSE
+ UpdateFailureActionPause = "pause"
+ // UpdateFailureActionContinue CONTINUE
+ UpdateFailureActionContinue = "continue"
+ // UpdateFailureActionRollback ROLLBACK
+ UpdateFailureActionRollback = "rollback"
+
+ // UpdateOrderStopFirst STOP_FIRST
+ UpdateOrderStopFirst = "stop-first"
+ // UpdateOrderStartFirst START_FIRST
+ UpdateOrderStartFirst = "start-first"
+)
+
+// UpdateConfig represents the update configuration.
+type UpdateConfig struct {
+ // Maximum number of tasks to be updated in one iteration.
+ // 0 means unlimited parallelism.
+ Parallelism uint64
+
+ // Amount of time between updates.
+ Delay time.Duration `json:",omitempty"`
+
+ // FailureAction is the action to take when an update fails.
+ FailureAction string `json:",omitempty"`
+
+ // Monitor indicates how long to monitor a task for failure after it is
+ // created. If the task fails by ending up in one of the states
+ // REJECTED, COMPLETED, or FAILED, within Monitor from its creation,
+ // this counts as a failure. If it fails after Monitor, it does not
+ // count as a failure. If Monitor is unspecified, a default value will
+ // be used.
+ Monitor time.Duration `json:",omitempty"`
+
+ // MaxFailureRatio is the fraction of tasks that may fail during
+ // an update before the failure action is invoked. Any task created by
+ // the current update which ends up in one of the states REJECTED,
+ // COMPLETED or FAILED within Monitor from its creation counts as a
+ // failure. The number of failures is divided by the number of tasks
+ // being updated, and if this fraction is greater than
+ // MaxFailureRatio, the failure action is invoked.
+ // + // If the failure action is CONTINUE, there is no effect. + // If the failure action is PAUSE, no more tasks will be updated until + // another update is started. + MaxFailureRatio float32 + + // Order indicates the order of operations when rolling out an updated + // task. Either the old task is shut down before the new task is + // started, or the new task is started before the old task is shut down. + Order string +} + +// ServiceStatus represents the number of running tasks in a service and the +// number of tasks desired to be running. +type ServiceStatus struct { + // RunningTasks is the number of tasks for the service actually in the + // Running state + RunningTasks uint64 + + // DesiredTasks is the number of tasks desired to be running by the + // service. For replicated services, this is the replica count. For global + // services, this is computed by taking the number of tasks with desired + // state of not-Shutdown. + DesiredTasks uint64 + + // CompletedTasks is the number of tasks in the state Completed, if this + // service is in ReplicatedJob or GlobalJob mode. This field must be + // cross-referenced with the service type, because the default value of 0 + // may mean that a service is not in a job mode, or it may mean that the + // job has yet to complete any tasks. + CompletedTasks uint64 +} + +// JobStatus is the status of a job-type service. +type JobStatus struct { + // JobIteration is a value increased each time a Job is executed, + // successfully or otherwise. "Executed", in this case, means the job as a + // whole has been started, not that an individual Task has been launched. A + // job is "Executed" when its ServiceSpec is updated. JobIteration can be + // used to disambiguate Tasks belonging to different executions of a job. + // + // Though JobIteration will increase with each subsequent execution, it may + // not necessarily increase by 1, and so JobIteration should not be used to + // keep track of the number of times a job has been executed. + JobIteration Version + + // LastExecution is the time that the job was last executed, as observed by + // Swarm manager. + LastExecution time.Time `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go new file mode 100644 index 0000000000..3eae4b9b29 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/swarm.go @@ -0,0 +1,237 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import ( + "time" +) + +// ClusterInfo represents info about the cluster for outputting in "info" +// it contains the same information as "Swarm", but without the JoinTokens +type ClusterInfo struct { + ID string + Meta + Spec Spec + TLSInfo TLSInfo + RootRotationInProgress bool + DefaultAddrPool []string + SubnetSize uint32 + DataPathPort uint32 +} + +// Swarm represents a swarm. +type Swarm struct { + ClusterInfo + JoinTokens JoinTokens +} + +// JoinTokens contains the tokens workers and managers need to join the swarm. +type JoinTokens struct { + // Worker is the join token workers may use to join the swarm. + Worker string + // Manager is the join token managers may use to join the swarm. + Manager string +} + +// Spec represents the spec of a swarm. 
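+//
+// A minimal sketch (assumed values), e.g. keeping five historic tasks per
+// slot or node:
+//
+//	limit := int64(5)
+//	spec := Spec{
+//		Annotations:   Annotations{Name: "default"},
+//		Orchestration: OrchestrationConfig{TaskHistoryRetentionLimit: &limit},
+//	}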
+type Spec struct {
+ Annotations
+
+ Orchestration OrchestrationConfig `json:",omitempty"`
+ Raft RaftConfig `json:",omitempty"`
+ Dispatcher DispatcherConfig `json:",omitempty"`
+ CAConfig CAConfig `json:",omitempty"`
+ TaskDefaults TaskDefaults `json:",omitempty"`
+ EncryptionConfig EncryptionConfig `json:",omitempty"`
+}
+
+// OrchestrationConfig represents orchestration configuration.
+type OrchestrationConfig struct {
+ // TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or
+ // node. If negative, never remove completed or failed tasks.
+ TaskHistoryRetentionLimit *int64 `json:",omitempty"`
+}
+
+// TaskDefaults parameterizes cluster-level task creation with default values.
+type TaskDefaults struct {
+ // LogDriver selects the log driver to use for tasks created in the
+ // orchestrator if unspecified by a service.
+ //
+ // Updating this value will only have an effect on new tasks. Old tasks
+ // will continue to use their previously configured log driver until
+ // recreated.
+ LogDriver *Driver `json:",omitempty"`
+}
+
+// EncryptionConfig controls at-rest encryption of data and keys.
+type EncryptionConfig struct {
+ // AutoLockManagers specifies whether or not managers' TLS keys and raft data
+ // should be encrypted at rest in such a way that they must be unlocked
+ // before the manager node starts up again.
+ AutoLockManagers bool
+}
+
+// RaftConfig represents raft configuration.
+type RaftConfig struct {
+ // SnapshotInterval is the number of log entries between snapshots.
+ SnapshotInterval uint64 `json:",omitempty"`
+
+ // KeepOldSnapshots is the number of snapshots to keep beyond the
+ // current snapshot.
+ KeepOldSnapshots *uint64 `json:",omitempty"`
+
+ // LogEntriesForSlowFollowers is the number of log entries to keep
+ // around to sync up slow followers after a snapshot is created.
+ LogEntriesForSlowFollowers uint64 `json:",omitempty"`
+
+ // ElectionTick is the number of ticks that a follower will wait for a message
+ // from the leader before becoming a candidate and starting an election.
+ // ElectionTick must be greater than HeartbeatTick.
+ //
+ // A tick currently defaults to one second, so these translate directly to
+ // seconds currently, but this is NOT guaranteed.
+ ElectionTick int
+
+ // HeartbeatTick is the number of ticks between heartbeats. Every
+ // HeartbeatTick ticks, the leader will send a heartbeat to the
+ // followers.
+ //
+ // A tick currently defaults to one second, so these translate directly to
+ // seconds currently, but this is NOT guaranteed.
+ HeartbeatTick int
+}
+
+// DispatcherConfig represents dispatcher configuration.
+type DispatcherConfig struct {
+ // HeartbeatPeriod defines how often agent should send heartbeats to
+ // dispatcher.
+ HeartbeatPeriod time.Duration `json:",omitempty"`
+}
+
+// CAConfig represents CA configuration.
+type CAConfig struct {
+ // NodeCertExpiry is the duration certificates should be issued for
+ NodeCertExpiry time.Duration `json:",omitempty"`
+
+ // ExternalCAs is a list of CAs to which a manager node will make
+ // certificate signing requests for node certificates.
+ ExternalCAs []*ExternalCA `json:",omitempty"`
+
+ // SigningCACert and SigningCAKey specify the desired signing root CA and
+ // root CA key for the swarm. When inspecting the cluster, the key will
+ // be redacted.
+ SigningCACert string `json:",omitempty"`
+ SigningCAKey string `json:",omitempty"`
+
+ // If this value changes, and there is no specified signing cert and key,
+ // then the swarm is forced to generate a new root certificate and key.
+ ForceRotate uint64 `json:",omitempty"`
+}
+
+// ExternalCAProtocol represents type of external CA.
+type ExternalCAProtocol string
+
+// ExternalCAProtocolCFSSL CFSSL
+const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl"
+
+// ExternalCA defines external CA to be used by the cluster.
+type ExternalCA struct {
+ // Protocol is the protocol used by this external CA.
+ Protocol ExternalCAProtocol
+
+ // URL is the URL where the external CA can be reached.
+ URL string
+
+ // Options is a set of additional key/value pairs whose interpretation
+ // depends on the specified CA type.
+ Options map[string]string `json:",omitempty"`
+
+ // CACert specifies which root CA is used by this external CA. This certificate must
+ // be in PEM format.
+ CACert string
+}
+
+// InitRequest is the request used to init a swarm.
+type InitRequest struct {
+ ListenAddr string
+ AdvertiseAddr string
+ DataPathAddr string
+ DataPathPort uint32
+ ForceNewCluster bool
+ Spec Spec
+ AutoLockManagers bool
+ Availability NodeAvailability
+ DefaultAddrPool []string
+ SubnetSize uint32
+}
+
+// JoinRequest is the request used to join a swarm.
+type JoinRequest struct {
+ ListenAddr string
+ AdvertiseAddr string
+ DataPathAddr string
+ RemoteAddrs []string
+ JoinToken string // accept by secret
+ Availability NodeAvailability
+}
+
+// UnlockRequest is the request used to unlock a swarm.
+type UnlockRequest struct {
+ // UnlockKey is the unlock key in ASCII-armored format.
+ UnlockKey string
+}
+
+// LocalNodeState represents the state of the local node.
+type LocalNodeState string
+
+const (
+ // LocalNodeStateInactive INACTIVE
+ LocalNodeStateInactive LocalNodeState = "inactive"
+ // LocalNodeStatePending PENDING
+ LocalNodeStatePending LocalNodeState = "pending"
+ // LocalNodeStateActive ACTIVE
+ LocalNodeStateActive LocalNodeState = "active"
+ // LocalNodeStateError ERROR
+ LocalNodeStateError LocalNodeState = "error"
+ // LocalNodeStateLocked LOCKED
+ LocalNodeStateLocked LocalNodeState = "locked"
+)
+
+// Info represents generic information about swarm.
+type Info struct {
+ NodeID string
+ NodeAddr string
+
+ LocalNodeState LocalNodeState
+ ControlAvailable bool
+ Error string
+
+ RemoteManagers []Peer
+ Nodes int `json:",omitempty"`
+ Managers int `json:",omitempty"`
+
+ Cluster *ClusterInfo `json:",omitempty"`
+
+ Warnings []string `json:",omitempty"`
+}
+
+// Status provides information about the current swarm status and role,
+// obtained from the "Swarm" header in the API response.
+type Status struct {
+ // NodeState represents the state of the node.
+ NodeState LocalNodeState
+
+ // ControlAvailable indicates if the node is a swarm manager.
+ ControlAvailable bool
+}
+
+// Peer represents a peer.
+type Peer struct {
+ NodeID string
+ Addr string
+}
+
+// UpdateFlags contains flags for SwarmUpdate.
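+//
+// Illustrative mapping (assumed from the flag names, not stated in this
+// file): RotateWorkerToken and RotateManagerToken back
+// `docker swarm join-token --rotate`, and RotateManagerUnlockKey backs
+// `docker swarm unlock-key --rotate`.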
+type UpdateFlags struct {
+ RotateWorkerToken bool
+ RotateManagerToken bool
+ RotateManagerUnlockKey bool
+}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go
new file mode 100644
index 0000000000..ad3eeca0b7
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/task.go
@@ -0,0 +1,225 @@
+package swarm // import "github.com/docker/docker/api/types/swarm"
+
+import (
+ "time"
+
+ "github.com/docker/docker/api/types/swarm/runtime"
+)
+
+// TaskState represents the state of a task.
+type TaskState string
+
+const (
+ // TaskStateNew NEW
+ TaskStateNew TaskState = "new"
+ // TaskStateAllocated ALLOCATED
+ TaskStateAllocated TaskState = "allocated"
+ // TaskStatePending PENDING
+ TaskStatePending TaskState = "pending"
+ // TaskStateAssigned ASSIGNED
+ TaskStateAssigned TaskState = "assigned"
+ // TaskStateAccepted ACCEPTED
+ TaskStateAccepted TaskState = "accepted"
+ // TaskStatePreparing PREPARING
+ TaskStatePreparing TaskState = "preparing"
+ // TaskStateReady READY
+ TaskStateReady TaskState = "ready"
+ // TaskStateStarting STARTING
+ TaskStateStarting TaskState = "starting"
+ // TaskStateRunning RUNNING
+ TaskStateRunning TaskState = "running"
+ // TaskStateComplete COMPLETE
+ TaskStateComplete TaskState = "complete"
+ // TaskStateShutdown SHUTDOWN
+ TaskStateShutdown TaskState = "shutdown"
+ // TaskStateFailed FAILED
+ TaskStateFailed TaskState = "failed"
+ // TaskStateRejected REJECTED
+ TaskStateRejected TaskState = "rejected"
+ // TaskStateRemove REMOVE
+ TaskStateRemove TaskState = "remove"
+ // TaskStateOrphaned ORPHANED
+ TaskStateOrphaned TaskState = "orphaned"
+)
+
+// Task represents a task.
+type Task struct {
+ ID string
+ Meta
+ Annotations
+
+ Spec TaskSpec `json:",omitempty"`
+ ServiceID string `json:",omitempty"`
+ Slot int `json:",omitempty"`
+ NodeID string `json:",omitempty"`
+ Status TaskStatus `json:",omitempty"`
+ DesiredState TaskState `json:",omitempty"`
+ NetworksAttachments []NetworkAttachment `json:",omitempty"`
+ GenericResources []GenericResource `json:",omitempty"`
+
+ // JobIteration is the JobIteration of the Service that this Task was
+ // spawned from, if the Service is a ReplicatedJob or GlobalJob. This is
+ // used to determine which Tasks belong to which run of the job. This field
+ // is absent if the Service mode is Replicated or Global.
+ JobIteration *Version `json:",omitempty"`
+
+ // Volumes is the list of VolumeAttachments for this task. It specifies
+ // which particular volumes are to be used by this particular task, and
+ // which mounts in the spec they fulfill.
+ Volumes []VolumeAttachment
+}
+
+// TaskSpec represents the spec of a task.
+type TaskSpec struct {
+ // ContainerSpec, NetworkAttachmentSpec, and PluginSpec are mutually exclusive.
+ // PluginSpec is only used when the `Runtime` field is set to `plugin`.
+ // NetworkAttachmentSpec is used if the `Runtime` field is set to
+ // `attachment`.
+ ContainerSpec *ContainerSpec `json:",omitempty"`
+ PluginSpec *runtime.PluginSpec `json:",omitempty"`
+ NetworkAttachmentSpec *NetworkAttachmentSpec `json:",omitempty"`
+
+ Resources *ResourceRequirements `json:",omitempty"`
+ RestartPolicy *RestartPolicy `json:",omitempty"`
+ Placement *Placement `json:",omitempty"`
+ Networks []NetworkAttachmentConfig `json:",omitempty"`
+
+ // LogDriver specifies the LogDriver to use for tasks created from this
+ // spec.
If not present, the cluster default on swarm.Spec will be
+ // used, finally falling back to the engine default if neither is specified.
+ LogDriver *Driver `json:",omitempty"`
+
+ // ForceUpdate is a counter that triggers an update even if no relevant
+ // parameters have been changed.
+ ForceUpdate uint64
+
+ Runtime RuntimeType `json:",omitempty"`
+}
+
+// Resources represents resources (CPU/Memory) which can be advertised by a
+// node and requested to be reserved for a task.
+type Resources struct {
+ NanoCPUs int64 `json:",omitempty"`
+ MemoryBytes int64 `json:",omitempty"`
+ GenericResources []GenericResource `json:",omitempty"`
+}
+
+// Limit describes limits on resources which can be requested by a task.
+type Limit struct {
+ NanoCPUs int64 `json:",omitempty"`
+ MemoryBytes int64 `json:",omitempty"`
+ Pids int64 `json:",omitempty"`
+}
+
+// GenericResource represents a "user defined" resource which can
+// be either an integer (e.g. SSD=3) or a string (e.g. SSD=sda1)
+type GenericResource struct {
+ NamedResourceSpec *NamedGenericResource `json:",omitempty"`
+ DiscreteResourceSpec *DiscreteGenericResource `json:",omitempty"`
+}
+
+// NamedGenericResource represents a "user defined" resource which is defined
+// as a string.
+// "Kind" is used to describe the Kind of a resource (e.g. "GPU", "FPGA", "SSD", ...)
+// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...)
+type NamedGenericResource struct {
+ Kind string `json:",omitempty"`
+ Value string `json:",omitempty"`
+}
+
+// DiscreteGenericResource represents a "user defined" resource which is defined
+// as an integer.
+// "Kind" is used to describe the Kind of a resource (e.g. "GPU", "FPGA", "SSD", ...)
+// Value is used to count the resource (SSD=5, HDD=3, ...)
+type DiscreteGenericResource struct {
+ Kind string `json:",omitempty"`
+ Value int64 `json:",omitempty"`
+}
+
+// ResourceRequirements represents resource requirements.
+type ResourceRequirements struct {
+ Limits *Limit `json:",omitempty"`
+ Reservations *Resources `json:",omitempty"`
+}
+
+// Placement represents orchestration parameters.
+type Placement struct {
+ Constraints []string `json:",omitempty"`
+ Preferences []PlacementPreference `json:",omitempty"`
+ MaxReplicas uint64 `json:",omitempty"`
+
+ // Platforms stores all the platforms that the image can run on.
+ // This field is used in the platform filter for scheduling. If empty,
+ // then the platform filter is off, meaning there are no scheduling restrictions.
+ Platforms []Platform `json:",omitempty"`
+}
+
+// PlacementPreference provides a way to make the scheduler aware of factors
+// such as topology.
+type PlacementPreference struct {
+ Spread *SpreadOver
+}
+
+// SpreadOver is a scheduling preference that instructs the scheduler to spread
+// tasks evenly over groups of nodes identified by labels.
+type SpreadOver struct {
+ // label descriptor, such as engine.labels.az
+ SpreadDescriptor string
+}
+
+// RestartPolicy represents the restart policy.
+type RestartPolicy struct {
+ Condition RestartPolicyCondition `json:",omitempty"`
+ Delay *time.Duration `json:",omitempty"`
+ MaxAttempts *uint64 `json:",omitempty"`
+ Window *time.Duration `json:",omitempty"`
+}
+
+// RestartPolicyCondition represents when to restart.
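+//
+// An illustrative policy (hypothetical values): restart on failure, at most
+// three times, waiting five seconds between attempts:
+//
+//	delay := 5 * time.Second
+//	attempts := uint64(3)
+//	rp := RestartPolicy{
+//		Condition:   RestartPolicyConditionOnFailure,
+//		Delay:       &delay,
+//		MaxAttempts: &attempts,
+//	}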
+type RestartPolicyCondition string
+
+const (
+ // RestartPolicyConditionNone NONE
+ RestartPolicyConditionNone RestartPolicyCondition = "none"
+ // RestartPolicyConditionOnFailure ON_FAILURE
+ RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure"
+ // RestartPolicyConditionAny ANY
+ RestartPolicyConditionAny RestartPolicyCondition = "any"
+)
+
+// TaskStatus represents the status of a task.
+type TaskStatus struct {
+ Timestamp time.Time `json:",omitempty"`
+ State TaskState `json:",omitempty"`
+ Message string `json:",omitempty"`
+ Err string `json:",omitempty"`
+ ContainerStatus *ContainerStatus `json:",omitempty"`
+ PortStatus PortStatus `json:",omitempty"`
+}
+
+// ContainerStatus represents the status of a container.
+type ContainerStatus struct {
+ ContainerID string
+ PID int
+ ExitCode int
+}
+
+// PortStatus represents the port status of a task's host ports whose
+// service has published host ports.
+type PortStatus struct {
+ Ports []PortConfig `json:",omitempty"`
+}
+
+// VolumeAttachment contains the association of a Volume to a Task.
+type VolumeAttachment struct {
+ // ID is the Swarmkit ID of the Volume. This is not the CSI VolumeId.
+ ID string `json:",omitempty"`
+
+ // Source, together with Target, indicates the Mount, as specified in the
+ // ContainerSpec, that this volume fulfills.
+ Source string `json:",omitempty"`
+
+ // Target, together with Source, indicates the Mount, as specified
+ // in the ContainerSpec, that this volume fulfills.
+ Target string `json:",omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/time/timestamp.go b/vendor/github.com/docker/docker/api/types/time/timestamp.go
new file mode 100644
index 0000000000..cab5c32e3f
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/time/timestamp.go
@@ -0,0 +1,131 @@
+package time // import "github.com/docker/docker/api/types/time"
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// These are additional predefined layouts for use in Time.Format and Time.Parse
+// with --since and --until parameters for `docker logs` and `docker events`
+const (
+ rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone
+ rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone
+ dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00
+ dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00
+)
+
+// GetTimestamp tries to parse the given string as a Go duration,
+// then as an RFC3339 time, and finally as a Unix timestamp. If
+// any of these succeed, it returns a Unix timestamp
+// as a string; otherwise it returns the given value back.
+// In case of duration input, the returned timestamp is computed
+// as the given reference time minus the amount of the duration.
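+//
+// For example (values are illustrative; "ref" is a caller-supplied
+// reference time such as time.Now()):
+//
+//	GetTimestamp("10m", ref)        // Unix seconds for ten minutes before ref
+//	GetTimestamp("2006-01-02", ref) // that date at 00:00:00 in ref's time zone
+//	GetTimestamp("1136073600", ref) // already a timestamp; returned unchanged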
+func GetTimestamp(value string, reference time.Time) (string, error) {
+ if d, err := time.ParseDuration(value); value != "0" && err == nil {
+ return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil
+ }
+
+ var format string
+ // if the string has a Z, a +, or three dashes, use time.Parse; otherwise use time.ParseInLocation
+ parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
+
+ if strings.Contains(value, ".") {
+ if parseInLocation {
+ format = rFC3339NanoLocal
+ } else {
+ format = time.RFC3339Nano
+ }
+ } else if strings.Contains(value, "T") {
+ // we want the number of colons in the T portion of the timestamp
+ tcolons := strings.Count(value, ":")
+ // if parseInLocation is off and we have a +/- zone offset (not Z) then
+ // there will be an extra colon in the input for the tz offset; subtract that
+ // colon from the tcolons count
+ if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 {
+ tcolons--
+ }
+ if parseInLocation {
+ switch tcolons {
+ case 0:
+ format = "2006-01-02T15"
+ case 1:
+ format = "2006-01-02T15:04"
+ default:
+ format = rFC3339Local
+ }
+ } else {
+ switch tcolons {
+ case 0:
+ format = "2006-01-02T15Z07:00"
+ case 1:
+ format = "2006-01-02T15:04Z07:00"
+ default:
+ format = time.RFC3339
+ }
+ }
+ } else if parseInLocation {
+ format = dateLocal
+ } else {
+ format = dateWithZone
+ }
+
+ var t time.Time
+ var err error
+
+ if parseInLocation {
+ t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone()))
+ } else {
+ t, err = time.Parse(format, value)
+ }
+
+ if err != nil {
+ // if there is a `-` then it's an RFC3339-like timestamp
+ if strings.Contains(value, "-") {
+ return "", err // was probably an RFC3339-like timestamp but the parser failed with an error
+ }
+ if _, _, err := parseTimestamp(value); err != nil {
+ return "", fmt.Errorf("failed to parse value as time or duration: %q", value)
+ }
+ return value, nil // unix timestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server)
+ }
+
+ return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil
+}
+
+// ParseTimestamps returns seconds and nanoseconds from a timestamp that has
+// the format ("%d.%09d", time.Unix(), int64(time.Nanosecond())).
+// If the incoming nanosecond portion is longer than 9 digits it is truncated.
+// The expectation is that the seconds and nanoseconds will be used to create a
+// time variable.
For example:
+//
+// seconds, nanoseconds, _ := ParseTimestamps("1136073600.000000001", 0)
+// since := time.Unix(seconds, nanoseconds)
+//
+// returns seconds as defaultSeconds if value == ""
+func ParseTimestamps(value string, defaultSeconds int64) (seconds int64, nanoseconds int64, err error) {
+ if value == "" {
+ return defaultSeconds, 0, nil
+ }
+ return parseTimestamp(value)
+}
+
+func parseTimestamp(value string) (sec int64, nsec int64, err error) {
+ s, n, ok := strings.Cut(value, ".")
+ sec, err = strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ return sec, 0, err
+ }
+ if !ok {
+ return sec, 0, nil
+ }
+ nsec, err = strconv.ParseInt(n, 10, 64)
+ if err != nil {
+ return sec, nsec, err
+ }
+ // convert the fractional portion to nanoseconds, in case it has fewer than 9 digits
+ nsec = int64(float64(nsec) * math.Pow(float64(10), float64(9-len(n))))
+ return sec, nsec, nil
+}
diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go
new file mode 100644
index 0000000000..b413e02000
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/types.go
@@ -0,0 +1,811 @@
+package types // import "github.com/docker/docker/api/types"
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/mount"
+ "github.com/docker/docker/api/types/network"
+ "github.com/docker/docker/api/types/registry"
+ "github.com/docker/docker/api/types/swarm"
+ "github.com/docker/docker/api/types/volume"
+ "github.com/docker/go-connections/nat"
+)
+
+const (
+ // MediaTypeRawStream is vendor specific MIME-Type set for raw TTY streams
+ MediaTypeRawStream = "application/vnd.docker.raw-stream"
+
+ // MediaTypeMultiplexedStream is vendor specific MIME-Type set for stdin/stdout/stderr multiplexed streams
+ MediaTypeMultiplexedStream = "application/vnd.docker.multiplexed-stream"
+)
+
+// RootFS holds an Image's RootFS description including the layer IDs.
+type RootFS struct {
+ Type string `json:",omitempty"`
+ Layers []string `json:",omitempty"`
+}
+
+// ImageInspect contains response of Engine API:
+// GET "/images/{name:.*}/json"
+type ImageInspect struct {
+ // ID is the content-addressable ID of an image.
+ //
+ // This identifier is a content-addressable digest calculated from the
+ // image's configuration (which includes the digests of layers used by
+ // the image).
+ //
+ // Note that this digest differs from the `RepoDigests` below, which
+ // holds digests of image manifests that reference the image.
+ ID string `json:"Id"`
+
+ // RepoTags is a list of image names/tags in the local image cache that
+ // reference this image.
+ //
+ // Multiple image tags can refer to the same image, and this list may be
+ // empty if no tags reference the image, in which case the image is
+ // "untagged"; it can still be referenced by its ID.
+ RepoTags []string
+
+ // RepoDigests is a list of content-addressable digests of locally available
+ // image manifests that the image is referenced from. Multiple manifests can
+ // refer to the same image.
+ //
+ // These digests are usually only available if the image was either pulled
+ // from a registry, or if the image was pushed to a registry, which is when
+ // the manifest is generated and its digest calculated.
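+ //
+ // An entry combines the repository name with a digest, for example
+ // (illustrative only): "example.com/myimage@sha256:<digest>".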
+ RepoDigests []string + + // Parent is the ID of the parent image. + // + // Depending on how the image was created, this field may be empty and + // is only set for images that were built/created locally. This field + // is empty if the image was pulled from an image registry. + Parent string + + // Comment is an optional message that can be set when committing or + // importing the image. + Comment string + + // Created is the date and time at which the image was created, formatted in + // RFC 3339 nano-seconds (time.RFC3339Nano). + Created string + + // Container is the ID of the container that was used to create the image. + // + // Depending on how the image was created, this field may be empty. + Container string + + // ContainerConfig is an optional field containing the configuration of the + // container that was last committed when creating the image. + // + // Previous versions of Docker builder used this field to store build cache, + // and it is not in active use anymore. + ContainerConfig *container.Config + + // DockerVersion is the version of Docker that was used to build the image. + // + // Depending on how the image was created, this field may be empty. + DockerVersion string + + // Author is the name of the author that was specified when committing the + // image, or as specified through MAINTAINER (deprecated) in the Dockerfile. + Author string + Config *container.Config + + // Architecture is the hardware CPU architecture that the image runs on. + Architecture string + + // Variant is the CPU architecture variant (presently ARM-only). + Variant string `json:",omitempty"` + + // OS is the Operating System the image is built to run on. + Os string + + // OsVersion is the version of the Operating System the image is built to + // run on (especially for Windows). + OsVersion string `json:",omitempty"` + + // Size is the total size of the image including all layers it is composed of. + Size int64 + + // VirtualSize is the total size of the image including all layers it is + // composed of. + // + // In versions of Docker before v1.10, this field was calculated from + // the image itself and all of its parent images. Docker v1.10 and up + // store images self-contained, and no longer use a parent-chain, making + // this field an equivalent of the Size field. + // + // Deprecated: Unused in API 1.43 and up, but kept for backward compatibility with older API versions. + VirtualSize int64 `json:"VirtualSize,omitempty"` + + // GraphDriver holds information about the storage driver used to store the + // container's and image's filesystem. + GraphDriver GraphDriverData + + // RootFS contains information about the image's RootFS, including the + // layer IDs. + RootFS RootFS + + // Metadata of the image in the local cache. + // + // This information is local to the daemon, and not part of the image itself. + Metadata ImageMetadata +} + +// ImageMetadata contains engine-local data about the image +type ImageMetadata struct { + // LastTagTime is the date and time at which the image was last tagged. 
+ LastTagTime time.Time `json:",omitempty"`
+}
+
+// Container contains response of Engine API:
+// GET "/containers/json"
+type Container struct {
+ ID string `json:"Id"`
+ Names []string
+ Image string
+ ImageID string
+ Command string
+ Created int64
+ Ports []Port
+ SizeRw int64 `json:",omitempty"`
+ SizeRootFs int64 `json:",omitempty"`
+ Labels map[string]string
+ State string
+ Status string
+ HostConfig struct {
+ NetworkMode string `json:",omitempty"`
+ }
+ NetworkSettings *SummaryNetworkSettings
+ Mounts []MountPoint
+}
+
+// CopyConfig contains request body of Engine API:
+// POST "/containers/"+containerID+"/copy"
+type CopyConfig struct {
+ Resource string
+}
+
+// ContainerPathStat is used to encode the header from
+// GET "/containers/{name:.*}/archive"
+// "Name" is the file or directory name.
+type ContainerPathStat struct {
+ Name string `json:"name"`
+ Size int64 `json:"size"`
+ Mode os.FileMode `json:"mode"`
+ Mtime time.Time `json:"mtime"`
+ LinkTarget string `json:"linkTarget"`
+}
+
+// ContainerStats contains response of Engine API:
+// GET "/stats"
+type ContainerStats struct {
+ Body io.ReadCloser `json:"body"`
+ OSType string `json:"ostype"`
+}
+
+// Ping contains response of Engine API:
+// GET "/_ping"
+type Ping struct {
+ APIVersion string
+ OSType string
+ Experimental bool
+ BuilderVersion BuilderVersion
+
+ // SwarmStatus provides information about the current swarm status of the
+ // engine, obtained from the "Swarm" header in the API response.
+ //
+ // It can be a nil struct if the API version does not provide this header
+ // in the ping response, or if an error occurred, in which case the client
+ // should use other ways to get the current swarm status, such as the /swarm
+ // endpoint.
+ SwarmStatus *swarm.Status
+}
+
+// ComponentVersion describes the version information for a specific component.
+type ComponentVersion struct {
+ Name string
+ Version string
+ Details map[string]string `json:",omitempty"`
+}
+
+// Version contains response of Engine API:
+// GET "/version"
+type Version struct {
+ Platform struct{ Name string } `json:",omitempty"`
+ Components []ComponentVersion `json:",omitempty"`
+
+ // The following fields are deprecated; they relate to the Engine component and are kept for backwards compatibility
+
+ Version string
+ APIVersion string `json:"ApiVersion"`
+ MinAPIVersion string `json:"MinAPIVersion,omitempty"`
+ GitCommit string
+ GoVersion string
+ Os string
+ Arch string
+ KernelVersion string `json:",omitempty"`
+ Experimental bool `json:",omitempty"`
+ BuildTime string `json:",omitempty"`
+}
+
+// Commit holds the Git-commit (SHA1) that a binary was built from, as reported
+// in the version-string of external tools, such as containerd or runC.
+type Commit struct {
+ ID string // ID is the actual commit ID of the external tool.
+ Expected string // Expected is the commit ID of the external tool expected by dockerd, as set at build time.
+}
+
+// Info contains response of Engine API:
+// GET "/info"
+type Info struct {
+ ID string
+ Containers int
+ ContainersRunning int
+ ContainersPaused int
+ ContainersStopped int
+ Images int
+ Driver string
+ DriverStatus [][2]string
+ SystemStatus [][2]string `json:",omitempty"` // SystemStatus is only propagated by the Swarm standalone API
+ Plugins PluginsInfo
+ MemoryLimit bool
+ SwapLimit bool
+ KernelMemory bool `json:",omitempty"` // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes
+ KernelMemoryTCP bool `json:",omitempty"` // KernelMemoryTCP is not supported on cgroups v2.
+ CPUCfsPeriod bool `json:"CpuCfsPeriod"`
+ CPUCfsQuota bool `json:"CpuCfsQuota"`
+ CPUShares bool
+ CPUSet bool
+ PidsLimit bool
+ IPv4Forwarding bool
+ BridgeNfIptables bool
+ BridgeNfIP6tables bool `json:"BridgeNfIp6tables"`
+ Debug bool
+ NFd int
+ OomKillDisable bool
+ NGoroutines int
+ SystemTime string
+ LoggingDriver string
+ CgroupDriver string
+ CgroupVersion string `json:",omitempty"`
+ NEventsListener int
+ KernelVersion string
+ OperatingSystem string
+ OSVersion string
+ OSType string
+ Architecture string
+ IndexServerAddress string
+ RegistryConfig *registry.ServiceConfig
+ NCPU int
+ MemTotal int64
+ GenericResources []swarm.GenericResource
+ DockerRootDir string
+ HTTPProxy string `json:"HttpProxy"`
+ HTTPSProxy string `json:"HttpsProxy"`
+ NoProxy string
+ Name string
+ Labels []string
+ ExperimentalBuild bool
+ ServerVersion string
+ Runtimes map[string]Runtime
+ DefaultRuntime string
+ Swarm swarm.Info
+ // LiveRestoreEnabled determines whether containers should be kept
+ // running when the daemon is shut down, or upon daemon start if
+ // running containers are detected
+ LiveRestoreEnabled bool
+ Isolation container.Isolation
+ InitBinary string
+ ContainerdCommit Commit
+ RuncCommit Commit
+ InitCommit Commit
+ SecurityOptions []string
+ ProductLicense string `json:",omitempty"`
+ DefaultAddressPools []NetworkAddressPool `json:",omitempty"`
+
+ // Warnings contains a slice of warnings that occurred while collecting
+ // system information. These warnings are intended to be informational
+ // messages for the user, and are not intended to be parsed / used for
+ // other purposes, as they do not have a fixed format.
+ Warnings []string
+}
+
+// KeyValue holds a key/value pair
+type KeyValue struct {
+ Key, Value string
+}
+
+// NetworkAddressPool is a temp struct used by Info struct
+type NetworkAddressPool struct {
+ Base string
+ Size int
+}
+
+// SecurityOpt contains the name and options of a security option
+type SecurityOpt struct {
+ Name string
+ Options []KeyValue
+}
+
+// DecodeSecurityOptions decodes a security options string slice to a type-safe
+// SecurityOpt
+func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) {
+ so := []SecurityOpt{}
+ for _, opt := range opts {
+ // support output from a < 1.13 docker daemon
+ if !strings.Contains(opt, "=") {
+ so = append(so, SecurityOpt{Name: opt})
+ continue
+ }
+ secopt := SecurityOpt{}
+ for _, s := range strings.Split(opt, ",") {
+ k, v, ok := strings.Cut(s, "=")
+ if !ok {
+ return nil, fmt.Errorf("invalid security option %q", s)
+ }
+ if k == "" || v == "" {
+ return nil, errors.New("invalid empty security option")
+ }
+ if k == "name" {
+ secopt.Name = v
+ continue
+ }
+ secopt.Options = append(secopt.Options, KeyValue{Key: k, Value: v})
+ }
+ so = append(so, secopt)
+ }
+ return so, nil
+}
+
+// PluginsInfo is a temp struct holding the names of plugins
+// registered with the docker daemon.
It is used by the Info struct.
+type PluginsInfo struct {
+ // List of Volume plugins registered
+ Volume []string
+ // List of Network plugins registered
+ Network []string
+ // List of Authorization plugins registered
+ Authorization []string
+ // List of Log plugins registered
+ Log []string
+}
+
+// ExecStartCheck is a temp struct used by execStart
+// Config fields are part of ExecConfig in the runconfig package
+type ExecStartCheck struct {
+ // ExecStart will first check if it's detached
+ Detach bool
+ // Check if there's a tty
+ Tty bool
+ // Terminal size [height, width], unused if Tty == false
+ ConsoleSize *[2]uint `json:",omitempty"`
+}
+
+// HealthcheckResult stores information about a single run of a healthcheck probe
+type HealthcheckResult struct {
+ Start time.Time // Start is the time this check started
+ End time.Time // End is the time this check ended
+ ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe
+ Output string // Output from last check
+}
+
+// Health states
+const (
+ NoHealthcheck = "none" // Indicates there is no healthcheck
+ Starting = "starting" // Starting indicates that the container is not yet ready
+ Healthy = "healthy" // Healthy indicates that the container is running correctly
+ Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem
+)
+
+// Health stores information about the container's healthcheck results
+type Health struct {
+ Status string // Status is one of Starting, Healthy or Unhealthy
+ FailingStreak int // FailingStreak is the number of consecutive failures
+ Log []*HealthcheckResult // Log contains the last few results (oldest first)
+}
+
+// ContainerState stores container's running state
+// it's part of ContainerJSONBase and will be returned by the "inspect" command
+type ContainerState struct {
+ Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead"
+ Running bool
+ Paused bool
+ Restarting bool
+ OOMKilled bool
+ Dead bool
+ Pid int
+ ExitCode int
+ Error string
+ StartedAt string
+ FinishedAt string
+ Health *Health `json:",omitempty"`
+}
+
+// ContainerNode stores information about the node that a container
+// is running on.
It's only used by the Docker Swarm standalone API.
+type ContainerNode struct {
+ ID string
+ IPAddress string `json:"IP"`
+ Addr string
+ Name string
+ Cpus int
+ Memory int64
+ Labels map[string]string
+}
+
+// ContainerJSONBase contains response of Engine API:
+// GET "/containers/{name:.*}/json"
+type ContainerJSONBase struct {
+ ID string `json:"Id"`
+ Created string
+ Path string
+ Args []string
+ State *ContainerState
+ Image string
+ ResolvConfPath string
+ HostnamePath string
+ HostsPath string
+ LogPath string
+ Node *ContainerNode `json:",omitempty"` // Node is only propagated by Docker Swarm standalone API
+ Name string
+ RestartCount int
+ Driver string
+ Platform string
+ MountLabel string
+ ProcessLabel string
+ AppArmorProfile string
+ ExecIDs []string
+ HostConfig *container.HostConfig
+ GraphDriver GraphDriverData
+ SizeRw *int64 `json:",omitempty"`
+ SizeRootFs *int64 `json:",omitempty"`
+}
+
+// ContainerJSON is the newer struct used for the "inspect" response, along with MountPoint
+type ContainerJSON struct {
+ *ContainerJSONBase
+ Mounts []MountPoint
+ Config *container.Config
+ NetworkSettings *NetworkSettings
+}
+
+// NetworkSettings exposes the network settings in the API
+type NetworkSettings struct {
+ NetworkSettingsBase
+ DefaultNetworkSettings
+ Networks map[string]*network.EndpointSettings
+}
+
+// SummaryNetworkSettings provides a summary of container's networks
+// in /containers/json
+type SummaryNetworkSettings struct {
+ Networks map[string]*network.EndpointSettings
+}
+
+// NetworkSettingsBase holds basic information about networks
+type NetworkSettingsBase struct {
+ Bridge string // Bridge is the Bridge name the network uses (e.g. `docker0`)
+ SandboxID string // SandboxID uniquely represents a container's network stack
+ HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface
+ LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix
+ LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address
+ Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port
+ SandboxKey string // SandboxKey identifies the sandbox
+ SecondaryIPAddresses []network.Address
+ SecondaryIPv6Addresses []network.Address
+}
+
+// DefaultNetworkSettings holds network information
+// during the 2-release deprecation period.
+// It will be removed in Docker 1.11.
+type DefaultNetworkSettings struct {
+ EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox
+ Gateway string // Gateway holds the gateway address for the network
+ GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address
+ GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address
+ IPAddress string // IPAddress holds the IPv4 address for the network
+ IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address
+ IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6
+ MacAddress string // MacAddress holds the MAC address for the network
+}
+
+// MountPoint represents a mount point configuration inside the container.
+// This is used for reporting the mountpoints in use by a container.
+type MountPoint struct {
+ // Type is the type of mount, see `Type` definitions in
+ // github.com/docker/docker/api/types/mount.Type
+ Type mount.Type `json:",omitempty"`
+
+ // Name is the name reference to the underlying data defined by `Source`
+ // e.g., the volume name.
+ Name string `json:",omitempty"`
+
+ // Source is the source location of the mount.
+ //
+ // For volumes, this contains the storage location of the volume (within
+ // `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains
+ // the source (host) part of the bind-mount. For `tmpfs` mount points, this
+ // field is empty.
+ Source string
+
+ // Destination is the path relative to the container root (`/`) where the
+ // Source is mounted inside the container.
+ Destination string
+
+ // Driver is the volume driver used to create the volume (if it is a volume).
+ Driver string `json:",omitempty"`
+
+ // Mode is a comma-separated list of options supplied by the user when
+ // creating the bind/volume mount.
+ //
+ // The default is platform-specific (`"z"` on Linux, empty on Windows).
+ Mode string
+
+ // RW indicates whether the mount is mounted writable (read-write).
+ RW bool
+
+ // Propagation describes how mounts are propagated from the host into the
+ // mount point, and vice-versa. Refer to the Linux kernel documentation
+ // for details:
+ // https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt
+ //
+ // This field is not used on Windows.
+ Propagation mount.Propagation
+}
+
+// NetworkResource is the body of the "get network" http response message
+type NetworkResource struct {
+ Name string // Name is the requested name of the network
+ ID string `json:"Id"` // ID uniquely identifies a network on a single machine
+ Created time.Time // Created is the time the network was created
+ Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level)
+ Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`)
+ EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6
+ IPAM network.IPAM // IPAM is the network's IP Address Management
+ Internal bool // Internal represents if the network is used internally only
+ Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode.
+ Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster.
+ ConfigFrom network.ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network.
+ ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services.
+ Containers map[string]EndpointResource // Containers contains endpoints belonging to the network
+ Options map[string]string // Options holds the network specific options to use when creating the network
+ Labels map[string]string // Labels holds metadata specific to the network being created
+ Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network
+ Services map[string]network.ServiceInfo `json:",omitempty"`
+}
+
+// EndpointResource contains network resources allocated and used for a container in a network
+type EndpointResource struct {
+ Name string
+ EndpointID string
+ MacAddress string
+ IPv4Address string
+ IPv6Address string
+}
+
+// NetworkCreate is the expected body of the "create network" http request message
+type NetworkCreate struct {
+ // Check for networks with duplicate names.
+ // Network is primarily keyed based on a random ID and not on the name.
+ // Network name is strictly a user-friendly alias to the network
+ // which is uniquely identified using ID.
+ // And there is no guaranteed way to check for duplicates.
+ // Option CheckDuplicate is there to provide best-effort checking of any networks
+ // that have the same name, but it is not guaranteed to catch all name collisions.
+ CheckDuplicate bool
+ Driver string
+ Scope string
+ EnableIPv6 bool
+ IPAM *network.IPAM
+ Internal bool
+ Attachable bool
+ Ingress bool
+ ConfigOnly bool
+ ConfigFrom *network.ConfigReference
+ Options map[string]string
+ Labels map[string]string
+}
+
+// NetworkCreateRequest is the request message sent to the server for the network create call.
+type NetworkCreateRequest struct {
+ NetworkCreate
+ Name string
+}
+
+// NetworkCreateResponse is the response message sent by the server for the network create call
+type NetworkCreateResponse struct {
+ ID string `json:"Id"`
+ Warning string
+}
+
+// NetworkConnect represents the data to be used to connect a container to the network
+type NetworkConnect struct {
+ Container string
+ EndpointConfig *network.EndpointSettings `json:",omitempty"`
+}
+
+// NetworkDisconnect represents the data to be used to disconnect a container from the network
+type NetworkDisconnect struct {
+ Container string
+ Force bool
+}
+
+// NetworkInspectOptions holds parameters to inspect a network
+type NetworkInspectOptions struct {
+ Scope string
+ Verbose bool
+}
+
+// Checkpoint represents the details of a checkpoint
+type Checkpoint struct {
+ Name string // Name is the name of the checkpoint
+}
+
+// Runtime describes an OCI runtime
+type Runtime struct {
+ // "Legacy" runtime configuration for runc-compatible runtimes.
+
+ Path string `json:"path,omitempty"`
+ Args []string `json:"runtimeArgs,omitempty"`
+
+ // Shimv2 runtime configuration. Mutually exclusive with the legacy config above.
+
+ Type string `json:"runtimeType,omitempty"`
+ Options map[string]interface{} `json:"options,omitempty"`
+
+ // This is exposed here only for internal use
+ ShimConfig *ShimConfig `json:"-"`
+}
+
+// ShimConfig is used by the runtime to configure containerd shims
+type ShimConfig struct {
+ Binary string
+ Opts interface{}
+}
+
+// DiskUsageObject represents an object type used for disk usage query filtering.
+type DiskUsageObject string
+
+const (
+ // ContainerObject represents a container DiskUsageObject.
+ ContainerObject DiskUsageObject = "container"
+ // ImageObject represents an image DiskUsageObject.
+ ImageObject DiskUsageObject = "image"
+ // VolumeObject represents a volume DiskUsageObject.
+ VolumeObject DiskUsageObject = "volume"
+ // BuildCacheObject represents a build-cache DiskUsageObject.
+ BuildCacheObject DiskUsageObject = "build-cache"
+)
+
+// DiskUsageOptions holds parameters for system disk usage query.
+type DiskUsageOptions struct {
+ // Types specifies what object types to include in the response. If empty,
+ // all object types are returned.
+ Types []DiskUsageObject
+}
+
+// DiskUsage contains response of Engine API:
+// GET "/system/df"
+type DiskUsage struct {
+ LayersSize int64
+ Images []*ImageSummary
+ Containers []*Container
+ Volumes []*volume.Volume
+ BuildCache []*BuildCache
+ BuilderSize int64 `json:",omitempty"` // Deprecated: deprecated in API 1.38, and no longer used since API 1.40.
+}
+
+// ContainersPruneReport contains the response for Engine API:
+// POST "/containers/prune"
+type ContainersPruneReport struct {
+ ContainersDeleted []string
+ SpaceReclaimed uint64
+}
+
+// VolumesPruneReport contains the response for Engine API:
+// POST "/volumes/prune"
+type VolumesPruneReport struct {
+ VolumesDeleted []string
+ SpaceReclaimed uint64
+}
+
+// ImagesPruneReport contains the response for Engine API:
+// POST "/images/prune"
+type ImagesPruneReport struct {
+ ImagesDeleted []ImageDeleteResponseItem
+ SpaceReclaimed uint64
+}
+
+// BuildCachePruneReport contains the response for Engine API:
+// POST "/build/prune"
+type BuildCachePruneReport struct {
+ CachesDeleted []string
+ SpaceReclaimed uint64
+}
+
+// NetworksPruneReport contains the response for Engine API:
+// POST "/networks/prune"
+type NetworksPruneReport struct {
+ NetworksDeleted []string
+}
+
+// SecretCreateResponse contains the information returned to a client
+// on the creation of a new secret.
+type SecretCreateResponse struct {
+ // ID is the id of the created secret.
+ ID string
+}
+
+// SecretListOptions holds parameters to list secrets
+type SecretListOptions struct {
+ Filters filters.Args
+}
+
+// ConfigCreateResponse contains the information returned to a client
+// on the creation of a new config.
+type ConfigCreateResponse struct {
+ // ID is the id of the created config.
+ ID string
+}
+
+// ConfigListOptions holds parameters to list configs
+type ConfigListOptions struct {
+ Filters filters.Args
+}
+
+// PushResult contains the tag, manifest digest, and manifest size from the
+// push. It's used to signal this information to the trust code in the client
+// so it can sign the manifest if necessary.
+type PushResult struct {
+ Tag string
+ Digest string
+ Size int
+}
+
+// BuildResult contains the image id of a successful build
+type BuildResult struct {
+ ID string
+}
+
+// BuildCache contains information about a build cache record.
+type BuildCache struct {
+ // ID is the unique ID of the build cache record.
+ ID string
+ // Parent is the ID of the parent build cache record.
+ //
+ // Deprecated: deprecated in API v1.42 and up, as it was deprecated in BuildKit; use Parents instead.
+ Parent string `json:"Parent,omitempty"`
+ // Parents is the list of parent build cache record IDs.
+ Parents []string `json:"Parents,omitempty"`
+ // Type is the cache record type.
+ Type string
+ // Description is a description of the build-step that produced the build cache.
+ Description string
+ // InUse indicates if the build cache is in use.
+ InUse bool
+ // Shared indicates if the build cache is shared.
+ Shared bool
+ // Size is the amount of disk space used by the build cache (in bytes).
+ Size int64
+ // CreatedAt is the date and time at which the build cache was created.
+ CreatedAt time.Time
+ // LastUsedAt is the date and time at which the build cache was last used.
+ LastUsedAt *time.Time
+ UsageCount int
+}
+
+// BuildCachePruneOptions holds parameters to prune the build cache
+type BuildCachePruneOptions struct {
+ All bool
+ KeepStorage int64
+ Filters filters.Args
+}
diff --git a/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go b/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go
new file mode 100644
index 0000000000..55fc5d3899
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go
@@ -0,0 +1,420 @@
+package volume
+
+import (
+ "github.com/docker/docker/api/types/swarm"
+)
+
+// ClusterVolume contains options and information specific to, and only present
+// on, Swarm CSI cluster volumes.
+type ClusterVolume struct {
+ // ID is the Swarm ID of the volume. Because cluster volumes are Swarm
+ // objects, they have an ID, unlike non-cluster volumes, which only have a
+ // Name. This ID can be used to refer to the cluster volume.
+ ID string
+
+ // Meta is the swarm metadata about this volume.
+ swarm.Meta
+
+ // Spec is the cluster-specific options from which this volume is derived.
+ Spec ClusterVolumeSpec
+
+ // PublishStatus contains the status of the volume as it pertains to its
+ // publishing on Nodes.
+ PublishStatus []*PublishStatus `json:",omitempty"`
+
+ // Info is information about the global status of the volume.
+ Info *Info `json:",omitempty"`
+}
+
+// ClusterVolumeSpec contains the spec used to create this volume.
+type ClusterVolumeSpec struct {
+ // Group defines the volume group of this volume. Volumes belonging to the
+ // same group can be referred to by group name when creating Services.
+ // Referring to a volume by group instructs swarm to treat volumes in that
+ // group interchangeably for the purpose of scheduling. Volumes with an
+ // empty string for a group technically all belong to the same, empty-string
+ // group.
+ Group string `json:",omitempty"`
+
+ // AccessMode defines how the volume is used by tasks.
+ AccessMode *AccessMode `json:",omitempty"`
+
+ // AccessibilityRequirements specifies where in the cluster a volume must
+ // be accessible from.
+ //
+ // This field must be empty if the plugin does not support
+ // VOLUME_ACCESSIBILITY_CONSTRAINTS capabilities. If it is present but the
+ // plugin does not support it, the volume will not be created.
+ //
+ // If AccessibilityRequirements is empty, but the plugin does support
+ // VOLUME_ACCESSIBILITY_CONSTRAINTS, then Swarmkit will assume the entire
+ // cluster is a valid target for the volume.
+ AccessibilityRequirements *TopologyRequirement `json:",omitempty"`
+
+ // CapacityRange defines the desired capacity that the volume should be
+ // created with. If nil, the plugin will decide the capacity.
+ CapacityRange *CapacityRange `json:",omitempty"`
+
+ // Secrets defines Swarm Secrets that are passed to the CSI storage plugin
+ // when operating on this volume.
+ Secrets []Secret `json:",omitempty"`
+
+ // Availability is the Volume's desired availability. Analogous to Node
+ // Availability, this allows the user to take volumes offline in order to
+ // update or delete them.
+ Availability Availability `json:",omitempty"`
+}
+
+// Availability specifies the availability of the volume.
+type Availability string
+
+const (
+ // AvailabilityActive indicates that the volume is active and fully
+ // schedulable on the cluster.
+ AvailabilityActive Availability = "active"
+
+ // AvailabilityPause indicates that no new workloads should use the
+ // volume, but existing workloads can continue to use it.
+ AvailabilityPause Availability = "pause"
+
+ // AvailabilityDrain indicates that all workloads using this volume
+ // should be rescheduled, and the volume unpublished from all nodes.
+ AvailabilityDrain Availability = "drain"
+)
+
+// AccessMode defines the access mode of a volume.
+type AccessMode struct {
+ // Scope defines the set of nodes this volume can be used on at one time.
+ Scope Scope `json:",omitempty"`
+
+ // Sharing defines the number and way that different tasks can use this
+ // volume at one time.
+ Sharing SharingMode `json:",omitempty"`
+
+ // MountVolume defines options for using this volume as a Mount-type
+ // volume.
+ //
+ // Either BlockVolume or MountVolume, but not both, must be present.
+ MountVolume *TypeMount `json:",omitempty"`
+
+ // BlockVolume defines options for using this volume as a Block-type
+ // volume.
+ //
+ // Either BlockVolume or MountVolume, but not both, must be present.
+ BlockVolume *TypeBlock `json:",omitempty"`
+}
+
+// Scope defines the Scope of a Cluster Volume: the number of nodes on which a
+// Volume can be accessed simultaneously.
+type Scope string
+
+const (
+ // ScopeSingleNode indicates the volume can be used on one node at a
+ // time.
+ ScopeSingleNode Scope = "single"
+
+ // ScopeMultiNode indicates the volume can be used on many nodes at
+ // the same time.
+ ScopeMultiNode Scope = "multi"
+)
+
+// SharingMode defines the Sharing of a Cluster Volume: the way in which Tasks
+// using the Volume at the same time may share it.
+type SharingMode string
+
+const (
+ // SharingNone indicates that only one Task may use the Volume at a
+ // time.
+ SharingNone SharingMode = "none"
+
+ // SharingReadOnly indicates that the Volume may be shared by any
+ // number of Tasks, but they must be read-only.
+ SharingReadOnly SharingMode = "readonly"
+
+ // SharingOneWriter indicates that the Volume may be shared by any
+ // number of Tasks, but all after the first must be read-only.
+ SharingOneWriter SharingMode = "onewriter"
+
+ // SharingAll means that the Volume may be shared by any number of
+ // Tasks, as readers or writers.
+ SharingAll SharingMode = "all"
+)
+
+// TypeBlock defines options for using a volume as a block-type volume.
+//
+// Intentionally empty.
+type TypeBlock struct{}
+
+// TypeMount contains options for using a volume as a Mount-type
+// volume.
+type TypeMount struct {
+ // FsType specifies the filesystem type for the mount volume. Optional.
+ FsType string `json:",omitempty"`
+
+ // MountFlags defines flags to pass when mounting the volume. Optional.
+ MountFlags []string `json:",omitempty"`
+}
+
+// TopologyRequirement expresses the user's requirements for a volume's
+// accessible topology.
+type TopologyRequirement struct {
+ // Requisite specifies a list of Topologies, at least one of which the
+ // volume must be accessible from.
+ //
+ // Taken verbatim from the CSI Spec:
+ //
+ // Specifies the list of topologies the provisioned volume MUST be
+ // accessible from.
+ // This field is OPTIONAL. If TopologyRequirement is specified either
+ // requisite or preferred or both MUST be specified.
+ //
+ // If requisite is specified, the provisioned volume MUST be
+ // accessible from at least one of the requisite topologies.
+ //
+ // Given
+ // x = number of topologies provisioned volume is accessible from
+ // n = number of requisite topologies
+ // The CO MUST ensure n >= 1. The SP MUST ensure x >= 1
+ // If x==n, then the SP MUST make the provisioned volume available to
+ // all topologies from the list of requisite topologies. If it is
+ // unable to do so, the SP MUST fail the CreateVolume call.
+ // For example, if a volume should be accessible from a single zone,
+ // and requisite =
+ // {"region": "R1", "zone": "Z2"}
+ // then the provisioned volume MUST be accessible from the "region"
+ // "R1" and the "zone" "Z2".
+ // Similarly, if a volume should be accessible from two zones, and
+ // requisite =
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"}
+ // then the provisioned volume MUST be accessible from the "region"
+ // "R1" and both "zone" "Z2" and "zone" "Z3".
+ //
+ // If x>n, then the SP MUST make the provisioned volume available from
+ // all topologies from the list of requisite topologies and MAY choose
+ // the remaining x-n unique topologies from the list of all possible
+ // topologies. If it is unable to do so, the SP MUST fail the
+ // CreateVolume call.
+ // For example, if a volume should be accessible from two zones, and
+ // requisite =
+ // {"region": "R1", "zone": "Z2"}
+ // then the provisioned volume MUST be accessible from the "region"
+ // "R1" and the "zone" "Z2" and the SP may select the second zone
+ // independently, e.g. "R1/Z4".
+ Requisite []Topology `json:",omitempty"`
+
+ // Preferred is a list of Topologies that the volume should attempt to be
+ // provisioned in.
+ //
+ // Taken from the CSI spec:
+ //
+ // Specifies the list of topologies the CO would prefer the volume to
+ // be provisioned in.
+ //
+ // This field is OPTIONAL. If TopologyRequirement is specified either
+ // requisite or preferred or both MUST be specified.
+ //
+ // An SP MUST attempt to make the provisioned volume available using
+ // the preferred topologies in order from first to last.
+ //
+ // If requisite is specified, all topologies in preferred list MUST
+ // also be present in the list of requisite topologies.
+ //
+ // If the SP is unable to make the provisioned volume available
+ // from any of the preferred topologies, the SP MAY choose a topology
+ // from the list of requisite topologies.
+ // If the list of requisite topologies is not specified, then the SP
+ // MAY choose from the list of all possible topologies.
+ // If the list of requisite topologies is specified and the SP is
+ // unable to make the provisioned volume available from any of the
+ // requisite topologies it MUST fail the CreateVolume call.
+ //
+ // Example 1:
+ // Given a volume should be accessible from a single zone, and
+ // requisite =
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"}
+ // preferred =
+ // {"region": "R1", "zone": "Z3"}
+ // then the SP SHOULD first attempt to make the provisioned volume
+ // available from "zone" "Z3" in the "region" "R1" and fall back to
+ // "zone" "Z2" in the "region" "R1" if that is not possible.
+ //
+ // Example 2:
+ // Given a volume should be accessible from a single zone, and
+ // requisite =
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"},
+ // {"region": "R1", "zone": "Z4"},
+ // {"region": "R1", "zone": "Z5"}
+ // preferred =
+ // {"region": "R1", "zone": "Z4"},
+ // {"region": "R1", "zone": "Z2"}
+ // then the SP SHOULD first attempt to make the provisioned volume
+ // accessible from "zone" "Z4" in the "region" "R1" and fall back to
+ // "zone" "Z2" in the "region" "R1" if that is not possible. If that
+ // is not possible, the SP may choose between either the "zone"
+ // "Z3" or "Z5" in the "region" "R1".
+ //
+ // Example 3:
+ // Given a volume should be accessible from TWO zones (because an
+ // opaque parameter in CreateVolumeRequest, for example, specifies
+ // the volume is accessible from two zones, aka synchronously
+ // replicated), and
+ // requisite =
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"},
+ // {"region": "R1", "zone": "Z4"},
+ // {"region": "R1", "zone": "Z5"}
+ // preferred =
+ // {"region": "R1", "zone": "Z5"},
+ // {"region": "R1", "zone": "Z3"}
+ // then the SP SHOULD first attempt to make the provisioned volume
+ // accessible from the combination of the two "zones" "Z5" and "Z3" in
+ // the "region" "R1". If that's not possible, it should fall back to
+ // a combination of "Z5" and other possibilities from the list of
+ // requisite. If that's not possible, it should fall back to a
+ // combination of "Z3" and other possibilities from the list of
+ // requisite. If that's not possible, it should fall back to a
+ // combination of other possibilities from the list of requisite.
+ Preferred []Topology `json:",omitempty"`
+}
+
+// Topology is a map of topological domains to topological segments.
+//
+// This description is taken verbatim from the CSI Spec:
+//
+// A topological domain is a sub-division of a cluster, like "region",
+// "zone", "rack", etc.
+// A topological segment is a specific instance of a topological domain,
+// like "zone3", "rack3", etc.
+// For example {"com.company/zone": "Z1", "com.company/rack": "R3"}
+// Valid keys have two segments: an OPTIONAL prefix and name, separated
+// by a slash (/), for example: "com.company.example/zone".
+// The key name segment is REQUIRED. The prefix is OPTIONAL.
+// The key name MUST be 63 characters or less, begin and end with an
+// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-),
+// underscores (_), dots (.), or alphanumerics in between, for example
+// "zone".
+// The key prefix MUST be 63 characters or less, begin and end with a
+// lower-case alphanumeric character ([a-z0-9]), contain only
+// dashes (-), dots (.), or lower-case alphanumerics in between, and
+// follow domain name notation format
+// (https://tools.ietf.org/html/rfc1035#section-2.3.1).
+// The key prefix SHOULD include the plugin's host company name and/or
+// the plugin name, to minimize the possibility of collisions with keys
+// from other plugins.
+// If a key prefix is specified, it MUST be identical across all
+// topology keys returned by the SP (across all RPCs).
+// Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone"
+// MUST not both exist.
+// Each value (topological segment) MUST contain 1 or more strings.
+// Each string MUST be 63 characters or less and begin and end with an
+// alphanumeric character with '-', '_', '.', or alphanumerics in
+// between.
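+//
+// Expressed as a Go literal, reusing the zone segment from the example
+// above:
+//
+//	Topology{Segments: map[string]string{"com.company/zone": "Z1"}}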
+type Topology struct {
+ Segments map[string]string `json:",omitempty"`
+}
+
+// CapacityRange describes the minimum and maximum capacity a volume should be
+// created with.
+type CapacityRange struct {
+ // RequiredBytes specifies that a volume must be at least this big. The
+ // value of 0 indicates an unspecified minimum.
+ RequiredBytes int64
+
+ // LimitBytes specifies that a volume must not be bigger than this. The
+ // value of 0 indicates an unspecified maximum.
+ LimitBytes int64
+}
+
+// Secret represents a Swarm Secret value that must be passed to the CSI
+// storage plugin when operating on this Volume. It represents one key-value
+// pair of possibly many.
+type Secret struct {
+ // Key is the name of the key of the key-value pair passed to the plugin.
+ Key string
+
+ // Secret is the swarm Secret object from which to read data. This can be a
+ // Secret name or ID. The Secret data is retrieved by Swarm and used as the
+ // value of the key-value pair passed to the plugin.
+ Secret string
+}
+
+// PublishState represents the state of a Volume as it pertains to its
+// use on a particular Node.
+type PublishState string
+
+const (
+ // StatePending indicates that the volume should be published on
+ // this node, but the call to ControllerPublishVolume has not been
+ // successfully completed yet and the result recorded by swarmkit.
+ StatePending PublishState = "pending-publish"
+
+ // StatePublished means the volume is published successfully to the node.
+ StatePublished PublishState = "published"
+
+ // StatePendingNodeUnpublish indicates that the Volume should be
+ // unpublished on the Node, and we're waiting for confirmation that it has
+ // done so. After the Node has confirmed that the Volume has been
+ // unpublished, the state will move to StatePendingUnpublish.
+ StatePendingNodeUnpublish PublishState = "pending-node-unpublish"
+
+ // StatePendingUnpublish means the volume is still published to the node
+ // by the controller, awaiting the operation to unpublish it.
+ StatePendingUnpublish PublishState = "pending-controller-unpublish"
+)
+
+// PublishStatus represents the status of the volume as published to an
+// individual node
+type PublishStatus struct {
+ // NodeID is the ID of the swarm node this Volume is published to.
+ NodeID string `json:",omitempty"`
+
+ // State is the publish state of the volume.
+ State PublishState `json:",omitempty"`
+
+ // PublishContext is the PublishContext returned by the CSI plugin when
+ // a volume is published.
+ PublishContext map[string]string `json:",omitempty"`
+}
+
+// Info contains information about the Volume as a whole as provided by
+// the CSI storage plugin.
+type Info struct {
+ // CapacityBytes is the capacity of the volume in bytes. A value of 0
+ // indicates that the capacity is unknown.
+ CapacityBytes int64 `json:",omitempty"`
+
+ // VolumeContext is the context originating from the CSI storage plugin
+ // when the Volume is created.
+ VolumeContext map[string]string `json:",omitempty"`
+
+ // VolumeID is the ID of the Volume as seen by the CSI storage plugin. This
+ // is distinct from the Volume's Swarm ID, which is the ID used throughout
+ // the Docker Engine to refer to the Volume. If this field is blank, then
+ // the Volume has not been successfully created yet.
+ VolumeID string `json:",omitempty"`
+
+ // AccessibleTopology is the topology this volume is actually accessible
+ // from.
+ AccessibleTopology []Topology `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/volume/create_options.go b/vendor/github.com/docker/docker/api/types/volume/create_options.go new file mode 100644 index 0000000000..37c41a6096 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/create_options.go @@ -0,0 +1,29 @@ +package volume + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// CreateOptions VolumeConfig +// +// Volume configuration +// swagger:model CreateOptions +type CreateOptions struct { + + // cluster volume spec + ClusterVolumeSpec *ClusterVolumeSpec `json:"ClusterVolumeSpec,omitempty"` + + // Name of the volume driver to use. + Driver string `json:"Driver,omitempty"` + + // A mapping of driver options and values. These options are + // passed directly to the driver and are driver specific. + // + DriverOpts map[string]string `json:"DriverOpts,omitempty"` + + // User-defined key/value metadata. + Labels map[string]string `json:"Labels,omitempty"` + + // The new volume's name. If not specified, Docker generates a name. + // + Name string `json:"Name,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/volume/list_response.go b/vendor/github.com/docker/docker/api/types/volume/list_response.go new file mode 100644 index 0000000000..ca5192a2a9 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/list_response.go @@ -0,0 +1,18 @@ +package volume + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ListResponse VolumeListResponse +// +// Volume list response +// swagger:model ListResponse +type ListResponse struct { + + // List of volumes + Volumes []*Volume `json:"Volumes"` + + // Warnings that occurred when fetching the list of volumes. + // + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/api/types/volume/options.go b/vendor/github.com/docker/docker/api/types/volume/options.go new file mode 100644 index 0000000000..8b0dd13899 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/options.go @@ -0,0 +1,8 @@ +package volume // import "github.com/docker/docker/api/types/volume" + +import "github.com/docker/docker/api/types/filters" + +// ListOptions holds parameters to list volumes. +type ListOptions struct { + Filters filters.Args +} diff --git a/vendor/github.com/docker/docker/api/types/volume/volume.go b/vendor/github.com/docker/docker/api/types/volume/volume.go new file mode 100644 index 0000000000..ea7d555e5b --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/volume.go @@ -0,0 +1,75 @@ +package volume + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Volume volume +// swagger:model Volume +type Volume struct { + + // cluster volume + ClusterVolume *ClusterVolume `json:"ClusterVolume,omitempty"` + + // Date/Time the volume was created. + CreatedAt string `json:"CreatedAt,omitempty"` + + // Name of the volume driver used by the volume. + // Required: true + Driver string `json:"Driver"` + + // User-defined key/value metadata. + // Required: true + Labels map[string]string `json:"Labels"` + + // Mount path of the volume on the host. 
+ // Required: true + Mountpoint string `json:"Mountpoint"` + + // Name of the volume. + // Required: true + Name string `json:"Name"` + + // The driver specific options used when creating the volume. + // + // Required: true + Options map[string]string `json:"Options"` + + // The level at which the volume exists. Either `global` for cluster-wide, + // or `local` for machine level. + // + // Required: true + Scope string `json:"Scope"` + + // Low-level details about the volume, provided by the volume driver. + // Details are returned as a map with key/value pairs: + // `{"key":"value","key2":"value2"}`. + // + // The `Status` field is optional, and is omitted if the volume driver + // does not support this feature. + // + Status map[string]interface{} `json:"Status,omitempty"` + + // usage data + UsageData *UsageData `json:"UsageData,omitempty"` +} + +// UsageData Usage details about the volume. This information is used by the +// `GET /system/df` endpoint, and omitted in other endpoints. +// +// swagger:model UsageData +type UsageData struct { + + // The number of containers referencing this volume. This field + // is set to `-1` if the reference-count is not available. + // + // Required: true + RefCount int64 `json:"RefCount"` + + // Amount of disk space used by the volume (in bytes). This information + // is only available for volumes created with the `"local"` volume + // driver. For volumes created with other volume drivers, this field + // is set to `-1` ("not available") + // + // Required: true + Size int64 `json:"Size"` +} diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_update.go b/vendor/github.com/docker/docker/api/types/volume/volume_update.go new file mode 100644 index 0000000000..f958f80a66 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/volume_update.go @@ -0,0 +1,7 @@ +package volume // import "github.com/docker/docker/api/types/volume" + +// UpdateOptions is configuration to update a Volume with. +type UpdateOptions struct { + // Spec is the ClusterVolumeSpec to update the volume to. + Spec *ClusterVolumeSpec `json:"Spec,omitempty"` +} diff --git a/vendor/github.com/docker/docker/client/README.md b/vendor/github.com/docker/docker/client/README.md new file mode 100644 index 0000000000..992f18117d --- /dev/null +++ b/vendor/github.com/docker/docker/client/README.md @@ -0,0 +1,35 @@ +# Go client for the Docker Engine API + +The `docker` command uses this package to communicate with the daemon. It can also be used by your own Go applications to do anything the command-line interface does – running containers, pulling images, managing swarms, etc. 
+ +For example, to list running containers (the equivalent of `docker ps`): + +```go +package main + +import ( + "context" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" +) + +func main() { + cli, err := client.NewClientWithOpts(client.FromEnv) + if err != nil { + panic(err) + } + + containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) + if err != nil { + panic(err) + } + + for _, container := range containers { + fmt.Printf("%s %s\n", container.ID[:10], container.Image) + } +} +``` + +[Full documentation is available on GoDoc.](https://godoc.org/github.com/docker/docker/client) diff --git a/vendor/github.com/docker/docker/client/build_cancel.go b/vendor/github.com/docker/docker/client/build_cancel.go new file mode 100644 index 0000000000..b76bf366bb --- /dev/null +++ b/vendor/github.com/docker/docker/client/build_cancel.go @@ -0,0 +1,16 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" +) + +// BuildCancel requests the daemon to cancel the ongoing build request. +func (cli *Client) BuildCancel(ctx context.Context, id string) error { + query := url.Values{} + query.Set("id", id) + + serverResp, err := cli.post(ctx, "/build/cancel", query, nil, nil) + ensureReaderClosed(serverResp) + return err +} diff --git a/vendor/github.com/docker/docker/client/build_prune.go b/vendor/github.com/docker/docker/client/build_prune.go new file mode 100644 index 0000000000..2b6606236e --- /dev/null +++ b/vendor/github.com/docker/docker/client/build_prune.go @@ -0,0 +1,45 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/pkg/errors" +) + +// BuildCachePrune requests the daemon to delete unused cache data +func (cli *Client) BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) { + if err := cli.NewVersionError("1.31", "build prune"); err != nil { + return nil, err + } + + report := types.BuildCachePruneReport{} + + query := url.Values{} + if opts.All { + query.Set("all", "1") + } + query.Set("keep-storage", strconv.Itoa(int(opts.KeepStorage))) + f, err := filters.ToJSON(opts.Filters) + if err != nil { + return nil, errors.Wrap(err, "prune could not marshal filters option") + } + query.Set("filters", f) + + serverResp, err := cli.post(ctx, "/build/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) + + if err != nil { + return nil, err + } + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return nil, errors.Wrap(err, "error retrieving disk usage") + } + + return &report, nil +} diff --git a/vendor/github.com/docker/docker/client/checkpoint_create.go b/vendor/github.com/docker/docker/client/checkpoint_create.go new file mode 100644 index 0000000000..921024fe4f --- /dev/null +++ b/vendor/github.com/docker/docker/client/checkpoint_create.go @@ -0,0 +1,14 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + + "github.com/docker/docker/api/types" +) + +// CheckpointCreate creates a checkpoint from the given container with the given name +func (cli *Client) CheckpointCreate(ctx context.Context, container string, options 
types.CheckpointCreateOptions) error { + resp, err := cli.post(ctx, "/containers/"+container+"/checkpoints", nil, options, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/checkpoint_delete.go b/vendor/github.com/docker/docker/client/checkpoint_delete.go new file mode 100644 index 0000000000..54f55fa76e --- /dev/null +++ b/vendor/github.com/docker/docker/client/checkpoint_delete.go @@ -0,0 +1,20 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// CheckpointDelete deletes the checkpoint with the given name from the given container +func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options types.CheckpointDeleteOptions) error { + query := url.Values{} + if options.CheckpointDir != "" { + query.Set("dir", options.CheckpointDir) + } + + resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+options.CheckpointID, query, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/checkpoint_list.go b/vendor/github.com/docker/docker/client/checkpoint_list.go new file mode 100644 index 0000000000..39cfb959ff --- /dev/null +++ b/vendor/github.com/docker/docker/client/checkpoint_list.go @@ -0,0 +1,28 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" +) + +// CheckpointList returns the checkpoints of the given container in the docker host +func (cli *Client) CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) { + var checkpoints []types.Checkpoint + + query := url.Values{} + if options.CheckpointDir != "" { + query.Set("dir", options.CheckpointDir) + } + + resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return checkpoints, err + } + + err = json.NewDecoder(resp.body).Decode(&checkpoints) + return checkpoints, err +} diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go new file mode 100644 index 0000000000..54fa36cca8 --- /dev/null +++ b/vendor/github.com/docker/docker/client/client.go @@ -0,0 +1,354 @@ +/* +Package client is a Go client for the Docker Engine API. + +For more information about the Engine API, see the documentation: +https://docs.docker.com/engine/api/ + +# Usage + +You use the library by constructing a client object using [NewClientWithOpts] +and calling methods on it. The client can be configured from environment +variables by passing the [FromEnv] option, or configured manually by passing any +of the other available [Opts]. 
+ +For example, to list running containers (the equivalent of "docker ps"): + + package main + + import ( + "context" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + ) + + func main() { + cli, err := client.NewClientWithOpts(client.FromEnv) + if err != nil { + panic(err) + } + + containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) + if err != nil { + panic(err) + } + + for _, container := range containers { + fmt.Printf("%s %s\n", container.ID[:10], container.Image) + } + } +*/ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net" + "net/http" + "net/url" + "path" + "strings" + + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/docker/go-connections/sockets" + "github.com/pkg/errors" +) + +// DummyHost is a hostname used for local communication. +// +// It acts as a valid formatted hostname for local connections (such as "unix://" +// or "npipe://") which do not require a hostname. It should never be resolved, +// but uses the special-purpose ".localhost" TLD (as defined in [RFC 2606, Section 2] +// and [RFC 6761, Section 6.3]). +// +// [RFC 7230, Section 5.4] defines that an empty header must be used for such +// cases: +// +// If the authority component is missing or undefined for the target URI, +// then a client MUST send a Host header field with an empty field-value. +// +// However, [Go stdlib] enforces the semantics of HTTP(S) over TCP, does not +// allow an empty header to be used, and requires req.URL.Scheme to be either +// "http" or "https". +// +// For further details, refer to: +// +// - https://github.com/docker/engine-api/issues/189 +// - https://github.com/golang/go/issues/13624 +// - https://github.com/golang/go/issues/61076 +// - https://github.com/moby/moby/issues/45935 +// +// [RFC 2606, Section 2]: https://www.rfc-editor.org/rfc/rfc2606.html#section-2 +// [RFC 6761, Section 6.3]: https://www.rfc-editor.org/rfc/rfc6761#section-6.3 +// [RFC 7230, Section 5.4]: https://datatracker.ietf.org/doc/html/rfc7230#section-5.4 +// [Go stdlib]: https://github.com/golang/go/blob/6244b1946bc2101b01955468f1be502dbadd6807/src/net/http/transport.go#L558-L569 +const DummyHost = "api.moby.localhost" + +// ErrRedirect is the error returned by checkRedirect when the request is non-GET. +var ErrRedirect = errors.New("unexpected redirect in response") + +// Client is the API client that performs all operations +// against a docker server. +type Client struct { + // scheme sets the scheme for the client + scheme string + // host holds the server address to connect to + host string + // proto holds the client protocol i.e. unix. + proto string + // addr holds the client address. + addr string + // basePath holds the path to prepend to the requests. + basePath string + // client used to send and receive http requests. + client *http.Client + // version of the server to talk to. + version string + // custom http headers configured by users. + customHTTPHeaders map[string]string + // manualOverride is set to true when the version was set by users. + manualOverride bool + + // negotiateVersion indicates if the client should automatically negotiate + // the API version to use when making requests. 
API version negotiation is + // performed on the first request, after which negotiated is set to "true" + // so that subsequent requests do not re-negotiate. + negotiateVersion bool + + // negotiated indicates that API version negotiation took place + negotiated bool +} + +// CheckRedirect specifies the policy for dealing with redirect responses: +// If the request is non-GET return ErrRedirect, otherwise use the last response. +// +// Go 1.8 changes behavior for HTTP redirects (specifically 301, 307, and 308) +// in the client. The Docker client (and by extension docker API client) can be +// made to send a request like POST /containers//start where what would normally +// be in the name section of the URL is empty. This triggers an HTTP 301 from +// the daemon. +// +// In go 1.8 this 301 will be converted to a GET request, and ends up getting +// a 404 from the daemon. This behavior change manifests in the client in that +// before, the 301 was not followed and the client did not generate an error, +// but now results in a message like Error response from daemon: page not found. +func CheckRedirect(req *http.Request, via []*http.Request) error { + if via[0].Method == http.MethodGet { + return http.ErrUseLastResponse + } + return ErrRedirect +} + +// NewClientWithOpts initializes a new API client with a default HTTPClient, and +// default API host and version. It also initializes the custom HTTP headers to +// add to each request. +// +// It takes an optional list of Opt functional arguments, which are applied in +// the order they're provided, which allows modifying the defaults when creating +// the client. For example, the following initializes a client that configures +// itself with values from environment variables (client.FromEnv), and has +// automatic API version negotiation enabled (client.WithAPIVersionNegotiation()). +// +// cli, err := client.NewClientWithOpts( +// client.FromEnv, +// client.WithAPIVersionNegotiation(), +// ) +func NewClientWithOpts(ops ...Opt) (*Client, error) { + hostURL, err := ParseHostURL(DefaultDockerHost) + if err != nil { + return nil, err + } + + client, err := defaultHTTPClient(hostURL) + if err != nil { + return nil, err + } + c := &Client{ + host: DefaultDockerHost, + version: api.DefaultVersion, + client: client, + proto: hostURL.Scheme, + addr: hostURL.Host, + } + + for _, op := range ops { + if err := op(c); err != nil { + return nil, err + } + } + + if c.scheme == "" { + c.scheme = "http" + + tlsConfig := resolveTLSConfig(c.client.Transport) + if tlsConfig != nil { + // TODO(stevvooe): This isn't really the right way to write clients in Go. + // `NewClient` should probably only take an `*http.Client` and work from there. + // Unfortunately, the model of having a host-ish/url-thingy as the connection + // string has us confusing protocol and transport layers. We continue doing + // this to avoid breaking existing clients but this should be addressed. 
+ c.scheme = "https" + } + } + + return c, nil +} + +func defaultHTTPClient(hostURL *url.URL) (*http.Client, error) { + transport := &http.Transport{} + err := sockets.ConfigureTransport(transport, hostURL.Scheme, hostURL.Host) + if err != nil { + return nil, err + } + return &http.Client{ + Transport: transport, + CheckRedirect: CheckRedirect, + }, nil +} + +// Close the transport used by the client +func (cli *Client) Close() error { + if t, ok := cli.client.Transport.(*http.Transport); ok { + t.CloseIdleConnections() + } + return nil +} + +// getAPIPath returns the versioned request path to call the api. +// It appends the query parameters to the path if they are not empty. +func (cli *Client) getAPIPath(ctx context.Context, p string, query url.Values) string { + var apiPath string + if cli.negotiateVersion && !cli.negotiated { + cli.NegotiateAPIVersion(ctx) + } + if cli.version != "" { + v := strings.TrimPrefix(cli.version, "v") + apiPath = path.Join(cli.basePath, "/v"+v, p) + } else { + apiPath = path.Join(cli.basePath, p) + } + return (&url.URL{Path: apiPath, RawQuery: query.Encode()}).String() +} + +// ClientVersion returns the API version used by this client. +func (cli *Client) ClientVersion() string { + return cli.version +} + +// NegotiateAPIVersion queries the API and updates the version to match the API +// version. NegotiateAPIVersion downgrades the client's API version to match the +// APIVersion if the ping version is lower than the default version. If the API +// version reported by the server is higher than the maximum version supported +// by the client, it uses the client's maximum version. +// +// If a manual override is in place, either through the "DOCKER_API_VERSION" +// (EnvOverrideAPIVersion) environment variable, or if the client is initialized +// with a fixed version (WithVersion(xx)), no negotiation is performed. +// +// If the API server's ping response does not contain an API version, or if the +// client did not get a successful ping response, it assumes it is connected with +// an old daemon that does not support API version negotiation, in which case it +// downgrades to the latest version of the API before version negotiation was +// added (1.24). +func (cli *Client) NegotiateAPIVersion(ctx context.Context) { + if !cli.manualOverride { + ping, _ := cli.Ping(ctx) + cli.negotiateAPIVersionPing(ping) + } +} + +// NegotiateAPIVersionPing downgrades the client's API version to match the +// APIVersion in the ping response. If the API version in pingResponse is higher +// than the maximum version supported by the client, it uses the client's maximum +// version. +// +// If a manual override is in place, either through the "DOCKER_API_VERSION" +// (EnvOverrideAPIVersion) environment variable, or if the client is initialized +// with a fixed version (WithVersion(xx)), no negotiation is performed. +// +// If the API server's ping response does not contain an API version, we assume +// we are connected with an old daemon without API version negotiation support, +// and downgrade to the latest version of the API before version negotiation was +// added (1.24). +func (cli *Client) NegotiateAPIVersionPing(pingResponse types.Ping) { + if !cli.manualOverride { + cli.negotiateAPIVersionPing(pingResponse) + } +} + +// negotiateAPIVersionPing queries the API and updates the version to match the +// API version from the ping response. 
+func (cli *Client) negotiateAPIVersionPing(pingResponse types.Ping) { + // default to the latest version before versioning headers existed + if pingResponse.APIVersion == "" { + pingResponse.APIVersion = "1.24" + } + + // if the client is not initialized with a version, start with the latest supported version + if cli.version == "" { + cli.version = api.DefaultVersion + } + + // if server version is lower than the client version, downgrade + if versions.LessThan(pingResponse.APIVersion, cli.version) { + cli.version = pingResponse.APIVersion + } + + // Store the results, so that automatic API version negotiation (if enabled) + // won't be performed on the next request. + if cli.negotiateVersion { + cli.negotiated = true + } +} + +// DaemonHost returns the host address used by the client +func (cli *Client) DaemonHost() string { + return cli.host +} + +// HTTPClient returns a copy of the HTTP client bound to the server +func (cli *Client) HTTPClient() *http.Client { + c := *cli.client + return &c +} + +// ParseHostURL parses a url string, validates the string is a host url, and +// returns the parsed URL +func ParseHostURL(host string) (*url.URL, error) { + proto, addr, ok := strings.Cut(host, "://") + if !ok || addr == "" { + return nil, errors.Errorf("unable to parse docker host `%s`", host) + } + + var basePath string + if proto == "tcp" { + parsed, err := url.Parse("tcp://" + addr) + if err != nil { + return nil, err + } + addr = parsed.Host + basePath = parsed.Path + } + return &url.URL{ + Scheme: proto, + Host: addr, + Path: basePath, + }, nil +} + +// Dialer returns a dialer for a raw stream connection, with an HTTP/1.1 header, +// that can be used for proxying the daemon connection. +// +// Used by `docker dial-stdio` (docker/cli#889). +func (cli *Client) Dialer() func(context.Context) (net.Conn, error) { + return func(ctx context.Context) (net.Conn, error) { + if transport, ok := cli.client.Transport.(*http.Transport); ok { + if transport.DialContext != nil && transport.TLSClientConfig == nil { + return transport.DialContext(ctx, cli.proto, cli.addr) + } + } + return fallbackDial(cli.proto, cli.addr, resolveTLSConfig(cli.client.Transport)) + } +} diff --git a/vendor/github.com/docker/docker/client/client_deprecated.go b/vendor/github.com/docker/docker/client/client_deprecated.go new file mode 100644 index 0000000000..9e366ce20d --- /dev/null +++ b/vendor/github.com/docker/docker/client/client_deprecated.go @@ -0,0 +1,27 @@ +package client + +import "net/http" + +// NewClient initializes a new API client for the given host and API version. +// It uses the given http client as transport. +// It also initializes the custom http headers to add to each request. +// +// It won't send any version information if the version number is empty. It is +// highly recommended that you set a version or your client may break if the +// server is upgraded. +// +// Deprecated: use [NewClientWithOpts] passing the [WithHost], [WithVersion], +// [WithHTTPClient] and [WithHTTPHeaders] options. We recommend enabling API +// version negotiation by passing the [WithAPIVersionNegotiation] option instead +// of WithVersion. +func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) { + return NewClientWithOpts(WithHost(host), WithVersion(version), WithHTTPClient(client), WithHTTPHeaders(httpHeaders)) +} + +// NewEnvClient initializes a new API client based on environment variables. 
+// See FromEnv for a list of supported environment variables.
+//
+// Deprecated: use [NewClientWithOpts] passing the [FromEnv] option.
+func NewEnvClient() (*Client, error) {
+	return NewClientWithOpts(FromEnv)
+}
diff --git a/vendor/github.com/docker/docker/client/client_unix.go b/vendor/github.com/docker/docker/client/client_unix.go
new file mode 100644
index 0000000000..319b738d3e
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/client_unix.go
@@ -0,0 +1,8 @@
+//go:build !windows
+// +build !windows
+
+package client // import "github.com/docker/docker/client"
+
+// DefaultDockerHost defines OS-specific default host if the DOCKER_HOST
+// (EnvOverrideHost) environment variable is unset or empty.
+const DefaultDockerHost = "unix:///var/run/docker.sock"
diff --git a/vendor/github.com/docker/docker/client/client_windows.go b/vendor/github.com/docker/docker/client/client_windows.go
new file mode 100644
index 0000000000..56572d1a27
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/client_windows.go
@@ -0,0 +1,5 @@
+package client // import "github.com/docker/docker/client"
+
+// DefaultDockerHost defines OS-specific default host if the DOCKER_HOST
+// (EnvOverrideHost) environment variable is unset or empty.
+const DefaultDockerHost = "npipe:////./pipe/docker_engine"
diff --git a/vendor/github.com/docker/docker/client/config_create.go b/vendor/github.com/docker/docker/client/config_create.go
new file mode 100644
index 0000000000..f6b1881fc3
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/config_create.go
@@ -0,0 +1,25 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+	"context"
+	"encoding/json"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/swarm"
+)
+
+// ConfigCreate creates a new config.
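+//
+// A minimal usage sketch (assumes a client "cli" connected to a swarm
+// manager; the spec values are illustrative, and error handling is elided):
+//
+//	spec := swarm.ConfigSpec{
+//		Annotations: swarm.Annotations{Name: "app-config"},
+//		Data:        []byte("key=value"),
+//	}
+//	resp, _ := cli.ConfigCreate(context.Background(), spec)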
+func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) { + var response types.ConfigCreateResponse + if err := cli.NewVersionError("1.30", "config create"); err != nil { + return response, err + } + resp, err := cli.post(ctx, "/configs/create", nil, config, nil) + defer ensureReaderClosed(resp) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/config_inspect.go b/vendor/github.com/docker/docker/client/config_inspect.go new file mode 100644 index 0000000000..9be7882c3d --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_inspect.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io" + + "github.com/docker/docker/api/types/swarm" +) + +// ConfigInspectWithRaw returns the config information with raw data +func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.Config, []byte, error) { + if id == "" { + return swarm.Config{}, nil, objectNotFoundError{object: "config", id: id} + } + if err := cli.NewVersionError("1.30", "config inspect"); err != nil { + return swarm.Config{}, nil, err + } + resp, err := cli.get(ctx, "/configs/"+id, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return swarm.Config{}, nil, err + } + + body, err := io.ReadAll(resp.body) + if err != nil { + return swarm.Config{}, nil, err + } + + var config swarm.Config + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&config) + + return config, body, err +} diff --git a/vendor/github.com/docker/docker/client/config_list.go b/vendor/github.com/docker/docker/client/config_list.go new file mode 100644 index 0000000000..565acc6e27 --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_list.go @@ -0,0 +1,38 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" +) + +// ConfigList returns the list of configs. +func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) { + if err := cli.NewVersionError("1.30", "config list"); err != nil { + return nil, err + } + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToJSON(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/configs", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return nil, err + } + + var configs []swarm.Config + err = json.NewDecoder(resp.body).Decode(&configs) + return configs, err +} diff --git a/vendor/github.com/docker/docker/client/config_remove.go b/vendor/github.com/docker/docker/client/config_remove.go new file mode 100644 index 0000000000..24b94e9c18 --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_remove.go @@ -0,0 +1,13 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// ConfigRemove removes a config. 
+func (cli *Client) ConfigRemove(ctx context.Context, id string) error {
+	if err := cli.NewVersionError("1.30", "config remove"); err != nil {
+		return err
+	}
+	resp, err := cli.delete(ctx, "/configs/"+id, nil, nil)
+	defer ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/github.com/docker/docker/client/config_update.go b/vendor/github.com/docker/docker/client/config_update.go
new file mode 100644
index 0000000000..1ac2985435
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/config_update.go
@@ -0,0 +1,20 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+	"context"
+	"net/url"
+
+	"github.com/docker/docker/api/types/swarm"
+)
+
+// ConfigUpdate attempts to update a config.
+func (cli *Client) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error {
+	if err := cli.NewVersionError("1.30", "config update"); err != nil {
+		return err
+	}
+	query := url.Values{}
+	query.Set("version", version.String())
+	resp, err := cli.post(ctx, "/configs/"+id+"/update", query, config, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/github.com/docker/docker/client/container_attach.go b/vendor/github.com/docker/docker/client/container_attach.go
new file mode 100644
index 0000000000..ba92117d3e
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_attach.go
@@ -0,0 +1,59 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+	"context"
+	"net/url"
+
+	"github.com/docker/docker/api/types"
+)
+
+// ContainerAttach attaches a connection to a container in the server.
+// It returns a types.HijackedResponse with the hijacked connection
+// and a reader to get output. It's up to the caller to close
+// the hijacked connection by calling types.HijackedResponse.Close.
+//
+// The stream format on the response will be in one of two formats:
+//
+// If the container is using a TTY, there is only a single stream (stdout), and
+// data is copied directly from the container output stream, no extra
+// multiplexing or headers.
+//
+// If the container is *not* using a TTY, streams for stdout and stderr are
+// multiplexed.
+// The format of the multiplexed stream is as follows:
+//
+//	[8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT}
+//
+// STREAM_TYPE can be 1 for stdout and 2 for stderr.
+//
+// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big
+// endian. This is the size of OUTPUT.
+//
+// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this
+// stream.
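+//
+// A minimal demultiplexing sketch (assumes a non-TTY container, an existing
+// client "cli", and imports of "os" and the stdcopy package named above;
+// error handling elided):
+//
+//	resp, err := cli.ContainerAttach(ctx, containerID, types.ContainerAttachOptions{
+//		Stream: true,
+//		Stdout: true,
+//		Stderr: true,
+//	})
+//	if err == nil {
+//		defer resp.Close()
+//		_, _ = stdcopy.StdCopy(os.Stdout, os.Stderr, resp.Reader)
+//	}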
+func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) { + query := url.Values{} + if options.Stream { + query.Set("stream", "1") + } + if options.Stdin { + query.Set("stdin", "1") + } + if options.Stdout { + query.Set("stdout", "1") + } + if options.Stderr { + query.Set("stderr", "1") + } + if options.DetachKeys != "" { + query.Set("detachKeys", options.DetachKeys) + } + if options.Logs { + query.Set("logs", "1") + } + + headers := map[string][]string{ + "Content-Type": {"text/plain"}, + } + return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers) +} diff --git a/vendor/github.com/docker/docker/client/container_commit.go b/vendor/github.com/docker/docker/client/container_commit.go new file mode 100644 index 0000000000..cd7f763464 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_commit.go @@ -0,0 +1,55 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "errors" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" +) + +// ContainerCommit applies changes to a container and creates a new tagged image. +func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) { + var repository, tag string + if options.Reference != "" { + ref, err := reference.ParseNormalizedNamed(options.Reference) + if err != nil { + return types.IDResponse{}, err + } + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return types.IDResponse{}, errors.New("refusing to create a tag with a digest reference") + } + ref = reference.TagNameOnly(ref) + + if tagged, ok := ref.(reference.Tagged); ok { + tag = tagged.Tag() + } + repository = reference.FamiliarName(ref) + } + + query := url.Values{} + query.Set("container", container) + query.Set("repo", repository) + query.Set("tag", tag) + query.Set("comment", options.Comment) + query.Set("author", options.Author) + for _, change := range options.Changes { + query.Add("changes", change) + } + if !options.Pause { + query.Set("pause", "0") + } + + var response types.IDResponse + resp, err := cli.post(ctx, "/commit", query, options.Config, nil) + defer ensureReaderClosed(resp) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/container_copy.go b/vendor/github.com/docker/docker/client/container_copy.go new file mode 100644 index 0000000000..883be7fa34 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_copy.go @@ -0,0 +1,93 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "path/filepath" + "strings" + + "github.com/docker/docker/api/types" +) + +// ContainerStatPath returns stat information about a path inside the container filesystem. +func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) { + query := url.Values{} + query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. 
+ + urlStr := "/containers/" + containerID + "/archive" + response, err := cli.head(ctx, urlStr, query, nil) + defer ensureReaderClosed(response) + if err != nil { + return types.ContainerPathStat{}, err + } + return getContainerPathStatFromHeader(response.header) +} + +// CopyToContainer copies content into the container filesystem. +// Note that `content` must be a Reader for a TAR archive +func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath string, content io.Reader, options types.CopyToContainerOptions) error { + query := url.Values{} + query.Set("path", filepath.ToSlash(dstPath)) // Normalize the paths used in the API. + // Do not allow for an existing directory to be overwritten by a non-directory and vice versa. + if !options.AllowOverwriteDirWithFile { + query.Set("noOverwriteDirNonDir", "true") + } + + if options.CopyUIDGID { + query.Set("copyUIDGID", "true") + } + + apiPath := "/containers/" + containerID + "/archive" + + response, err := cli.putRaw(ctx, apiPath, query, content, nil) + defer ensureReaderClosed(response) + if err != nil { + return err + } + + return nil +} + +// CopyFromContainer gets the content from the container and returns it as a Reader +// for a TAR archive to manipulate it in the host. It's up to the caller to close the reader. +func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { + query := make(url.Values, 1) + query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. + + apiPath := "/containers/" + containerID + "/archive" + response, err := cli.get(ctx, apiPath, query, nil) + if err != nil { + return nil, types.ContainerPathStat{}, err + } + + // In order to get the copy behavior right, we need to know information + // about both the source and the destination. The response headers include + // stat info about the source that we can use in deciding exactly how to + // copy it locally. Along with the stat info about the local destination, + // we have everything we need to handle the multiple possibilities there + // can be when copying a file/dir from one location to another file/dir. 
+ stat, err := getContainerPathStatFromHeader(response.header) + if err != nil { + return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err) + } + return response.body, stat, err +} + +func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) { + var stat types.ContainerPathStat + + encodedStat := header.Get("X-Docker-Container-Path-Stat") + statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat)) + + err := json.NewDecoder(statDecoder).Decode(&stat) + if err != nil { + err = fmt.Errorf("unable to decode container path stat header: %s", err) + } + + return stat, err +} diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go new file mode 100644 index 0000000000..193a2bb562 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_create.go @@ -0,0 +1,83 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + "path" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/versions" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +type configWrapper struct { + *container.Config + HostConfig *container.HostConfig + NetworkingConfig *network.NetworkingConfig +} + +// ContainerCreate creates a new container based on the given configuration. +// It can be associated with a name, but it's not mandatory. +func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *ocispec.Platform, containerName string) (container.CreateResponse, error) { + var response container.CreateResponse + + if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil { + return response, err + } + if err := cli.NewVersionError("1.41", "specify container image platform"); platform != nil && err != nil { + return response, err + } + + if hostConfig != nil { + if versions.LessThan(cli.ClientVersion(), "1.25") { + // When using API 1.24 and under, the client is responsible for removing the container + hostConfig.AutoRemove = false + } + if versions.GreaterThanOrEqualTo(cli.ClientVersion(), "1.42") || versions.LessThan(cli.ClientVersion(), "1.40") { + // KernelMemory was added in API 1.40, and deprecated in API 1.42 + hostConfig.KernelMemory = 0 + } + if platform != nil && platform.OS == "linux" && versions.LessThan(cli.ClientVersion(), "1.42") { + // When using API under 1.42, the Linux daemon doesn't respect the ConsoleSize + hostConfig.ConsoleSize = [2]uint{0, 0} + } + } + + query := url.Values{} + if p := formatPlatform(platform); p != "" { + query.Set("platform", p) + } + + if containerName != "" { + query.Set("name", containerName) + } + + body := configWrapper{ + Config: config, + HostConfig: hostConfig, + NetworkingConfig: networkingConfig, + } + + serverResp, err := cli.post(ctx, "/containers/create", query, body, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return response, err + } + + err = json.NewDecoder(serverResp.body).Decode(&response) + return response, err +} + +// formatPlatform returns a formatted string representing platform (e.g. linux/arm/v7). 
+//
+// Similar to containerd's platforms.Format(), but does allow components to be
+// omitted (e.g. pass "architecture" only, without "os"); see
+// https://github.com/containerd/containerd/blob/v1.5.2/platforms/platforms.go#L243-L263
+func formatPlatform(platform *ocispec.Platform) string {
+	if platform == nil {
+		return ""
+	}
+	return path.Join(platform.OS, platform.Architecture, platform.Variant)
+}
diff --git a/vendor/github.com/docker/docker/client/container_diff.go b/vendor/github.com/docker/docker/client/container_diff.go
new file mode 100644
index 0000000000..c22c819a79
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_diff.go
@@ -0,0 +1,23 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+	"context"
+	"encoding/json"
+	"net/url"
+
+	"github.com/docker/docker/api/types/container"
+)
+
+// ContainerDiff shows differences in a container filesystem since it was started.
+func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]container.FilesystemChange, error) {
+	var changes []container.FilesystemChange
+
+	serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil)
+	defer ensureReaderClosed(serverResp)
+	if err != nil {
+		return changes, err
+	}
+
+	err = json.NewDecoder(serverResp.body).Decode(&changes)
+	return changes, err
+}
diff --git a/vendor/github.com/docker/docker/client/container_exec.go b/vendor/github.com/docker/docker/client/container_exec.go
new file mode 100644
index 0000000000..6a2cb006f8
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_exec.go
@@ -0,0 +1,66 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+	"context"
+	"encoding/json"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/versions"
+)
+
+// ContainerExecCreate creates a new exec configuration to run an exec process.
+func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) {
+	var response types.IDResponse
+
+	if err := cli.NewVersionError("1.25", "env"); len(config.Env) != 0 && err != nil {
+		return response, err
+	}
+	if versions.LessThan(cli.ClientVersion(), "1.42") {
+		config.ConsoleSize = nil
+	}
+
+	resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil)
+	defer ensureReaderClosed(resp)
+	if err != nil {
+		return response, err
+	}
+	err = json.NewDecoder(resp.body).Decode(&response)
+	return response, err
+}
+
+// ContainerExecStart starts an exec process already created in the docker host.
+func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error {
+	if versions.LessThan(cli.ClientVersion(), "1.42") {
+		config.ConsoleSize = nil
+	}
+	resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil)
+	ensureReaderClosed(resp)
+	return err
+}
+
+// ContainerExecAttach attaches a connection to an exec process in the server.
+// It returns a types.HijackedResponse with the hijacked connection
+// and a reader to get output. It's up to the caller to close
+// the hijacked connection by calling types.HijackedResponse.Close.
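+//
+// A minimal sketch of the create/attach flow (names are illustrative;
+// error handling elided):
+//
+//	idResp, _ := cli.ContainerExecCreate(ctx, containerID, types.ExecConfig{
+//		Cmd:          []string{"echo", "hello"},
+//		AttachStdout: true,
+//	})
+//	hijacked, _ := cli.ContainerExecAttach(ctx, idResp.ID, types.ExecStartCheck{})
+//	defer hijacked.Close()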
+func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) { + if versions.LessThan(cli.ClientVersion(), "1.42") { + config.ConsoleSize = nil + } + headers := map[string][]string{ + "Content-Type": {"application/json"}, + } + return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers) +} + +// ContainerExecInspect returns information about a specific exec process on the docker host. +func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) { + var response types.ContainerExecInspect + resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/container_export.go b/vendor/github.com/docker/docker/client/container_export.go new file mode 100644 index 0000000000..d0c0a5cbad --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_export.go @@ -0,0 +1,19 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" +) + +// ContainerExport retrieves the raw contents of a container +// and returns them as an io.ReadCloser. It's up to the caller +// to close the stream. +func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) { + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil) + if err != nil { + return nil, err + } + + return serverResp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/container_inspect.go b/vendor/github.com/docker/docker/client/container_inspect.go new file mode 100644 index 0000000000..d48f0d3a68 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_inspect.go @@ -0,0 +1,53 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ContainerInspect returns the container information. +func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) { + if containerID == "" { + return types.ContainerJSON{}, objectNotFoundError{object: "container", id: containerID} + } + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return types.ContainerJSON{}, err + } + + var response types.ContainerJSON + err = json.NewDecoder(serverResp.body).Decode(&response) + return response, err +} + +// ContainerInspectWithRaw returns the container information and its raw representation. 
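+//
+// A minimal sketch (illustrative): the raw bytes can be decoded into a
+// custom structure when fields beyond types.ContainerJSON are needed.
+//
+//	var extra struct {
+//		Name string `json:"Name"`
+//	}
+//	_, raw, _ := cli.ContainerInspectWithRaw(ctx, containerID, true)
+//	_ = json.Unmarshal(raw, &extra)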
+func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) { + if containerID == "" { + return types.ContainerJSON{}, nil, objectNotFoundError{object: "container", id: containerID} + } + query := url.Values{} + if getSize { + query.Set("size", "1") + } + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return types.ContainerJSON{}, nil, err + } + + body, err := io.ReadAll(serverResp.body) + if err != nil { + return types.ContainerJSON{}, nil, err + } + + var response types.ContainerJSON + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/docker/docker/client/container_kill.go b/vendor/github.com/docker/docker/client/container_kill.go new file mode 100644 index 0000000000..7c9529f1e1 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_kill.go @@ -0,0 +1,18 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" +) + +// ContainerKill terminates the container process but does not remove the container from the docker host. +func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error { + query := url.Values{} + if signal != "" { + query.Set("signal", signal) + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/docker/docker/client/container_list.go new file mode 100644 index 0000000000..bd491b3db9 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_list.go @@ -0,0 +1,57 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// ContainerList returns the list of containers in the docker host. 
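+//
+// A minimal sketch using filters (assumes an existing client "cli"; the
+// label value is illustrative, and error handling is elided):
+//
+//	f := filters.NewArgs()
+//	f.Add("label", "com.example.group=web")
+//	list, _ := cli.ContainerList(ctx, types.ContainerListOptions{
+//		All:     true,
+//		Filters: f,
+//	})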
+func (cli *Client) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) { + query := url.Values{} + + if options.All { + query.Set("all", "1") + } + + if options.Limit > 0 { + query.Set("limit", strconv.Itoa(options.Limit)) + } + + if options.Since != "" { + query.Set("since", options.Since) + } + + if options.Before != "" { + query.Set("before", options.Before) + } + + if options.Size { + query.Set("size", "1") + } + + if options.Filters.Len() > 0 { + //nolint:staticcheck // ignore SA1019 for old code + filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) + + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/containers/json", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return nil, err + } + + var containers []types.Container + err = json.NewDecoder(resp.body).Decode(&containers) + return containers, err +} diff --git a/vendor/github.com/docker/docker/client/container_logs.go b/vendor/github.com/docker/docker/client/container_logs.go new file mode 100644 index 0000000000..9bdf2b0fa6 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_logs.go @@ -0,0 +1,80 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + "time" + + "github.com/docker/docker/api/types" + timetypes "github.com/docker/docker/api/types/time" + "github.com/pkg/errors" +) + +// ContainerLogs returns the logs generated by a container in an io.ReadCloser. +// It's up to the caller to close the stream. +// +// The stream format on the response will be in one of two formats: +// +// If the container is using a TTY, there is only a single stream (stdout), and +// data is copied directly from the container output stream, no extra +// multiplexing or headers. +// +// If the container is *not* using a TTY, streams for stdout and stderr are +// multiplexed. +// The format of the multiplexed stream is as follows: +// +// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} +// +// STREAM_TYPE can be 1 for stdout and 2 for stderr +// +// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian. +// This is the size of OUTPUT. +// +// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this +// stream. 
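+//
+// A minimal usage sketch (assumes an existing client "cli"; error handling
+// elided):
+//
+//	rc, _ := cli.ContainerLogs(ctx, containerID, types.ContainerLogsOptions{
+//		ShowStdout: true,
+//		Tail:       "100",
+//	})
+//	defer rc.Close()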
+func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, errors.Wrap(err, `invalid value for "since"`) + } + query.Set("since", ts) + } + + if options.Until != "" { + ts, err := timetypes.GetTimestamp(options.Until, time.Now()) + if err != nil { + return nil, errors.Wrap(err, `invalid value for "until"`) + } + query.Set("until", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/container_pause.go b/vendor/github.com/docker/docker/client/container_pause.go new file mode 100644 index 0000000000..5e7271a371 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_pause.go @@ -0,0 +1,10 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// ContainerPause pauses the main process of a given container without terminating it. +func (cli *Client) ContainerPause(ctx context.Context, containerID string) error { + resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_prune.go b/vendor/github.com/docker/docker/client/container_prune.go new file mode 100644 index 0000000000..04383deaaf --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_prune.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// ContainersPrune requests the daemon to delete unused data +func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) { + var report types.ContainersPruneReport + + if err := cli.NewVersionError("1.25", "container prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/containers/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return report, err + } + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/docker/docker/client/container_remove.go b/vendor/github.com/docker/docker/client/container_remove.go new file mode 100644 index 0000000000..c21de609b0 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_remove.go @@ -0,0 +1,27 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ContainerRemove kills and removes a container from the docker host. 
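+//
+// A minimal sketch (option values are illustrative):
+//
+//	err := cli.ContainerRemove(ctx, containerID, types.ContainerRemoveOptions{
+//		Force:         true,
+//		RemoveVolumes: true,
+//	})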
+func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error { + query := url.Values{} + if options.RemoveVolumes { + query.Set("v", "1") + } + if options.RemoveLinks { + query.Set("link", "1") + } + + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil) + defer ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_rename.go b/vendor/github.com/docker/docker/client/container_rename.go new file mode 100644 index 0000000000..240fdf552b --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_rename.go @@ -0,0 +1,15 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" +) + +// ContainerRename changes the name of a given container. +func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error { + query := url.Values{} + query.Set("name", newContainerName) + resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_resize.go b/vendor/github.com/docker/docker/client/container_resize.go new file mode 100644 index 0000000000..a9d4c0c79a --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_resize.go @@ -0,0 +1,29 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + "strconv" + + "github.com/docker/docker/api/types" +) + +// ContainerResize changes the size of the tty for a container. +func (cli *Client) ContainerResize(ctx context.Context, containerID string, options types.ResizeOptions) error { + return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width) +} + +// ContainerExecResize changes the size of the tty for an exec process running inside a container. +func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error { + return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width) +} + +func (cli *Client) resize(ctx context.Context, basePath string, height, width uint) error { + query := url.Values{} + query.Set("h", strconv.Itoa(int(height))) + query.Set("w", strconv.Itoa(int(width))) + + resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_restart.go b/vendor/github.com/docker/docker/client/container_restart.go new file mode 100644 index 0000000000..1e0ad99981 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_restart.go @@ -0,0 +1,26 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + "strconv" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/versions" +) + +// ContainerRestart stops and starts a container again. +// It makes the daemon wait for the container to be up again for +// a specific amount of time, given the timeout. 
+func (cli *Client) ContainerRestart(ctx context.Context, containerID string, options container.StopOptions) error { + query := url.Values{} + if options.Timeout != nil { + query.Set("t", strconv.Itoa(*options.Timeout)) + } + if options.Signal != "" && versions.GreaterThanOrEqualTo(cli.version, "1.42") { + query.Set("signal", options.Signal) + } + resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_start.go b/vendor/github.com/docker/docker/client/container_start.go new file mode 100644 index 0000000000..c2e0b15dca --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_start.go @@ -0,0 +1,23 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ContainerStart sends a request to the docker daemon to start a container. +func (cli *Client) ContainerStart(ctx context.Context, containerID string, options types.ContainerStartOptions) error { + query := url.Values{} + if len(options.CheckpointID) != 0 { + query.Set("checkpoint", options.CheckpointID) + } + if len(options.CheckpointDir) != 0 { + query.Set("checkpoint-dir", options.CheckpointDir) + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/start", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_stats.go b/vendor/github.com/docker/docker/client/container_stats.go new file mode 100644 index 0000000000..0a6488dde8 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_stats.go @@ -0,0 +1,42 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ContainerStats returns near realtime stats for a given container. +// It's up to the caller to close the io.ReadCloser returned. +func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) { + query := url.Values{} + query.Set("stream", "0") + if stream { + query.Set("stream", "1") + } + + resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) + if err != nil { + return types.ContainerStats{}, err + } + + osType := getDockerOS(resp.header.Get("Server")) + return types.ContainerStats{Body: resp.body, OSType: osType}, err +} + +// ContainerStatsOneShot gets a single stat entry from a container. 
+// It differs from `ContainerStats` in that the API should not wait to prime the stats +func (cli *Client) ContainerStatsOneShot(ctx context.Context, containerID string) (types.ContainerStats, error) { + query := url.Values{} + query.Set("stream", "0") + query.Set("one-shot", "1") + + resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) + if err != nil { + return types.ContainerStats{}, err + } + + osType := getDockerOS(resp.header.Get("Server")) + return types.ContainerStats{Body: resp.body, OSType: osType}, err +} diff --git a/vendor/github.com/docker/docker/client/container_stop.go b/vendor/github.com/docker/docker/client/container_stop.go new file mode 100644 index 0000000000..2a43ce2274 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_stop.go @@ -0,0 +1,30 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + "strconv" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/versions" +) + +// ContainerStop stops a container. In case the container fails to stop +// gracefully within a time frame specified by the timeout argument, +// it is forcefully terminated (killed). +// +// If the timeout is nil, the container's StopTimeout value is used, if set, +// otherwise the engine default. A negative timeout value can be specified, +// meaning no timeout, i.e. no forceful termination is performed. +func (cli *Client) ContainerStop(ctx context.Context, containerID string, options container.StopOptions) error { + query := url.Values{} + if options.Timeout != nil { + query.Set("t", strconv.Itoa(*options.Timeout)) + } + if options.Signal != "" && versions.GreaterThanOrEqualTo(cli.version, "1.42") { + query.Set("signal", options.Signal) + } + resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_top.go b/vendor/github.com/docker/docker/client/container_top.go new file mode 100644 index 0000000000..a5b78999bf --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_top.go @@ -0,0 +1,28 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + "strings" + + "github.com/docker/docker/api/types/container" +) + +// ContainerTop shows process information from within a container. 
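[Editor's note: a sketch, not part of this patch, of decoding a one-shot stats sample. It assumes the types.StatsJSON helper from api/types; "app" is a placeholder container ID, and the caller must close the returned body.]

```go
import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func printStats(ctx context.Context, cli *client.Client) error {
	resp, err := cli.ContainerStatsOneShot(ctx, "app")
	if err != nil {
		return err
	}
	defer resp.Body.Close() // caller must close the io.ReadCloser

	var s types.StatsJSON
	if err := json.NewDecoder(resp.Body).Decode(&s); err != nil {
		return err
	}
	fmt.Println("cpu total:", s.CPUStats.CPUUsage.TotalUsage,
		"mem usage:", s.MemoryStats.Usage)
	return nil
}
```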
+func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (container.ContainerTopOKBody, error) { + var response container.ContainerTopOKBody + query := url.Values{} + if len(arguments) > 0 { + query.Set("ps_args", strings.Join(arguments, " ")) + } + + resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/container_unpause.go b/vendor/github.com/docker/docker/client/container_unpause.go new file mode 100644 index 0000000000..1d8f873169 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_unpause.go @@ -0,0 +1,10 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// ContainerUnpause resumes the process execution within a container +func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error { + resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_update.go b/vendor/github.com/docker/docker/client/container_update.go new file mode 100644 index 0000000000..bf68a5300e --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_update.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types/container" +) + +// ContainerUpdate updates the resources of a container. +func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) { + var response container.ContainerUpdateOKBody + serverResp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return response, err + } + + err = json.NewDecoder(serverResp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/container_wait.go b/vendor/github.com/docker/docker/client/container_wait.go new file mode 100644 index 0000000000..2375eb1e80 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_wait.go @@ -0,0 +1,104 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "io" + "net/url" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/versions" +) + +const containerWaitErrorMsgLimit = 2 * 1024 /* Max: 2KiB */ + +// ContainerWait waits until the specified container is in a certain state +// indicated by the given condition, either "not-running" (default), +// "next-exit", or "removed". +// +// If this client's API version is before 1.30, condition is ignored and +// ContainerWait will return immediately with the two channels, as the server +// will wait as if the condition were "not-running". 
+// +// If this client's API version is at least 1.30, ContainerWait blocks until +// the request has been acknowledged by the server (with a response header), +// then returns two channels on which the caller can wait for the exit status +// of the container or an error if there was a problem either beginning the +// wait request or in getting the response. This allows the caller to +// synchronize ContainerWait with other calls, such as specifying a +// "next-exit" condition before issuing a ContainerStart request. +func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error) { + if versions.LessThan(cli.ClientVersion(), "1.30") { + return cli.legacyContainerWait(ctx, containerID) + } + + resultC := make(chan container.WaitResponse) + errC := make(chan error, 1) + + query := url.Values{} + if condition != "" { + query.Set("condition", string(condition)) + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", query, nil, nil) + if err != nil { + defer ensureReaderClosed(resp) + errC <- err + return resultC, errC + } + + go func() { + defer ensureReaderClosed(resp) + + body := resp.body + responseText := bytes.NewBuffer(nil) + stream := io.TeeReader(body, responseText) + + var res container.WaitResponse + if err := json.NewDecoder(stream).Decode(&res); err != nil { + // NOTE(nicks): The /wait API does not work well with HTTP proxies. + // At any time, the proxy could cut off the response stream. + // + // But because the HTTP status has already been written, the proxy's + // only option is to write a plaintext error message. + // + // If there's a JSON parsing error, read the real error message + // off the body and send it to the client. + _, _ = io.ReadAll(io.LimitReader(stream, containerWaitErrorMsgLimit)) + errC <- errors.New(responseText.String()) + return + } + + resultC <- res + }() + + return resultC, errC +} + +// legacyContainerWait returns immediately and doesn't have an option to wait +// until the container is removed. 
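[Editor's note: a sketch, not part of this patch, of the synchronization pattern the comment above describes: register a "next-exit" wait before issuing ContainerStart, then select on the two channels. The container ID is a placeholder.]

```go
import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func runAndWait(ctx context.Context, cli *client.Client, id string) error {
	// Issue the wait first so the exit cannot be missed.
	waitC, errC := cli.ContainerWait(ctx, id, container.WaitConditionNextExit)

	if err := cli.ContainerStart(ctx, id, types.ContainerStartOptions{}); err != nil {
		return err
	}

	select {
	case res := <-waitC:
		fmt.Println("exit status:", res.StatusCode)
		return nil
	case err := <-errC:
		return err
	}
}
```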
+func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) (<-chan container.WaitResponse, <-chan error) { + resultC := make(chan container.WaitResponse) + errC := make(chan error) + + go func() { + resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil) + if err != nil { + errC <- err + return + } + defer ensureReaderClosed(resp) + + var res container.WaitResponse + if err := json.NewDecoder(resp.body).Decode(&res); err != nil { + errC <- err + return + } + + resultC <- res + }() + + return resultC, errC +} diff --git a/vendor/github.com/docker/docker/client/disk_usage.go b/vendor/github.com/docker/docker/client/disk_usage.go new file mode 100644 index 0000000000..ba0d92e9e6 --- /dev/null +++ b/vendor/github.com/docker/docker/client/disk_usage.go @@ -0,0 +1,33 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + + "github.com/docker/docker/api/types" +) + +// DiskUsage requests the current data usage from the daemon +func (cli *Client) DiskUsage(ctx context.Context, options types.DiskUsageOptions) (types.DiskUsage, error) { + var query url.Values + if len(options.Types) > 0 { + query = url.Values{} + for _, t := range options.Types { + query.Add("type", string(t)) + } + } + + serverResp, err := cli.get(ctx, "/system/df", query, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return types.DiskUsage{}, err + } + + var du types.DiskUsage + if err := json.NewDecoder(serverResp.body).Decode(&du); err != nil { + return types.DiskUsage{}, fmt.Errorf("Error retrieving disk usage: %v", err) + } + return du, nil +} diff --git a/vendor/github.com/docker/docker/client/distribution_inspect.go b/vendor/github.com/docker/docker/client/distribution_inspect.go new file mode 100644 index 0000000000..efab066d3b --- /dev/null +++ b/vendor/github.com/docker/docker/client/distribution_inspect.go @@ -0,0 +1,38 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types/registry" +) + +// DistributionInspect returns the image digest with the full manifest. 
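[Editor's note: for the DiskUsage call added above, a sketch, not part of this patch, of a typical invocation; the field names come from types.DiskUsage.]

```go
import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func showDiskUsage(ctx context.Context, cli *client.Client) error {
	// An empty options struct requests all object types from /system/df.
	du, err := cli.DiskUsage(ctx, types.DiskUsageOptions{})
	if err != nil {
		return err
	}
	fmt.Printf("layers: %d bytes, images: %d, containers: %d\n",
		du.LayersSize, len(du.Images), len(du.Containers))
	return nil
}
```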
+func (cli *Client) DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registry.DistributionInspect, error) { + // Contact the registry to retrieve digest and platform information + var distributionInspect registry.DistributionInspect + if image == "" { + return distributionInspect, objectNotFoundError{object: "distribution", id: image} + } + + if err := cli.NewVersionError("1.30", "distribution inspect"); err != nil { + return distributionInspect, err + } + var headers map[string][]string + + if encodedRegistryAuth != "" { + headers = map[string][]string{ + registry.AuthHeader: {encodedRegistryAuth}, + } + } + + resp, err := cli.get(ctx, "/distribution/"+image+"/json", url.Values{}, headers) + defer ensureReaderClosed(resp) + if err != nil { + return distributionInspect, err + } + + err = json.NewDecoder(resp.body).Decode(&distributionInspect) + return distributionInspect, err +} diff --git a/vendor/github.com/docker/docker/client/envvars.go b/vendor/github.com/docker/docker/client/envvars.go new file mode 100644 index 0000000000..61dd45c1d7 --- /dev/null +++ b/vendor/github.com/docker/docker/client/envvars.go @@ -0,0 +1,90 @@ +package client // import "github.com/docker/docker/client" + +const ( + // EnvOverrideHost is the name of the environment variable that can be used + // to override the default host to connect to (DefaultDockerHost). + // + // This env-var is read by FromEnv and WithHostFromEnv and when set to a + // non-empty value, takes precedence over the default host (which is platform + // specific), or any host already set. + EnvOverrideHost = "DOCKER_HOST" + + // EnvOverrideAPIVersion is the name of the environment variable that can + // be used to override the API version to use. Value should be + // formatted as MAJOR.MINOR, for example, "1.19". + // + // This env-var is read by FromEnv and WithVersionFromEnv and when set to a + // non-empty value, takes precedence over API version negotiation. + // + // This environment variable should be used for debugging purposes only, as + // it can set the client to use an incompatible (or invalid) API version. + EnvOverrideAPIVersion = "DOCKER_API_VERSION" + + // EnvOverrideCertPath is the name of the environment variable that can be + // used to specify the directory from which to load the TLS certificates + // (ca.pem, cert.pem, key.pem) from. These certificates are used to configure + // the Client for a TCP connection protected by TLS client authentication. + // + // TLS certificate verification is enabled by default if the Client is configured + // to use a TLS connection. Refer to EnvTLSVerify below to learn how to + // disable verification for testing purposes. + // + // WARNING: Access to the remote API is equivalent to root access to the + // host where the daemon runs. Do not expose the API without protection, + // and only if needed. Make sure you are familiar with the "daemon attack + // surface" (https://docs.docker.com/go/attack-surface/). + // + // For local access to the API, it is recommended to connect with the daemon + // using the default local socket connection (on Linux), or the named pipe + // (on Windows). + // + // If you need to access the API of a remote daemon, consider using an SSH + // (ssh://) connection, which is easier to set up, and requires no additional + // configuration if the host is accessible using ssh. 
+ // + // If you cannot use the alternatives above, and you must expose the API over + // a TCP connection, refer to https://docs.docker.com/engine/security/protect-access/ + // to learn how to configure the daemon and client to use a TCP connection + // with TLS client authentication. Make sure you know the differences between + // a regular TLS connection and a TLS connection protected by TLS client + // authentication, and verify that the API cannot be accessed by other clients. + EnvOverrideCertPath = "DOCKER_CERT_PATH" + + // EnvTLSVerify is the name of the environment variable that can be used to + // enable or disable TLS certificate verification. When set to a non-empty + // value, TLS certificate verification is enabled, and the client is configured + // to use a TLS connection, using certificates from the default directories + // (within `~/.docker`); refer to EnvOverrideCertPath above for additional + // details. + // + // WARNING: Access to the remote API is equivalent to root access to the + // host where the daemon runs. Do not expose the API without protection, + // and only if needed. Make sure you are familiar with the "daemon attack + // surface" (https://docs.docker.com/go/attack-surface/). + // + // Before setting up your client and daemon to use a TCP connection with TLS + // client authentication, consider using one of the alternatives mentioned + // in EnvOverrideCertPath above. + // + // Disabling TLS certificate verification (for testing purposes) + // + // TLS certificate verification is enabled by default if the Client is configured + // to use a TLS connection, and it is highly recommended to keep verification + // enabled to prevent machine-in-the-middle attacks. Refer to the documentation + // at https://docs.docker.com/engine/security/protect-access/ and pages linked + // from that page to learn how to configure the daemon and client to use a + // TCP connection with TLS client authentication enabled. + // + // Set the "DOCKER_TLS_VERIFY" environment to an empty string ("") to + // disable TLS certificate verification. Disabling verification is insecure, + // so should only be done for testing purposes. From the Go documentation + // (https://pkg.go.dev/crypto/tls#Config): + // + // InsecureSkipVerify controls whether a client verifies the server's + // certificate chain and host name. If InsecureSkipVerify is true, crypto/tls + // accepts any certificate presented by the server and any host name in that + // certificate. In this mode, TLS is susceptible to machine-in-the-middle + // attacks unless custom verification is used. This should be used only for + // testing or in combination with VerifyConnection or VerifyPeerCertificate. + EnvTLSVerify = "DOCKER_TLS_VERIFY" +) diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go new file mode 100644 index 0000000000..6878144c41 --- /dev/null +++ b/vendor/github.com/docker/docker/client/errors.go @@ -0,0 +1,68 @@ +package client // import "github.com/docker/docker/client" + +import ( + "fmt" + + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/errdefs" + "github.com/pkg/errors" +) + +// errConnectionFailed implements an error returned when connection failed. 
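[Editor's note: a sketch, not part of this patch, of how these variables are consumed: client.FromEnv reads DOCKER_HOST, DOCKER_API_VERSION, DOCKER_CERT_PATH, and DOCKER_TLS_VERIFY at construction time. The ssh host below is a placeholder.]

```go
import (
	"os"

	"github.com/docker/docker/client"
)

func newEnvClient() (*client.Client, error) {
	// Equivalent to exporting DOCKER_HOST before running the program;
	// "build-host" is a hypothetical remote.
	os.Setenv(client.EnvOverrideHost, "ssh://user@build-host")

	// FromEnv applies all of the environment overrides documented above.
	return client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
}
```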
+type errConnectionFailed struct { + host string +} + +// Error returns a string representation of an errConnectionFailed +func (err errConnectionFailed) Error() string { + if err.host == "" { + return "Cannot connect to the Docker daemon. Is the docker daemon running on this host?" + } + return fmt.Sprintf("Cannot connect to the Docker daemon at %s. Is the docker daemon running?", err.host) +} + +// IsErrConnectionFailed returns true if the error is caused by connection failed. +func IsErrConnectionFailed(err error) bool { + return errors.As(err, &errConnectionFailed{}) +} + +// ErrorConnectionFailed returns an error with host in the error message when connection to docker daemon failed. +func ErrorConnectionFailed(host string) error { + return errConnectionFailed{host: host} +} + +// Deprecated: use the errdefs.NotFound() interface instead. Kept for backward compatibility +type notFound interface { + error + NotFound() bool +} + +// IsErrNotFound returns true if the error is a NotFound error, which is returned +// by the API when some object is not found. +func IsErrNotFound(err error) bool { + if errdefs.IsNotFound(err) { + return true + } + var e notFound + return errors.As(err, &e) +} + +type objectNotFoundError struct { + object string + id string +} + +func (e objectNotFoundError) NotFound() {} + +func (e objectNotFoundError) Error() string { + return fmt.Sprintf("Error: No such %s: %s", e.object, e.id) +} + +// NewVersionError returns an error if the APIVersion required +// if less than the current supported version +func (cli *Client) NewVersionError(APIrequired, feature string) error { + if cli.version != "" && versions.LessThan(cli.version, APIrequired) { + return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, APIrequired, cli.version) + } + return nil +} diff --git a/vendor/github.com/docker/docker/client/events.go b/vendor/github.com/docker/docker/client/events.go new file mode 100644 index 0000000000..a9c48a9288 --- /dev/null +++ b/vendor/github.com/docker/docker/client/events.go @@ -0,0 +1,101 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + timetypes "github.com/docker/docker/api/types/time" +) + +// Events returns a stream of events in the daemon. It's up to the caller to close the stream +// by cancelling the context. Once the stream has been completely read an io.EOF error will +// be sent over the error channel. If an error is sent all processing will be stopped. It's up +// to the caller to reopen the stream in the event of an error by reinvoking this method. 
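[Editor's note: a sketch, not part of this patch, of classifying client errors with the helpers defined above; the container ID is a placeholder.]

```go
import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func inspectIfPresent(ctx context.Context, cli *client.Client, id string) error {
	_, err := cli.ContainerInspect(ctx, id)
	switch {
	case err == nil:
		return nil
	case client.IsErrNotFound(err):
		// A 404 from the daemon, not a transport failure.
		fmt.Println("no such container:", id)
		return nil
	case client.IsErrConnectionFailed(err):
		return fmt.Errorf("daemon unreachable: %w", err)
	default:
		return err
	}
}
```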
+func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) { + messages := make(chan events.Message) + errs := make(chan error, 1) + + started := make(chan struct{}) + go func() { + defer close(errs) + + query, err := buildEventsQueryParams(cli.version, options) + if err != nil { + close(started) + errs <- err + return + } + + resp, err := cli.get(ctx, "/events", query, nil) + if err != nil { + close(started) + errs <- err + return + } + defer resp.body.Close() + + decoder := json.NewDecoder(resp.body) + + close(started) + for { + select { + case <-ctx.Done(): + errs <- ctx.Err() + return + default: + var event events.Message + if err := decoder.Decode(&event); err != nil { + errs <- err + return + } + + select { + case messages <- event: + case <-ctx.Done(): + errs <- ctx.Err() + return + } + } + } + }() + <-started + + return messages, errs +} + +func buildEventsQueryParams(cliVersion string, options types.EventsOptions) (url.Values, error) { + query := url.Values{} + ref := time.Now() + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, ref) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Until != "" { + ts, err := timetypes.GetTimestamp(options.Until, ref) + if err != nil { + return nil, err + } + query.Set("until", ts) + } + + if options.Filters.Len() > 0 { + //nolint:staticcheck // ignore SA1019 for old code + filterJSON, err := filters.ToParamWithVersion(cliVersion, options.Filters) + if err != nil { + return nil, err + } + query.Set("filters", filterJSON) + } + + return query, nil +} diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go new file mode 100644 index 0000000000..7e84865f69 --- /dev/null +++ b/vendor/github.com/docker/docker/client/hijack.go @@ -0,0 +1,148 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bufio" + "context" + "crypto/tls" + "fmt" + "net" + "net/http" + "net/http/httputil" + "net/url" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/docker/go-connections/sockets" + "github.com/pkg/errors" +) + +// postHijacked sends a POST request and hijacks the connection. +func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) { + bodyEncoded, err := encodeData(body) + if err != nil { + return types.HijackedResponse{}, err + } + req, err := cli.buildRequest(http.MethodPost, cli.getAPIPath(ctx, path, query), bodyEncoded, headers) + if err != nil { + return types.HijackedResponse{}, err + } + conn, mediaType, err := cli.setupHijackConn(ctx, req, "tcp") + if err != nil { + return types.HijackedResponse{}, err + } + + return types.NewHijackedResponse(conn, mediaType), err +} + +// DialHijack returns a hijacked connection with negotiated protocol proto. +func (cli *Client) DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) { + req, err := http.NewRequest(http.MethodPost, url, nil) + if err != nil { + return nil, err + } + req = cli.addHeaders(req, meta) + + conn, _, err := cli.setupHijackConn(ctx, req, proto) + return conn, err +} + +// fallbackDial is used when WithDialer() was not called. +// See cli.Dialer(). 
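[Editor's note: a sketch, not part of this patch, of the consume-and-reopen contract the Events comment above describes; io.EOF on the error channel marks the end of the stream, and the filter restricting the stream to container events is illustrative.]

```go
import (
	"context"
	"fmt"
	"io"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func watchContainers(ctx context.Context, cli *client.Client) error {
	f := filters.NewArgs()
	f.Add("type", "container")

	msgs, errs := cli.Events(ctx, types.EventsOptions{Filters: f})
	for {
		select {
		case m := <-msgs:
			fmt.Println(m.Type, m.Action, m.Actor.ID)
		case err := <-errs:
			// io.EOF means the stream ended; for anything else the caller
			// may reinvoke Events to reopen the stream.
			if err == io.EOF {
				return nil
			}
			return err
		}
	}
}
```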
+func fallbackDial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { + if tlsConfig != nil && proto != "unix" && proto != "npipe" { + return tls.Dial(proto, addr, tlsConfig) + } + if proto == "npipe" { + return sockets.DialPipe(addr, 32*time.Second) + } + return net.Dial(proto, addr) +} + +func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto string) (net.Conn, string, error) { + req.Header.Set("Connection", "Upgrade") + req.Header.Set("Upgrade", proto) + + dialer := cli.Dialer() + conn, err := dialer(ctx) + if err != nil { + return nil, "", errors.Wrap(err, "cannot connect to the Docker daemon. Is 'docker daemon' running on this host?") + } + + // When we set up a TCP connection for hijack, there could be long periods + // of inactivity (a long running command with no output) that in certain + // network setups may cause ECONNTIMEOUT, leaving the client in an unknown + // state. Setting TCP KeepAlive on the socket connection will prohibit + // ECONNTIMEOUT unless the socket connection truly is broken + if tcpConn, ok := conn.(*net.TCPConn); ok { + _ = tcpConn.SetKeepAlive(true) + _ = tcpConn.SetKeepAlivePeriod(30 * time.Second) + } + + clientconn := httputil.NewClientConn(conn, nil) + defer clientconn.Close() + + // Server hijacks the connection, error 'connection closed' expected + resp, err := clientconn.Do(req) + + //nolint:staticcheck // ignore SA1019 for connecting to old (pre go1.8) daemons + if err != httputil.ErrPersistEOF { + if err != nil { + return nil, "", err + } + if resp.StatusCode != http.StatusSwitchingProtocols { + _ = resp.Body.Close() + return nil, "", fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode) + } + } + + c, br := clientconn.Hijack() + if br.Buffered() > 0 { + // If there is buffered content, wrap the connection. We return an + // object that implements CloseWrite if the underlying connection + // implements it. + if _, ok := c.(types.CloseWriter); ok { + c = &hijackedConnCloseWriter{&hijackedConn{c, br}} + } else { + c = &hijackedConn{c, br} + } + } else { + br.Reset(nil) + } + + var mediaType string + if versions.GreaterThanOrEqualTo(cli.ClientVersion(), "1.42") { + // Prior to 1.42, Content-Type is always set to raw-stream and not relevant + mediaType = resp.Header.Get("Content-Type") + } + + return c, mediaType, nil +} + +// hijackedConn wraps a net.Conn and is returned by setupHijackConn in the case +// that a) there was already buffered data in the http layer when Hijack() was +// called, and b) the underlying net.Conn does *not* implement CloseWrite(). +// hijackedConn does not implement CloseWrite() either. +type hijackedConn struct { + net.Conn + r *bufio.Reader +} + +func (c *hijackedConn) Read(b []byte) (int, error) { + return c.r.Read(b) +} + +// hijackedConnCloseWriter is a hijackedConn which additionally implements +// CloseWrite(). It is returned by setupHijackConn in the case that a) there +// was already buffered data in the http layer when Hijack() was called, and b) +// the underlying net.Conn *does* implement CloseWrite(). 
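[Editor's note: the hijack plumbing above ultimately backs calls such as ContainerAttach. A sketch, not part of this patch, of reading an attached non-TTY session through the returned types.HijackedResponse; the container ID is a placeholder.]

```go
import (
	"context"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
)

func attach(ctx context.Context, cli *client.Client, id string) error {
	hj, err := cli.ContainerAttach(ctx, id, types.ContainerAttachOptions{
		Stream: true,
		Stdout: true,
		Stderr: true,
	})
	if err != nil {
		return err
	}
	defer hj.Close() // closes the hijacked connection

	// Non-TTY sessions use the same multiplexed framing as ContainerLogs.
	_, err = stdcopy.StdCopy(os.Stdout, os.Stderr, hj.Reader)
	return err
}
```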
+type hijackedConnCloseWriter struct { + *hijackedConn +} + +var _ types.CloseWriter = &hijackedConnCloseWriter{} + +func (c *hijackedConnCloseWriter) CloseWrite() error { + conn := c.Conn.(types.CloseWriter) + return conn.CloseWrite() +} diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/docker/docker/client/image_build.go new file mode 100644 index 0000000000..d16e1d8ea9 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_build.go @@ -0,0 +1,146 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/base64" + "encoding/json" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" +) + +// ImageBuild sends a request to the daemon to build images. +// The Body in the response implements an io.ReadCloser and it's up to the caller to +// close it. +func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { + query, err := cli.imageBuildOptionsToQuery(options) + if err != nil { + return types.ImageBuildResponse{}, err + } + + headers := http.Header(make(map[string][]string)) + buf, err := json.Marshal(options.AuthConfigs) + if err != nil { + return types.ImageBuildResponse{}, err + } + headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) + + headers.Set("Content-Type", "application/x-tar") + + serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) + if err != nil { + return types.ImageBuildResponse{}, err + } + + osType := getDockerOS(serverResp.header.Get("Server")) + + return types.ImageBuildResponse{ + Body: serverResp.body, + OSType: osType, + }, nil +} + +func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) { + query := url.Values{ + "t": options.Tags, + "securityopt": options.SecurityOpt, + "extrahosts": options.ExtraHosts, + } + if options.SuppressOutput { + query.Set("q", "1") + } + if options.RemoteContext != "" { + query.Set("remote", options.RemoteContext) + } + if options.NoCache { + query.Set("nocache", "1") + } + if options.Remove { + query.Set("rm", "1") + } else { + query.Set("rm", "0") + } + + if options.ForceRemove { + query.Set("forcerm", "1") + } + + if options.PullParent { + query.Set("pull", "1") + } + + if options.Squash { + if err := cli.NewVersionError("1.25", "squash"); err != nil { + return query, err + } + query.Set("squash", "1") + } + + if !container.Isolation.IsDefault(options.Isolation) { + query.Set("isolation", string(options.Isolation)) + } + + query.Set("cpusetcpus", options.CPUSetCPUs) + query.Set("networkmode", options.NetworkMode) + query.Set("cpusetmems", options.CPUSetMems) + query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10)) + query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10)) + query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10)) + query.Set("memory", strconv.FormatInt(options.Memory, 10)) + query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10)) + query.Set("cgroupparent", options.CgroupParent) + query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10)) + query.Set("dockerfile", options.Dockerfile) + query.Set("target", options.Target) + + ulimitsJSON, err := json.Marshal(options.Ulimits) + if err != nil { + return query, err + } + query.Set("ulimits", string(ulimitsJSON)) + + buildArgsJSON, err := 
json.Marshal(options.BuildArgs) + if err != nil { + return query, err + } + query.Set("buildargs", string(buildArgsJSON)) + + labelsJSON, err := json.Marshal(options.Labels) + if err != nil { + return query, err + } + query.Set("labels", string(labelsJSON)) + + cacheFromJSON, err := json.Marshal(options.CacheFrom) + if err != nil { + return query, err + } + query.Set("cachefrom", string(cacheFromJSON)) + if options.SessionID != "" { + query.Set("session", options.SessionID) + } + if options.Platform != "" { + if err := cli.NewVersionError("1.32", "platform"); err != nil { + return query, err + } + query.Set("platform", strings.ToLower(options.Platform)) + } + if options.BuildID != "" { + query.Set("buildid", options.BuildID) + } + query.Set("version", string(options.Version)) + + if options.Outputs != nil { + outputsJSON, err := json.Marshal(options.Outputs) + if err != nil { + return query, err + } + query.Set("outputs", string(outputsJSON)) + } + return query, nil +} diff --git a/vendor/github.com/docker/docker/client/image_create.go b/vendor/github.com/docker/docker/client/image_create.go new file mode 100644 index 0000000000..6a9b708f7d --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_create.go @@ -0,0 +1,38 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + "strings" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/registry" +) + +// ImageCreate creates a new image based on the parent options. +// It returns the JSON content in the response body. +func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) { + ref, err := reference.ParseNormalizedNamed(parentReference) + if err != nil { + return nil, err + } + + query := url.Values{} + query.Set("fromImage", reference.FamiliarName(ref)) + query.Set("tag", getAPITagFromNamedRef(ref)) + if options.Platform != "" { + query.Set("platform", strings.ToLower(options.Platform)) + } + resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) + if err != nil { + return nil, err + } + return resp.body, nil +} + +func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{registry.AuthHeader: {registryAuth}} + return cli.post(ctx, "/images/create", query, nil, headers) +} diff --git a/vendor/github.com/docker/docker/client/image_history.go b/vendor/github.com/docker/docker/client/image_history.go new file mode 100644 index 0000000000..b5bea10d8f --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_history.go @@ -0,0 +1,22 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types/image" +) + +// ImageHistory returns the changes in an image in history format. 
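[Editor's note: a sketch, not part of this patch, of driving ImageBuild. The build context must already be a tar stream, as the Content-Type header above requires; "context.tar" and the "example:dev" tag are placeholders.]

```go
import (
	"context"
	"io"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func build(ctx context.Context, cli *client.Client) error {
	// Assumed to be a pre-packaged tar of the build context.
	f, err := os.Open("context.tar")
	if err != nil {
		return err
	}
	defer f.Close()

	resp, err := cli.ImageBuild(ctx, f, types.ImageBuildOptions{
		Tags:       []string{"example:dev"},
		Dockerfile: "Dockerfile",
		Remove:     true,
	})
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	// The body is a JSON progress stream; drain it to completion.
	_, err = io.Copy(io.Discard, resp.Body)
	return err
}
```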
+func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]image.HistoryResponseItem, error) { + var history []image.HistoryResponseItem + serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return history, err + } + + err = json.NewDecoder(serverResp.body).Decode(&history) + return history, err +} diff --git a/vendor/github.com/docker/docker/client/image_import.go b/vendor/github.com/docker/docker/client/image_import.go new file mode 100644 index 0000000000..c5de42cb79 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_import.go @@ -0,0 +1,40 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + "strings" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" +) + +// ImageImport creates a new image based on the source options. +// It returns the JSON content in the response body. +func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { + if ref != "" { + // Check if the given image name can be resolved + if _, err := reference.ParseNormalizedNamed(ref); err != nil { + return nil, err + } + } + + query := url.Values{} + query.Set("fromSrc", source.SourceName) + query.Set("repo", ref) + query.Set("tag", options.Tag) + query.Set("message", options.Message) + if options.Platform != "" { + query.Set("platform", strings.ToLower(options.Platform)) + } + for _, change := range options.Changes { + query.Add("changes", change) + } + + resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/image_inspect.go b/vendor/github.com/docker/docker/client/image_inspect.go new file mode 100644 index 0000000000..1de10e5a08 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_inspect.go @@ -0,0 +1,32 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io" + + "github.com/docker/docker/api/types" +) + +// ImageInspectWithRaw returns the image information and its raw representation. 
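[Editor's note: a sketch, not part of this patch, of walking an image's history as returned by the ImageHistory call above; "alpine:3.18" is a placeholder reference.]

```go
import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func printHistory(ctx context.Context, cli *client.Client) error {
	layers, err := cli.ImageHistory(ctx, "alpine:3.18")
	if err != nil {
		return err
	}
	for _, l := range layers {
		fmt.Println(l.ID, l.Size, l.CreatedBy)
	}
	return nil
}
```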
+func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) { + if imageID == "" { + return types.ImageInspect{}, nil, objectNotFoundError{object: "image", id: imageID} + } + serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return types.ImageInspect{}, nil, err + } + + body, err := io.ReadAll(serverResp.body) + if err != nil { + return types.ImageInspect{}, nil, err + } + + var response types.ImageInspect + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go new file mode 100644 index 0000000000..950d513334 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_list.go @@ -0,0 +1,49 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/versions" +) + +// ImageList returns a list of images in the docker host. +func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) { + var images []types.ImageSummary + query := url.Values{} + + optionFilters := options.Filters + referenceFilters := optionFilters.Get("reference") + if versions.LessThan(cli.version, "1.25") && len(referenceFilters) > 0 { + query.Set("filter", referenceFilters[0]) + for _, filterValue := range referenceFilters { + optionFilters.Del("reference", filterValue) + } + } + if optionFilters.Len() > 0 { + //nolint:staticcheck // ignore SA1019 for old code + filterJSON, err := filters.ToParamWithVersion(cli.version, optionFilters) + if err != nil { + return images, err + } + query.Set("filters", filterJSON) + } + if options.All { + query.Set("all", "1") + } + if options.SharedSize && versions.GreaterThanOrEqualTo(cli.version, "1.42") { + query.Set("shared-size", "1") + } + + serverResp, err := cli.get(ctx, "/images/json", query, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return images, err + } + + err = json.NewDecoder(serverResp.body).Decode(&images) + return images, err +} diff --git a/vendor/github.com/docker/docker/client/image_load.go b/vendor/github.com/docker/docker/client/image_load.go new file mode 100644 index 0000000000..91016e493c --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_load.go @@ -0,0 +1,29 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ImageLoad loads an image in the docker host from the client host. +// It's up to the caller to close the io.ReadCloser in the +// ImageLoadResponse returned by this function. 
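[Editor's note: a sketch, not part of this patch, of listing images with a reference filter, using the same filters.Args machinery the implementation above serializes; the glob pattern is a placeholder.]

```go
import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func listAlpine(ctx context.Context, cli *client.Client) error {
	f := filters.NewArgs()
	f.Add("reference", "alpine:*") // glob over repo:tag

	images, err := cli.ImageList(ctx, types.ImageListOptions{Filters: f})
	if err != nil {
		return err
	}
	for _, img := range images {
		fmt.Println(img.ID, img.RepoTags)
	}
	return nil
}
```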
+func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) { + v := url.Values{} + v.Set("quiet", "0") + if quiet { + v.Set("quiet", "1") + } + headers := map[string][]string{"Content-Type": {"application/x-tar"}} + resp, err := cli.postRaw(ctx, "/images/load", v, input, headers) + if err != nil { + return types.ImageLoadResponse{}, err + } + return types.ImageLoadResponse{ + Body: resp.body, + JSON: resp.header.Get("Content-Type") == "application/json", + }, nil +} diff --git a/vendor/github.com/docker/docker/client/image_prune.go b/vendor/github.com/docker/docker/client/image_prune.go new file mode 100644 index 0000000000..56af6d7f98 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_prune.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// ImagesPrune requests the daemon to delete unused data +func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (types.ImagesPruneReport, error) { + var report types.ImagesPruneReport + + if err := cli.NewVersionError("1.25", "image prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/images/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return report, err + } + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/docker/docker/client/image_pull.go b/vendor/github.com/docker/docker/client/image_pull.go new file mode 100644 index 0000000000..a23975591b --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_pull.go @@ -0,0 +1,64 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + "strings" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/errdefs" +) + +// ImagePull requests the docker host to pull an image from a remote registry. +// It executes the privileged function if the operation is unauthorized +// and it tries one more time. +// It's up to the caller to handle the io.ReadCloser and close it properly. 
+// +// FIXME(vdemeester): there is currently used in a few way in docker/docker +// - if not in trusted content, ref is used to pass the whole reference, and tag is empty +// - if in trusted content, ref is used to pass the reference name, and tag for the digest +func (cli *Client) ImagePull(ctx context.Context, refStr string, options types.ImagePullOptions) (io.ReadCloser, error) { + ref, err := reference.ParseNormalizedNamed(refStr) + if err != nil { + return nil, err + } + + query := url.Values{} + query.Set("fromImage", reference.FamiliarName(ref)) + if !options.All { + query.Set("tag", getAPITagFromNamedRef(ref)) + } + if options.Platform != "" { + query.Set("platform", strings.ToLower(options.Platform)) + } + + resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) + if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return nil, privilegeErr + } + resp, err = cli.tryImageCreate(ctx, query, newAuthHeader) + } + if err != nil { + return nil, err + } + return resp.body, nil +} + +// getAPITagFromNamedRef returns a tag from the specified reference. +// This function is necessary as long as the docker "server" api expects +// digests to be sent as tags and makes a distinction between the name +// and tag/digest part of a reference. +func getAPITagFromNamedRef(ref reference.Named) string { + if digested, ok := ref.(reference.Digested); ok { + return digested.Digest().String() + } + ref = reference.TagNameOnly(ref) + if tagged, ok := ref.(reference.Tagged); ok { + return tagged.Tag() + } + return "" +} diff --git a/vendor/github.com/docker/docker/client/image_push.go b/vendor/github.com/docker/docker/client/image_push.go new file mode 100644 index 0000000000..dd1b8f3471 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_push.go @@ -0,0 +1,55 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "errors" + "io" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/errdefs" +) + +// ImagePush requests the docker host to push an image to a remote registry. +// It executes the privileged function if the operation is unauthorized +// and it tries one more time. +// It's up to the caller to handle the io.ReadCloser and close it properly. 
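[Editor's note: a sketch, not part of this patch, of a pull. The returned body is a JSON progress stream, and the pull generally completes only once the stream has been fully read, so the caller drains and then closes it; the reference is a placeholder.]

```go
import (
	"context"
	"io"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func pull(ctx context.Context, cli *client.Client) error {
	rc, err := cli.ImagePull(ctx, "docker.io/library/alpine:3.18", types.ImagePullOptions{})
	if err != nil {
		return err
	}
	defer rc.Close()

	// Draining the progress stream is what drives the pull to completion.
	_, err = io.Copy(io.Discard, rc)
	return err
}
```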
+func (cli *Client) ImagePush(ctx context.Context, image string, options types.ImagePushOptions) (io.ReadCloser, error) { + ref, err := reference.ParseNormalizedNamed(image) + if err != nil { + return nil, err + } + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return nil, errors.New("cannot push a digest reference") + } + + name := reference.FamiliarName(ref) + query := url.Values{} + if !options.All { + ref = reference.TagNameOnly(ref) + if tagged, ok := ref.(reference.Tagged); ok { + query.Set("tag", tagged.Tag()) + } + } + + resp, err := cli.tryImagePush(ctx, name, query, options.RegistryAuth) + if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return nil, privilegeErr + } + resp, err = cli.tryImagePush(ctx, name, query, newAuthHeader) + } + if err != nil { + return nil, err + } + return resp.body, nil +} + +func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{registry.AuthHeader: {registryAuth}} + return cli.post(ctx, "/images/"+imageID+"/push", query, nil, headers) +} diff --git a/vendor/github.com/docker/docker/client/image_remove.go b/vendor/github.com/docker/docker/client/image_remove.go new file mode 100644 index 0000000000..6a9fb3f41f --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_remove.go @@ -0,0 +1,31 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ImageRemove removes an image from the docker host. +func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) { + query := url.Values{} + + if options.Force { + query.Set("force", "1") + } + if !options.PruneChildren { + query.Set("noprune", "1") + } + + var dels []types.ImageDeleteResponseItem + resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return dels, err + } + + err = json.NewDecoder(resp.body).Decode(&dels) + return dels, err +} diff --git a/vendor/github.com/docker/docker/client/image_save.go b/vendor/github.com/docker/docker/client/image_save.go new file mode 100644 index 0000000000..d1314e4b22 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_save.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" +) + +// ImageSave retrieves one or more images from the docker host as an io.ReadCloser. +// It's up to the caller to store the images and close the stream. 
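[Editor's note: a sketch, not part of this patch, of supplying RegistryAuth, which the implementation above forwards as the registry auth header. It assumes the conventional encoding, a base64url-encoded JSON registry.AuthConfig (the same base64.URLEncoding convention ImageBuild uses for X-Registry-Config); registry, repository, and credentials are placeholders.]

```go
import (
	"context"
	"encoding/base64"
	"encoding/json"
	"io"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/registry"
	"github.com/docker/docker/client"
)

func push(ctx context.Context, cli *client.Client) error {
	authJSON, err := json.Marshal(registry.AuthConfig{
		Username: "someuser", // placeholder credentials
		Password: "somepass",
	})
	if err != nil {
		return err
	}

	rc, err := cli.ImagePush(ctx, "registry.example.com/team/app:dev", types.ImagePushOptions{
		RegistryAuth: base64.URLEncoding.EncodeToString(authJSON),
	})
	if err != nil {
		return err
	}
	defer rc.Close()

	_, err = io.Copy(io.Discard, rc) // drain the progress stream
	return err
}
```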
+func (cli *Client) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) { + query := url.Values{ + "names": imageIDs, + } + + resp, err := cli.get(ctx, "/images/get", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/docker/docker/client/image_search.go new file mode 100644 index 0000000000..5f0c49ed30 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_search.go @@ -0,0 +1,53 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/errdefs" +) + +// ImageSearch makes the docker host search by a term in a remote registry. +// The list of results is not sorted in any fashion. +func (cli *Client) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) { + var results []registry.SearchResult + query := url.Values{} + query.Set("term", term) + if options.Limit > 0 { + query.Set("limit", strconv.Itoa(options.Limit)) + } + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToJSON(options.Filters) + if err != nil { + return results, err + } + query.Set("filters", filterJSON) + } + + resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth) + defer ensureReaderClosed(resp) + if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return results, privilegeErr + } + resp, err = cli.tryImageSearch(ctx, query, newAuthHeader) + } + if err != nil { + return results, err + } + + err = json.NewDecoder(resp.body).Decode(&results) + return results, err +} + +func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{registry.AuthHeader: {registryAuth}} + return cli.get(ctx, "/images/search", query, headers) +} diff --git a/vendor/github.com/docker/docker/client/image_tag.go b/vendor/github.com/docker/docker/client/image_tag.go new file mode 100644 index 0000000000..5652bfc252 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_tag.go @@ -0,0 +1,37 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/pkg/errors" +) + +// ImageTag tags an image in the docker host +func (cli *Client) ImageTag(ctx context.Context, source, target string) error { + if _, err := reference.ParseAnyReference(source); err != nil { + return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", source) + } + + ref, err := reference.ParseNormalizedNamed(target) + if err != nil { + return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", target) + } + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return errors.New("refusing to create a tag with a digest reference") + } + + ref = reference.TagNameOnly(ref) + + query := url.Values{} + query.Set("repo", reference.FamiliarName(ref)) + if tagged, ok := ref.(reference.Tagged); ok { + query.Set("tag", tagged.Tag()) + } + + resp, err := cli.post(ctx, 
"/images/"+source+"/tag", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/info.go b/vendor/github.com/docker/docker/client/info.go new file mode 100644 index 0000000000..c856704e23 --- /dev/null +++ b/vendor/github.com/docker/docker/client/info.go @@ -0,0 +1,26 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + + "github.com/docker/docker/api/types" +) + +// Info returns information about the docker server. +func (cli *Client) Info(ctx context.Context) (types.Info, error) { + var info types.Info + serverResp, err := cli.get(ctx, "/info", url.Values{}, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return info, err + } + + if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil { + return info, fmt.Errorf("Error reading remote info: %v", err) + } + + return info, nil +} diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go new file mode 100644 index 0000000000..7993c5a48f --- /dev/null +++ b/vendor/github.com/docker/docker/client/interface.go @@ -0,0 +1,201 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net" + "net/http" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/image" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/volume" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// CommonAPIClient is the common methods between stable and experimental versions of APIClient. 
+type CommonAPIClient interface { + ConfigAPIClient + ContainerAPIClient + DistributionAPIClient + ImageAPIClient + NodeAPIClient + NetworkAPIClient + PluginAPIClient + ServiceAPIClient + SwarmAPIClient + SecretAPIClient + SystemAPIClient + VolumeAPIClient + ClientVersion() string + DaemonHost() string + HTTPClient() *http.Client + ServerVersion(ctx context.Context) (types.Version, error) + NegotiateAPIVersion(ctx context.Context) + NegotiateAPIVersionPing(types.Ping) + DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) + Dialer() func(context.Context) (net.Conn, error) + Close() error +} + +// ContainerAPIClient defines API client methods for the containers +type ContainerAPIClient interface { + ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) + ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) + ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *ocispec.Platform, containerName string) (container.CreateResponse, error) + ContainerDiff(ctx context.Context, container string) ([]container.FilesystemChange, error) + ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) + ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) + ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) + ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error + ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error + ContainerExport(ctx context.Context, container string) (io.ReadCloser, error) + ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error) + ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error) + ContainerKill(ctx context.Context, container, signal string) error + ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) + ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) + ContainerPause(ctx context.Context, container string) error + ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error + ContainerRename(ctx context.Context, container, newContainerName string) error + ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error + ContainerRestart(ctx context.Context, container string, options container.StopOptions) error + ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error) + ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error) + ContainerStatsOneShot(ctx context.Context, container string) (types.ContainerStats, error) + ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error + ContainerStop(ctx context.Context, container string, options container.StopOptions) error + ContainerTop(ctx context.Context, container string, arguments []string) (container.ContainerTopOKBody, error) + ContainerUnpause(ctx context.Context, container string) error + ContainerUpdate(ctx context.Context, container string, updateConfig 
container.UpdateConfig) (container.ContainerUpdateOKBody, error) + ContainerWait(ctx context.Context, container string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error) + CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) + CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error + ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) +} + +// DistributionAPIClient defines API client methods for the registry +type DistributionAPIClient interface { + DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registry.DistributionInspect, error) +} + +// ImageAPIClient defines API client methods for the images +type ImageAPIClient interface { + ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) + BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) + BuildCancel(ctx context.Context, id string) error + ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) + ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error) + ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) + ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error) + ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) + ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) + ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) + ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) + ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) + ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) + ImageSave(ctx context.Context, images []string) (io.ReadCloser, error) + ImageTag(ctx context.Context, image, ref string) error + ImagesPrune(ctx context.Context, pruneFilter filters.Args) (types.ImagesPruneReport, error) +} + +// NetworkAPIClient defines API client methods for the networks +type NetworkAPIClient interface { + NetworkConnect(ctx context.Context, network, container string, config *network.EndpointSettings) error + NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) + NetworkDisconnect(ctx context.Context, network, container string, force bool) error + NetworkInspect(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, error) + NetworkInspectWithRaw(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) + NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) + NetworkRemove(ctx context.Context, network string) error + NetworksPrune(ctx context.Context, pruneFilter filters.Args) (types.NetworksPruneReport, error) +} + +// NodeAPIClient defines API client methods for the nodes +type NodeAPIClient interface { + NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, 
error) + NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) + NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error + NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error +} + +// PluginAPIClient defines API client methods for the plugins +type PluginAPIClient interface { + PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) + PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error + PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error + PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error + PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) + PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) + PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) + PluginSet(ctx context.Context, name string, args []string) error + PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) + PluginCreate(ctx context.Context, createContext io.Reader, options types.PluginCreateOptions) error +} + +// ServiceAPIClient defines API client methods for the services +type ServiceAPIClient interface { + ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) + ServiceInspectWithRaw(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) + ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) + ServiceRemove(ctx context.Context, serviceID string) error + ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) + ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) + TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) + TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) + TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) +} + +// SwarmAPIClient defines API client methods for the swarm +type SwarmAPIClient interface { + SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) + SwarmJoin(ctx context.Context, req swarm.JoinRequest) error + SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) + SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error + SwarmLeave(ctx context.Context, force bool) error + SwarmInspect(ctx context.Context) (swarm.Swarm, error) + SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error +} + +// SystemAPIClient defines API client methods for the system +type SystemAPIClient interface { + Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) + Info(ctx context.Context) (types.Info, error) + RegistryLogin(ctx context.Context, auth registry.AuthConfig) (registry.AuthenticateOKBody, error) + DiskUsage(ctx context.Context, options types.DiskUsageOptions) (types.DiskUsage, error) + Ping(ctx context.Context) (types.Ping, error) +} + +// VolumeAPIClient defines API client methods for the volumes +type 
VolumeAPIClient interface { + VolumeCreate(ctx context.Context, options volume.CreateOptions) (volume.Volume, error) + VolumeInspect(ctx context.Context, volumeID string) (volume.Volume, error) + VolumeInspectWithRaw(ctx context.Context, volumeID string) (volume.Volume, []byte, error) + VolumeList(ctx context.Context, options volume.ListOptions) (volume.ListResponse, error) + VolumeRemove(ctx context.Context, volumeID string, force bool) error + VolumesPrune(ctx context.Context, pruneFilter filters.Args) (types.VolumesPruneReport, error) + VolumeUpdate(ctx context.Context, volumeID string, version swarm.Version, options volume.UpdateOptions) error +} + +// SecretAPIClient defines API client methods for secrets +type SecretAPIClient interface { + SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) + SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) + SecretRemove(ctx context.Context, id string) error + SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error) + SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error +} + +// ConfigAPIClient defines API client methods for configs +type ConfigAPIClient interface { + ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) + ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) + ConfigRemove(ctx context.Context, id string) error + ConfigInspectWithRaw(ctx context.Context, name string) (swarm.Config, []byte, error) + ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error +} diff --git a/vendor/github.com/docker/docker/client/interface_experimental.go b/vendor/github.com/docker/docker/client/interface_experimental.go new file mode 100644 index 0000000000..402ffb512c --- /dev/null +++ b/vendor/github.com/docker/docker/client/interface_experimental.go @@ -0,0 +1,18 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + + "github.com/docker/docker/api/types" +) + +type apiClientExperimental interface { + CheckpointAPIClient +} + +// CheckpointAPIClient defines API client methods for the checkpoints +type CheckpointAPIClient interface { + CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error + CheckpointDelete(ctx context.Context, container string, options types.CheckpointDeleteOptions) error + CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) +} diff --git a/vendor/github.com/docker/docker/client/interface_stable.go b/vendor/github.com/docker/docker/client/interface_stable.go new file mode 100644 index 0000000000..5502cd7426 --- /dev/null +++ b/vendor/github.com/docker/docker/client/interface_stable.go @@ -0,0 +1,10 @@ +package client // import "github.com/docker/docker/client" + +// APIClient is an interface that clients that talk with a docker server must implement. +type APIClient interface { + CommonAPIClient + apiClientExperimental +} + +// Ensure that Client always implements APIClient. 
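+// The blank assignment is a compile-time assertion: the build fails if *Client
+// ever stops satisfying APIClient, at no runtime cost.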
+var _ APIClient = &Client{} diff --git a/vendor/github.com/docker/docker/client/login.go b/vendor/github.com/docker/docker/client/login.go new file mode 100644 index 0000000000..19e985e0b9 --- /dev/null +++ b/vendor/github.com/docker/docker/client/login.go @@ -0,0 +1,24 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types/registry" +) + +// RegistryLogin authenticates the docker server with a given docker registry. +// It returns unauthorizedError when the authentication fails. +func (cli *Client) RegistryLogin(ctx context.Context, auth registry.AuthConfig) (registry.AuthenticateOKBody, error) { + resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil) + defer ensureReaderClosed(resp) + + if err != nil { + return registry.AuthenticateOKBody{}, err + } + + var response registry.AuthenticateOKBody + err = json.NewDecoder(resp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/network_connect.go b/vendor/github.com/docker/docker/client/network_connect.go new file mode 100644 index 0000000000..5718946134 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_connect.go @@ -0,0 +1,19 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" +) + +// NetworkConnect connects a container to an existent network in the docker host. +func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error { + nc := types.NetworkConnect{ + Container: containerID, + EndpointConfig: config, + } + resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/network_create.go b/vendor/github.com/docker/docker/client/network_create.go new file mode 100644 index 0000000000..278d9383a8 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_create.go @@ -0,0 +1,25 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types" +) + +// NetworkCreate creates a new network in the docker host. 
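+//
+// A minimal usage sketch (name and driver are illustrative):
+//
+//	resp, err := cli.NetworkCreate(ctx, "app-net", types.NetworkCreate{Driver: "bridge"})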
+func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) { + networkCreateRequest := types.NetworkCreateRequest{ + NetworkCreate: options, + Name: name, + } + var response types.NetworkCreateResponse + serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return response, err + } + + err = json.NewDecoder(serverResp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/network_disconnect.go b/vendor/github.com/docker/docker/client/network_disconnect.go new file mode 100644 index 0000000000..dd15676656 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_disconnect.go @@ -0,0 +1,15 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + + "github.com/docker/docker/api/types" +) + +// NetworkDisconnect disconnects a container from an existent network in the docker host. +func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error { + nd := types.NetworkDisconnect{Container: containerID, Force: force} + resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, nd, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/network_inspect.go b/vendor/github.com/docker/docker/client/network_inspect.go new file mode 100644 index 0000000000..0f90e2bb90 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_inspect.go @@ -0,0 +1,49 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io" + "net/url" + + "github.com/docker/docker/api/types" +) + +// NetworkInspect returns the information for a specific network configured in the docker host. +func (cli *Client) NetworkInspect(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error) { + networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID, options) + return networkResource, err +} + +// NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation. 
+func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) { + if networkID == "" { + return types.NetworkResource{}, nil, objectNotFoundError{object: "network", id: networkID} + } + var ( + networkResource types.NetworkResource + resp serverResponse + err error + ) + query := url.Values{} + if options.Verbose { + query.Set("verbose", "true") + } + if options.Scope != "" { + query.Set("scope", options.Scope) + } + resp, err = cli.get(ctx, "/networks/"+networkID, query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return networkResource, nil, err + } + + body, err := io.ReadAll(resp.body) + if err != nil { + return networkResource, nil, err + } + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&networkResource) + return networkResource, body, err +} diff --git a/vendor/github.com/docker/docker/client/network_list.go b/vendor/github.com/docker/docker/client/network_list.go new file mode 100644 index 0000000000..ed2acb5571 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_list.go @@ -0,0 +1,32 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// NetworkList returns the list of networks configured in the docker host. +func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { + query := url.Values{} + if options.Filters.Len() > 0 { + //nolint:staticcheck // ignore SA1019 for old code + filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + var networkResources []types.NetworkResource + resp, err := cli.get(ctx, "/networks", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return networkResources, err + } + err = json.NewDecoder(resp.body).Decode(&networkResources) + return networkResources, err +} diff --git a/vendor/github.com/docker/docker/client/network_prune.go b/vendor/github.com/docker/docker/client/network_prune.go new file mode 100644 index 0000000000..cebb188219 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_prune.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// NetworksPrune requests the daemon to delete unused networks +func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (types.NetworksPruneReport, error) { + var report types.NetworksPruneReport + + if err := cli.NewVersionError("1.25", "network prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/networks/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return report, err + } + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving network prune report: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/docker/docker/client/network_remove.go 
b/vendor/github.com/docker/docker/client/network_remove.go new file mode 100644 index 0000000000..9d6c6cef07 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_remove.go @@ -0,0 +1,10 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// NetworkRemove removes an existent network from the docker host. +func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error { + resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil) + defer ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/node_inspect.go b/vendor/github.com/docker/docker/client/node_inspect.go new file mode 100644 index 0000000000..95ab9b1be0 --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_inspect.go @@ -0,0 +1,32 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io" + + "github.com/docker/docker/api/types/swarm" +) + +// NodeInspectWithRaw returns the node information. +func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) { + if nodeID == "" { + return swarm.Node{}, nil, objectNotFoundError{object: "node", id: nodeID} + } + serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return swarm.Node{}, nil, err + } + + body, err := io.ReadAll(serverResp.body) + if err != nil { + return swarm.Node{}, nil, err + } + + var response swarm.Node + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/docker/docker/client/node_list.go b/vendor/github.com/docker/docker/client/node_list.go new file mode 100644 index 0000000000..c212906bc7 --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_list.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" +) + +// NodeList returns the list of nodes. +func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToJSON(options.Filters) + + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/nodes", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return nil, err + } + + var nodes []swarm.Node + err = json.NewDecoder(resp.body).Decode(&nodes) + return nodes, err +} diff --git a/vendor/github.com/docker/docker/client/node_remove.go b/vendor/github.com/docker/docker/client/node_remove.go new file mode 100644 index 0000000000..e44436debc --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_remove.go @@ -0,0 +1,20 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// NodeRemove removes a Node. 
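+//
+// A minimal sketch (illustrative); Force asks the daemon to remove the node
+// even when it would otherwise refuse.
+//
+//	err := cli.NodeRemove(ctx, nodeID, types.NodeRemoveOptions{Force: true})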
+func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error { + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil) + defer ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/node_update.go b/vendor/github.com/docker/docker/client/node_update.go new file mode 100644 index 0000000000..0d0fc3b788 --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_update.go @@ -0,0 +1,17 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types/swarm" +) + +// NodeUpdate updates a Node. +func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error { + query := url.Values{} + query.Set("version", version.String()) + resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, node, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/options.go b/vendor/github.com/docker/docker/client/options.go new file mode 100644 index 0000000000..099ad41846 --- /dev/null +++ b/vendor/github.com/docker/docker/client/options.go @@ -0,0 +1,210 @@ +package client + +import ( + "context" + "net" + "net/http" + "os" + "path/filepath" + "time" + + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" + "github.com/pkg/errors" +) + +// Opt is a configuration option to initialize a client +type Opt func(*Client) error + +// FromEnv configures the client with values from environment variables. +// +// FromEnv uses the following environment variables: +// +// DOCKER_HOST (EnvOverrideHost) to set the URL to the docker server. +// +// DOCKER_API_VERSION (EnvOverrideAPIVersion) to set the version of the API to +// use, leave empty for latest. +// +// DOCKER_CERT_PATH (EnvOverrideCertPath) to specify the directory from which to +// load the TLS certificates (ca.pem, cert.pem, key.pem). +// +// DOCKER_TLS_VERIFY (EnvTLSVerify) to enable or disable TLS verification (off by +// default). +func FromEnv(c *Client) error { + ops := []Opt{ + WithTLSClientConfigFromEnv(), + WithHostFromEnv(), + WithVersionFromEnv(), + } + for _, op := range ops { + if err := op(c); err != nil { + return err + } + } + return nil +} + +// WithDialContext applies the dialer to the client transport. This can be +// used to set the Timeout and KeepAlive settings of the client. +func WithDialContext(dialContext func(ctx context.Context, network, addr string) (net.Conn, error)) Opt { + return func(c *Client) error { + if transport, ok := c.client.Transport.(*http.Transport); ok { + transport.DialContext = dialContext + return nil + } + return errors.Errorf("cannot apply dialer to transport: %T", c.client.Transport) + } +} + +// WithHost overrides the client host with the specified one. 
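+//
+// A minimal usage sketch (the address is illustrative):
+//
+//	cli, err := client.NewClientWithOpts(client.WithHost("tcp://10.0.0.5:2375"))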
+func WithHost(host string) Opt { + return func(c *Client) error { + hostURL, err := ParseHostURL(host) + if err != nil { + return err + } + c.host = host + c.proto = hostURL.Scheme + c.addr = hostURL.Host + c.basePath = hostURL.Path + if transport, ok := c.client.Transport.(*http.Transport); ok { + return sockets.ConfigureTransport(transport, c.proto, c.addr) + } + return errors.Errorf("cannot apply host to transport: %T", c.client.Transport) + } +} + +// WithHostFromEnv overrides the client host with the host specified in the +// DOCKER_HOST (EnvOverrideHost) environment variable. If DOCKER_HOST is not set, +// or set to an empty value, the host is not modified. +func WithHostFromEnv() Opt { + return func(c *Client) error { + if host := os.Getenv(EnvOverrideHost); host != "" { + return WithHost(host)(c) + } + return nil + } +} + +// WithHTTPClient overrides the client http client with the specified one +func WithHTTPClient(client *http.Client) Opt { + return func(c *Client) error { + if client != nil { + c.client = client + } + return nil + } +} + +// WithTimeout configures the time limit for requests made by the HTTP client +func WithTimeout(timeout time.Duration) Opt { + return func(c *Client) error { + c.client.Timeout = timeout + return nil + } +} + +// WithHTTPHeaders overrides the client default http headers +func WithHTTPHeaders(headers map[string]string) Opt { + return func(c *Client) error { + c.customHTTPHeaders = headers + return nil + } +} + +// WithScheme overrides the client scheme with the specified one +func WithScheme(scheme string) Opt { + return func(c *Client) error { + c.scheme = scheme + return nil + } +} + +// WithTLSClientConfig applies a tls config to the client transport. +func WithTLSClientConfig(cacertPath, certPath, keyPath string) Opt { + return func(c *Client) error { + opts := tlsconfig.Options{ + CAFile: cacertPath, + CertFile: certPath, + KeyFile: keyPath, + ExclusiveRootPools: true, + } + config, err := tlsconfig.Client(opts) + if err != nil { + return errors.Wrap(err, "failed to create tls config") + } + if transport, ok := c.client.Transport.(*http.Transport); ok { + transport.TLSClientConfig = config + return nil + } + return errors.Errorf("cannot apply tls config to transport: %T", c.client.Transport) + } +} + +// WithTLSClientConfigFromEnv configures the client's TLS settings with the +// settings in the DOCKER_CERT_PATH and DOCKER_TLS_VERIFY environment variables. +// If DOCKER_CERT_PATH is not set or empty, TLS configuration is not modified. +// +// WithTLSClientConfigFromEnv uses the following environment variables: +// +// DOCKER_CERT_PATH (EnvOverrideCertPath) to specify the directory from which to +// load the TLS certificates (ca.pem, cert.pem, key.pem). +// +// DOCKER_TLS_VERIFY (EnvTLSVerify) to enable or disable TLS verification (off by +// default). 
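+//
+// A minimal usage sketch (assumes the variables above are set in the
+// environment):
+//
+//	cli, err := client.NewClientWithOpts(client.WithTLSClientConfigFromEnv())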
+func WithTLSClientConfigFromEnv() Opt { + return func(c *Client) error { + dockerCertPath := os.Getenv(EnvOverrideCertPath) + if dockerCertPath == "" { + return nil + } + options := tlsconfig.Options{ + CAFile: filepath.Join(dockerCertPath, "ca.pem"), + CertFile: filepath.Join(dockerCertPath, "cert.pem"), + KeyFile: filepath.Join(dockerCertPath, "key.pem"), + InsecureSkipVerify: os.Getenv(EnvTLSVerify) == "", + } + tlsc, err := tlsconfig.Client(options) + if err != nil { + return err + } + + c.client = &http.Client{ + Transport: &http.Transport{TLSClientConfig: tlsc}, + CheckRedirect: CheckRedirect, + } + return nil + } +} + +// WithVersion overrides the client version with the specified one. If an empty +// version is specified, the value will be ignored to allow version negotiation. +func WithVersion(version string) Opt { + return func(c *Client) error { + if version != "" { + c.version = version + c.manualOverride = true + } + return nil + } +} + +// WithVersionFromEnv overrides the client version with the version specified in +// the DOCKER_API_VERSION environment variable. If DOCKER_API_VERSION is not set, +// the version is not modified. +func WithVersionFromEnv() Opt { + return func(c *Client) error { + return WithVersion(os.Getenv(EnvOverrideAPIVersion))(c) + } +} + +// WithAPIVersionNegotiation enables automatic API version negotiation for the client. +// With this option enabled, the client automatically negotiates the API version +// to use when making requests. API version negotiation is performed on the first +// request; subsequent requests will not re-negotiate. +func WithAPIVersionNegotiation() Opt { + return func(c *Client) error { + c.negotiateVersion = true + return nil + } +} diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go new file mode 100644 index 0000000000..347ae71e02 --- /dev/null +++ b/vendor/github.com/docker/docker/client/ping.go @@ -0,0 +1,75 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/http" + "path" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/errdefs" +) + +// Ping pings the server and returns the value of the "Docker-Experimental", +// "Builder-Version", "OS-Type" & "API-Version" headers. It attempts to use +// a HEAD request on the endpoint, but falls back to GET if HEAD is not supported +// by the daemon. 
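+//
+// A minimal sketch (illustrative):
+//
+//	if p, err := cli.Ping(ctx); err == nil {
+//		fmt.Println("server API version:", p.APIVersion)
+//	}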
+func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { + var ping types.Ping + + // Using cli.buildRequest() + cli.doRequest() instead of cli.sendRequest() + // because ping requests are used during API version negotiation, so we want + // to hit the non-versioned /_ping endpoint, not /v1.xx/_ping + req, err := cli.buildRequest(http.MethodHead, path.Join(cli.basePath, "/_ping"), nil, nil) + if err != nil { + return ping, err + } + serverResp, err := cli.doRequest(ctx, req) + if err == nil { + defer ensureReaderClosed(serverResp) + switch serverResp.statusCode { + case http.StatusOK, http.StatusInternalServerError: + // Server handled the request, so parse the response + return parsePingResponse(cli, serverResp) + } + } else if IsErrConnectionFailed(err) { + return ping, err + } + + req, err = cli.buildRequest(http.MethodGet, path.Join(cli.basePath, "/_ping"), nil, nil) + if err != nil { + return ping, err + } + serverResp, err = cli.doRequest(ctx, req) + defer ensureReaderClosed(serverResp) + if err != nil { + return ping, err + } + return parsePingResponse(cli, serverResp) +} + +func parsePingResponse(cli *Client, resp serverResponse) (types.Ping, error) { + var ping types.Ping + if resp.header == nil { + err := cli.checkResponseErr(resp) + return ping, errdefs.FromStatusCode(err, resp.statusCode) + } + ping.APIVersion = resp.header.Get("API-Version") + ping.OSType = resp.header.Get("OSType") + if resp.header.Get("Docker-Experimental") == "true" { + ping.Experimental = true + } + if bv := resp.header.Get("Builder-Version"); bv != "" { + ping.BuilderVersion = types.BuilderVersion(bv) + } + if si := resp.header.Get("Swarm"); si != "" { + state, role, _ := strings.Cut(si, "/") + ping.SwarmStatus = &swarm.Status{ + NodeState: swarm.LocalNodeState(state), + ControlAvailable: role == "manager", + } + } + err := cli.checkResponseErr(resp) + return ping, errdefs.FromStatusCode(err, resp.statusCode) +} diff --git a/vendor/github.com/docker/docker/client/plugin_create.go b/vendor/github.com/docker/docker/client/plugin_create.go new file mode 100644 index 0000000000..b95dbaf686 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_create.go @@ -0,0 +1,23 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" +) + +// PluginCreate creates a plugin +func (cli *Client) PluginCreate(ctx context.Context, createContext io.Reader, createOptions types.PluginCreateOptions) error { + headers := http.Header(make(map[string][]string)) + headers.Set("Content-Type", "application/x-tar") + + query := url.Values{} + query.Set("name", createOptions.RepoName) + + resp, err := cli.postRaw(ctx, "/plugins/create", query, createContext, headers) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/plugin_disable.go b/vendor/github.com/docker/docker/client/plugin_disable.go new file mode 100644 index 0000000000..01f6574f95 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_disable.go @@ -0,0 +1,19 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// PluginDisable disables a plugin +func (cli *Client) PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error { + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + 
resp, err := cli.post(ctx, "/plugins/"+name+"/disable", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/plugin_enable.go b/vendor/github.com/docker/docker/client/plugin_enable.go new file mode 100644 index 0000000000..736da48bd1 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_enable.go @@ -0,0 +1,19 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + "strconv" + + "github.com/docker/docker/api/types" +) + +// PluginEnable enables a plugin +func (cli *Client) PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error { + query := url.Values{} + query.Set("timeout", strconv.Itoa(options.Timeout)) + + resp, err := cli.post(ctx, "/plugins/"+name+"/enable", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/plugin_inspect.go b/vendor/github.com/docker/docker/client/plugin_inspect.go new file mode 100644 index 0000000000..f09e460660 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_inspect.go @@ -0,0 +1,31 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io" + + "github.com/docker/docker/api/types" +) + +// PluginInspectWithRaw inspects an existing plugin +func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) { + if name == "" { + return nil, nil, objectNotFoundError{object: "plugin", id: name} + } + resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return nil, nil, err + } + + body, err := io.ReadAll(resp.body) + if err != nil { + return nil, nil, err + } + var p types.Plugin + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&p) + return &p, body, err +} diff --git a/vendor/github.com/docker/docker/client/plugin_install.go b/vendor/github.com/docker/docker/client/plugin_install.go new file mode 100644 index 0000000000..3a740ec4f6 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_install.go @@ -0,0 +1,114 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "io" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/errdefs" + "github.com/pkg/errors" +) + +// PluginInstall installs a plugin +func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { + query := url.Values{} + if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { + return nil, errors.Wrap(err, "invalid remote reference") + } + query.Set("remote", options.RemoteRef) + + privileges, err := cli.checkPluginPermissions(ctx, query, options) + if err != nil { + return nil, err + } + + // set name for plugin pull, if empty should default to remote reference + query.Set("name", name) + + resp, err := cli.tryPluginPull(ctx, query, privileges, options.RegistryAuth) + if err != nil { + return nil, err + } + + name = resp.header.Get("Docker-Plugin-Name") + + pr, pw := io.Pipe() + go func() { // todo: the client should probably be designed more around the actual api + _, err := 
io.Copy(pw, resp.body) + if err != nil { + pw.CloseWithError(err) + return + } + defer func() { + if err != nil { + delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil) + ensureReaderClosed(delResp) + } + }() + if len(options.Args) > 0 { + if err := cli.PluginSet(ctx, name, options.Args); err != nil { + pw.CloseWithError(err) + return + } + } + + if options.Disabled { + pw.Close() + return + } + + enableErr := cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0}) + pw.CloseWithError(enableErr) + }() + return pr, nil +} + +func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{registry.AuthHeader: {registryAuth}} + return cli.get(ctx, "/plugins/privileges", query, headers) +} + +func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileges types.PluginPrivileges, registryAuth string) (serverResponse, error) { + headers := map[string][]string{registry.AuthHeader: {registryAuth}} + return cli.post(ctx, "/plugins/pull", query, privileges, headers) +} + +func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, options types.PluginInstallOptions) (types.PluginPrivileges, error) { + resp, err := cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) + if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { + // todo: do inspect before to check existing name before checking privileges + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + ensureReaderClosed(resp) + return nil, privilegeErr + } + options.RegistryAuth = newAuthHeader + resp, err = cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) + } + if err != nil { + ensureReaderClosed(resp) + return nil, err + } + + var privileges types.PluginPrivileges + if err := json.NewDecoder(resp.body).Decode(&privileges); err != nil { + ensureReaderClosed(resp) + return nil, err + } + ensureReaderClosed(resp) + + if !options.AcceptAllPermissions && options.AcceptPermissionsFunc != nil && len(privileges) > 0 { + accept, err := options.AcceptPermissionsFunc(privileges) + if err != nil { + return nil, err + } + if !accept { + return nil, errors.Errorf("permission denied while installing plugin %s", options.RemoteRef) + } + } + return privileges, nil +} diff --git a/vendor/github.com/docker/docker/client/plugin_list.go b/vendor/github.com/docker/docker/client/plugin_list.go new file mode 100644 index 0000000000..2091a054d6 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_list.go @@ -0,0 +1,33 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// PluginList returns the installed plugins +func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) { + var plugins types.PluginsListResponse + query := url.Values{} + + if filter.Len() > 0 { + //nolint:staticcheck // ignore SA1019 for old code + filterJSON, err := filters.ToParamWithVersion(cli.version, filter) + if err != nil { + return plugins, err + } + query.Set("filters", filterJSON) + } + resp, err := cli.get(ctx, "/plugins", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return plugins, err + } + + err = json.NewDecoder(resp.body).Decode(&plugins) + return plugins, err +} diff --git 
a/vendor/github.com/docker/docker/client/plugin_push.go b/vendor/github.com/docker/docker/client/plugin_push.go new file mode 100644 index 0000000000..18f9754c4c --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_push.go @@ -0,0 +1,18 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + + "github.com/docker/docker/api/types/registry" +) + +// PluginPush pushes a plugin to a registry +func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) { + headers := map[string][]string{registry.AuthHeader: {registryAuth}} + resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, headers) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/plugin_remove.go b/vendor/github.com/docker/docker/client/plugin_remove.go new file mode 100644 index 0000000000..4cd66958c3 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_remove.go @@ -0,0 +1,20 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// PluginRemove removes a plugin +func (cli *Client) PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error { + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/plugins/"+name, query, nil) + defer ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/plugin_set.go b/vendor/github.com/docker/docker/client/plugin_set.go new file mode 100644 index 0000000000..dcf5752ca2 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_set.go @@ -0,0 +1,12 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" +) + +// PluginSet modifies settings for an existing plugin +func (cli *Client) PluginSet(ctx context.Context, name string, args []string) error { + resp, err := cli.post(ctx, "/plugins/"+name+"/set", nil, args, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/plugin_upgrade.go b/vendor/github.com/docker/docker/client/plugin_upgrade.go new file mode 100644 index 0000000000..995d1fd2ca --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_upgrade.go @@ -0,0 +1,40 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/registry" + "github.com/pkg/errors" +) + +// PluginUpgrade upgrades a plugin +func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { + if err := cli.NewVersionError("1.26", "plugin upgrade"); err != nil { + return nil, err + } + query := url.Values{} + if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { + return nil, errors.Wrap(err, "invalid remote reference") + } + query.Set("remote", options.RemoteRef) + + privileges, err := cli.checkPluginPermissions(ctx, query, options) + if err != nil { + return nil, err + } + + resp, err := cli.tryPluginUpgrade(ctx, query, privileges, name, options.RegistryAuth) + if err != nil { + return nil, err + } + return 
resp.body, nil +} + +func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges types.PluginPrivileges, name, registryAuth string) (serverResponse, error) { + headers := map[string][]string{registry.AuthHeader: {registryAuth}} + return cli.post(ctx, "/plugins/"+name+"/upgrade", query, privileges, headers) +} diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go new file mode 100644 index 0000000000..bcedcf3bd9 --- /dev/null +++ b/vendor/github.com/docker/docker/client/request.go @@ -0,0 +1,275 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net" + "net/http" + "net/url" + "os" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/errdefs" + "github.com/pkg/errors" +) + +// serverResponse is a wrapper for http API responses. +type serverResponse struct { + body io.ReadCloser + header http.Header + statusCode int + reqURL *url.URL +} + +// head sends an http request to the docker API using the method HEAD. +func (cli *Client) head(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, http.MethodHead, path, query, nil, headers) +} + +// get sends an http request to the docker API using the method GET with a specific Go context. +func (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, http.MethodGet, path, query, nil, headers) +} + +// post sends an http request to the docker API using the method POST with a specific Go context. +func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { + body, headers, err := encodeBody(obj, headers) + if err != nil { + return serverResponse{}, err + } + return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers) +} + +func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers) +} + +func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { + body, headers, err := encodeBody(obj, headers) + if err != nil { + return serverResponse{}, err + } + return cli.sendRequest(ctx, http.MethodPut, path, query, body, headers) +} + +// putRaw sends an http request to the docker API using the method PUT. +func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, http.MethodPut, path, query, body, headers) +} + +// delete sends an http request to the docker API using the method DELETE. 
+func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, http.MethodDelete, path, query, nil, headers) +} + +type headers map[string][]string + +func encodeBody(obj interface{}, headers headers) (io.Reader, headers, error) { + if obj == nil { + return nil, headers, nil + } + + body, err := encodeData(obj) + if err != nil { + return nil, headers, err + } + if headers == nil { + headers = make(map[string][]string) + } + headers["Content-Type"] = []string{"application/json"} + return body, headers, nil +} + +func (cli *Client) buildRequest(method, path string, body io.Reader, headers headers) (*http.Request, error) { + expectedPayload := (method == http.MethodPost || method == http.MethodPut) + if expectedPayload && body == nil { + body = bytes.NewReader([]byte{}) + } + + req, err := http.NewRequest(method, path, body) + if err != nil { + return nil, err + } + req = cli.addHeaders(req, headers) + req.URL.Scheme = cli.scheme + req.URL.Host = cli.addr + + if cli.proto == "unix" || cli.proto == "npipe" { + // Override host header for non-tcp connections. + req.Host = DummyHost + } + + if expectedPayload && req.Header.Get("Content-Type") == "" { + req.Header.Set("Content-Type", "text/plain") + } + return req, nil +} + +func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers headers) (serverResponse, error) { + req, err := cli.buildRequest(method, cli.getAPIPath(ctx, path, query), body, headers) + if err != nil { + return serverResponse{}, err + } + + resp, err := cli.doRequest(ctx, req) + switch { + case errors.Is(err, context.Canceled): + return serverResponse{}, errdefs.Cancelled(err) + case errors.Is(err, context.DeadlineExceeded): + return serverResponse{}, errdefs.Deadline(err) + case err == nil: + err = cli.checkResponseErr(resp) + } + return resp, errdefs.FromStatusCode(err, resp.statusCode) +} + +func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResponse, error) { + serverResp := serverResponse{statusCode: -1, reqURL: req.URL} + + req = req.WithContext(ctx) + resp, err := cli.client.Do(req) + if err != nil { + if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") { + return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err) + } + + if cli.scheme == "https" && strings.Contains(err.Error(), "bad certificate") { + return serverResp, errors.Wrap(err, "the server probably has client authentication (--tlsverify) enabled; check your TLS client certification settings") + } + + // Don't decorate context sentinel errors; users may be comparing to + // them directly. 
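+ // For example, errors.Is(err, context.Canceled) still matches a wrapped
+ // error, but a plain err == context.Canceled comparison would not.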
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return serverResp, err + } + + if nErr, ok := err.(*url.Error); ok { + if nErr, ok := nErr.Err.(*net.OpError); ok { + if os.IsPermission(nErr.Err) { + return serverResp, errors.Wrapf(err, "permission denied while trying to connect to the Docker daemon socket at %v", cli.host) + } + } + } + + if err, ok := err.(net.Error); ok { + if err.Timeout() { + return serverResp, ErrorConnectionFailed(cli.host) + } + if strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") { + return serverResp, ErrorConnectionFailed(cli.host) + } + } + + // Although there's not a strongly typed error for this in go-winio, + // lots of people are using the default configuration for the docker + // daemon on Windows where the daemon is listening on a named pipe + // `//./pipe/docker_engine, and the client must be running elevated. + // Give users a clue rather than the not-overly useful message + // such as `error during connect: Get http://%2F%2F.%2Fpipe%2Fdocker_engine/v1.26/info: + // open //./pipe/docker_engine: The system cannot find the file specified.`. + // Note we can't string compare "The system cannot find the file specified" as + // this is localised - for example in French the error would be + // `open //./pipe/docker_engine: Le fichier spécifié est introuvable.` + if strings.Contains(err.Error(), `open //./pipe/docker_engine`) { + // Checks if client is running with elevated privileges + if f, elevatedErr := os.Open("\\\\.\\PHYSICALDRIVE0"); elevatedErr == nil { + err = errors.Wrap(err, "in the default daemon configuration on Windows, the docker client must be run with elevated privileges to connect") + } else { + f.Close() + err = errors.Wrap(err, "this error may indicate that the docker daemon is not running") + } + } + + return serverResp, errors.Wrap(err, "error during connect") + } + + if resp != nil { + serverResp.statusCode = resp.StatusCode + serverResp.body = resp.Body + serverResp.header = resp.Header + } + return serverResp, nil +} + +func (cli *Client) checkResponseErr(serverResp serverResponse) error { + if serverResp.statusCode >= 200 && serverResp.statusCode < 400 { + return nil + } + + var body []byte + var err error + if serverResp.body != nil { + bodyMax := 1 * 1024 * 1024 // 1 MiB + bodyR := &io.LimitedReader{ + R: serverResp.body, + N: int64(bodyMax), + } + body, err = io.ReadAll(bodyR) + if err != nil { + return err + } + if bodyR.N == 0 { + return fmt.Errorf("request returned %s with a message (> %d bytes) for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), bodyMax, serverResp.reqURL) + } + } + if len(body) == 0 { + return fmt.Errorf("request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL) + } + + var ct string + if serverResp.header != nil { + ct = serverResp.header.Get("Content-Type") + } + + var errorMessage string + if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) && ct == "application/json" { + var errorResponse types.ErrorResponse + if err := json.Unmarshal(body, &errorResponse); err != nil { + return errors.Wrap(err, "Error reading JSON") + } + errorMessage = strings.TrimSpace(errorResponse.Message) + } else { + errorMessage = strings.TrimSpace(string(body)) + } + + return errors.Wrap(errors.New(errorMessage), "Error response from daemon") +} + 
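+// addHeaders applies the client's configured custom headers first and the
+// per-request headers second, so request-scoped values take precedence.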
+func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request { + // Add CLI Config's HTTP Headers BEFORE we set the Docker headers + // then the user can't change OUR headers + for k, v := range cli.customHTTPHeaders { + if versions.LessThan(cli.version, "1.25") && http.CanonicalHeaderKey(k) == "User-Agent" { + continue + } + req.Header.Set(k, v) + } + + for k, v := range headers { + req.Header[http.CanonicalHeaderKey(k)] = v + } + return req +} + +func encodeData(data interface{}) (*bytes.Buffer, error) { + params := bytes.NewBuffer(nil) + if data != nil { + if err := json.NewEncoder(params).Encode(data); err != nil { + return nil, err + } + } + return params, nil +} + +func ensureReaderClosed(response serverResponse) { + if response.body != nil { + // Drain up to 512 bytes and close the body to let the Transport reuse the connection + io.CopyN(io.Discard, response.body, 512) + response.body.Close() + } +} diff --git a/vendor/github.com/docker/docker/client/secret_create.go b/vendor/github.com/docker/docker/client/secret_create.go new file mode 100644 index 0000000000..c65d38a191 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_create.go @@ -0,0 +1,25 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" +) + +// SecretCreate creates a new secret. +func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) { + var response types.SecretCreateResponse + if err := cli.NewVersionError("1.25", "secret create"); err != nil { + return response, err + } + resp, err := cli.post(ctx, "/secrets/create", nil, secret, nil) + defer ensureReaderClosed(resp) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/secret_inspect.go b/vendor/github.com/docker/docker/client/secret_inspect.go new file mode 100644 index 0000000000..5906874b15 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_inspect.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io" + + "github.com/docker/docker/api/types/swarm" +) + +// SecretInspectWithRaw returns the secret information with raw data +func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) { + if err := cli.NewVersionError("1.25", "secret inspect"); err != nil { + return swarm.Secret{}, nil, err + } + if id == "" { + return swarm.Secret{}, nil, objectNotFoundError{object: "secret", id: id} + } + resp, err := cli.get(ctx, "/secrets/"+id, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return swarm.Secret{}, nil, err + } + + body, err := io.ReadAll(resp.body) + if err != nil { + return swarm.Secret{}, nil, err + } + + var secret swarm.Secret + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&secret) + + return secret, body, err +} diff --git a/vendor/github.com/docker/docker/client/secret_list.go b/vendor/github.com/docker/docker/client/secret_list.go new file mode 100644 index 0000000000..a0289c9f44 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_list.go @@ -0,0 +1,38 @@ +package client // import 
"github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" +) + +// SecretList returns the list of secrets. +func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { + if err := cli.NewVersionError("1.25", "secret list"); err != nil { + return nil, err + } + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToJSON(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/secrets", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return nil, err + } + + var secrets []swarm.Secret + err = json.NewDecoder(resp.body).Decode(&secrets) + return secrets, err +} diff --git a/vendor/github.com/docker/docker/client/secret_remove.go b/vendor/github.com/docker/docker/client/secret_remove.go new file mode 100644 index 0000000000..f47f68b6e0 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_remove.go @@ -0,0 +1,13 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// SecretRemove removes a secret. +func (cli *Client) SecretRemove(ctx context.Context, id string) error { + if err := cli.NewVersionError("1.25", "secret remove"); err != nil { + return err + } + resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil) + defer ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/secret_update.go b/vendor/github.com/docker/docker/client/secret_update.go new file mode 100644 index 0000000000..2e939e8ced --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_update.go @@ -0,0 +1,20 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types/swarm" +) + +// SecretUpdate attempts to update a secret. +func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error { + if err := cli.NewVersionError("1.25", "secret update"); err != nil { + return err + } + query := url.Values{} + query.Set("version", version.String()) + resp, err := cli.post(ctx, "/secrets/"+id+"/update", query, secret, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go new file mode 100644 index 0000000000..b6065b8eef --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_create.go @@ -0,0 +1,179 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + "strings" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/swarm" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// ServiceCreate creates a new service. 
+func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) { + var response types.ServiceCreateResponse + headers := map[string][]string{ + "version": {cli.version}, + } + + if options.EncodedRegistryAuth != "" { + headers[registry.AuthHeader] = []string{options.EncodedRegistryAuth} + } + + // Make sure containerSpec is not nil when no runtime is set or the runtime is set to container + if service.TaskTemplate.ContainerSpec == nil && (service.TaskTemplate.Runtime == "" || service.TaskTemplate.Runtime == swarm.RuntimeContainer) { + service.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{} + } + + if err := validateServiceSpec(service); err != nil { + return response, err + } + + // ensure that the image is tagged + var resolveWarning string + switch { + case service.TaskTemplate.ContainerSpec != nil: + if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" { + service.TaskTemplate.ContainerSpec.Image = taggedImg + } + if options.QueryRegistry { + resolveWarning = resolveContainerSpecImage(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth) + } + case service.TaskTemplate.PluginSpec != nil: + if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" { + service.TaskTemplate.PluginSpec.Remote = taggedImg + } + if options.QueryRegistry { + resolveWarning = resolvePluginSpecRemote(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth) + } + } + + resp, err := cli.post(ctx, "/services/create", nil, service, headers) + defer ensureReaderClosed(resp) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + if resolveWarning != "" { + response.Warnings = append(response.Warnings, resolveWarning) + } + + return response, err +} + +func resolveContainerSpecImage(ctx context.Context, cli DistributionAPIClient, taskSpec *swarm.TaskSpec, encodedAuth string) string { + var warning string + if img, imgPlatforms, err := imageDigestAndPlatforms(ctx, cli, taskSpec.ContainerSpec.Image, encodedAuth); err != nil { + warning = digestWarning(taskSpec.ContainerSpec.Image) + } else { + taskSpec.ContainerSpec.Image = img + if len(imgPlatforms) > 0 { + if taskSpec.Placement == nil { + taskSpec.Placement = &swarm.Placement{} + } + taskSpec.Placement.Platforms = imgPlatforms + } + } + return warning +} + +func resolvePluginSpecRemote(ctx context.Context, cli DistributionAPIClient, taskSpec *swarm.TaskSpec, encodedAuth string) string { + var warning string + if img, imgPlatforms, err := imageDigestAndPlatforms(ctx, cli, taskSpec.PluginSpec.Remote, encodedAuth); err != nil { + warning = digestWarning(taskSpec.PluginSpec.Remote) + } else { + taskSpec.PluginSpec.Remote = img + if len(imgPlatforms) > 0 { + if taskSpec.Placement == nil { + taskSpec.Placement = &swarm.Placement{} + } + taskSpec.Placement.Platforms = imgPlatforms + } + } + return warning +} + +func imageDigestAndPlatforms(ctx context.Context, cli DistributionAPIClient, image, encodedAuth string) (string, []swarm.Platform, error) { + distributionInspect, err := cli.DistributionInspect(ctx, image, encodedAuth) + var platforms []swarm.Platform + if err != nil { + return "", nil, err + } + + imageWithDigest := imageWithDigestString(image, distributionInspect.Descriptor.Digest) + + if len(distributionInspect.Platforms) > 0 { + platforms = make([]swarm.Platform, 0, len(distributionInspect.Platforms)) + for _, p := range 
distributionInspect.Platforms {
+            // clear architecture field for arm. This is a temporary patch to address
+            // https://github.com/docker/swarmkit/issues/2294. The issue is that while
+            // image manifests report "arm" as the architecture, the node reports
+            // something like "armv7l" (includes the variant), which causes arm images
+            // to stop working with swarm mode. This patch removes the architecture
+            // constraint for arm images to ensure tasks get scheduled.
+            arch := p.Architecture
+            if strings.ToLower(arch) == "arm" {
+                arch = ""
+            }
+            platforms = append(platforms, swarm.Platform{
+                Architecture: arch,
+                OS:           p.OS,
+            })
+        }
+    }
+    return imageWithDigest, platforms, err
+}
+
+// imageWithDigestString takes an image string and a digest, and updates
+// the image string if it didn't originally contain a digest. It returns
+// image unmodified in other situations.
+func imageWithDigestString(image string, dgst digest.Digest) string {
+    namedRef, err := reference.ParseNormalizedNamed(image)
+    if err == nil {
+        if _, isCanonical := namedRef.(reference.Canonical); !isCanonical {
+            // pin the image reference to the resolved digest
+            img, err := reference.WithDigest(namedRef, dgst)
+            if err == nil {
+                return reference.FamiliarString(img)
+            }
+        }
+    }
+    return image
+}
+
+// imageWithTagString takes an image string, and returns a tagged image
+// string, adding a 'latest' tag if one was not provided. It returns an
+// empty string if a canonical reference was provided
+func imageWithTagString(image string) string {
+    namedRef, err := reference.ParseNormalizedNamed(image)
+    if err == nil {
+        return reference.FamiliarString(reference.TagNameOnly(namedRef))
+    }
+    return ""
+}
+
+// digestWarning constructs a formatted warning string using the
+// image name that could not be pinned by digest. The formatting
+// is hardcoded, but could be made smarter in the future
+func digestWarning(image string) string {
+    return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image)
+}
+
+func validateServiceSpec(s swarm.ServiceSpec) error {
+    if s.TaskTemplate.ContainerSpec != nil && s.TaskTemplate.PluginSpec != nil {
+        return errors.New("must not specify both a container spec and a plugin spec in the task template")
+    }
+    if s.TaskTemplate.PluginSpec != nil && s.TaskTemplate.Runtime != swarm.RuntimePlugin {
+        return errors.New("mismatched runtime with plugin spec")
+    }
+    if s.TaskTemplate.ContainerSpec != nil && (s.TaskTemplate.Runtime != "" && s.TaskTemplate.Runtime != swarm.RuntimeContainer) {
+        return errors.New("mismatched runtime with container spec")
+    }
+    return nil
+}
diff --git a/vendor/github.com/docker/docker/client/service_inspect.go b/vendor/github.com/docker/docker/client/service_inspect.go
new file mode 100644
index 0000000000..cee020c98b
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/service_inspect.go
@@ -0,0 +1,37 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+    "bytes"
+    "context"
+    "encoding/json"
+    "fmt"
+    "io"
+    "net/url"
+
+    "github.com/docker/docker/api/types"
+    "github.com/docker/docker/api/types/swarm"
+)
+
+// ServiceInspectWithRaw returns the service information and the raw data.
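+//
+// A usage sketch (assumes an initialized *Client named cli and a service
+// named "web"; the raw bytes are the unparsed JSON from the daemon):
+//
+//    svc, raw, err := cli.ServiceInspectWithRaw(ctx, "web", types.ServiceInspectOptions{InsertDefaults: true})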
+func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, opts types.ServiceInspectOptions) (swarm.Service, []byte, error) { + if serviceID == "" { + return swarm.Service{}, nil, objectNotFoundError{object: "service", id: serviceID} + } + query := url.Values{} + query.Set("insertDefaults", fmt.Sprintf("%v", opts.InsertDefaults)) + serverResp, err := cli.get(ctx, "/services/"+serviceID, query, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return swarm.Service{}, nil, err + } + + body, err := io.ReadAll(serverResp.body) + if err != nil { + return swarm.Service{}, nil, err + } + + var response swarm.Service + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/docker/docker/client/service_list.go b/vendor/github.com/docker/docker/client/service_list.go new file mode 100644 index 0000000000..f97ec75a5c --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_list.go @@ -0,0 +1,39 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" +) + +// ServiceList returns the list of services. +func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToJSON(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + if options.Status { + query.Set("status", "true") + } + + resp, err := cli.get(ctx, "/services", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return nil, err + } + + var services []swarm.Service + err = json.NewDecoder(resp.body).Decode(&services) + return services, err +} diff --git a/vendor/github.com/docker/docker/client/service_logs.go b/vendor/github.com/docker/docker/client/service_logs.go new file mode 100644 index 0000000000..906fd4059e --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_logs.go @@ -0,0 +1,52 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + "time" + + "github.com/docker/docker/api/types" + timetypes "github.com/docker/docker/api/types/time" + "github.com/pkg/errors" +) + +// ServiceLogs returns the logs generated by a service in an io.ReadCloser. +// It's up to the caller to close the stream. 
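+//
+// A usage sketch (assumes an initialized *Client named cli and a service
+// named "web"):
+//
+//    rc, err := cli.ServiceLogs(ctx, "web", types.ContainerLogsOptions{ShowStdout: true, Tail: "100"})
+//    if err != nil {
+//        return err
+//    }
+//    defer rc.Close()
+//    _, err = io.Copy(os.Stdout, rc)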
+func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, errors.Wrap(err, `invalid value for "since"`) + } + query.Set("since", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/services/"+serviceID+"/logs", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/service_remove.go b/vendor/github.com/docker/docker/client/service_remove.go new file mode 100644 index 0000000000..2c46326ebc --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_remove.go @@ -0,0 +1,10 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// ServiceRemove kills and removes a service. +func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error { + resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil) + defer ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/service_update.go b/vendor/github.com/docker/docker/client/service_update.go new file mode 100644 index 0000000000..ff8cded8be --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_update.go @@ -0,0 +1,75 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/swarm" +) + +// ServiceUpdate updates a Service. The version number is required to avoid conflicting writes. +// It should be the value as set *before* the update. You can find this value in the Meta field +// of swarm.Service, which can be found using ServiceInspectWithRaw. 
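+//
+// A usage sketch of the read-modify-write cycle (assumes an initialized
+// *Client named cli and a service named "web"):
+//
+//    svc, _, err := cli.ServiceInspectWithRaw(ctx, "web", types.ServiceInspectOptions{})
+//    if err != nil {
+//        return err
+//    }
+//    svc.Spec.TaskTemplate.ForceUpdate++ // force tasks to be recreated
+//    _, err = cli.ServiceUpdate(ctx, svc.ID, svc.Version, svc.Spec, types.ServiceUpdateOptions{})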
+func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) { + var ( + query = url.Values{} + response = types.ServiceUpdateResponse{} + ) + + headers := map[string][]string{ + "version": {cli.version}, + } + + if options.EncodedRegistryAuth != "" { + headers[registry.AuthHeader] = []string{options.EncodedRegistryAuth} + } + + if options.RegistryAuthFrom != "" { + query.Set("registryAuthFrom", options.RegistryAuthFrom) + } + + if options.Rollback != "" { + query.Set("rollback", options.Rollback) + } + + query.Set("version", version.String()) + + if err := validateServiceSpec(service); err != nil { + return response, err + } + + // ensure that the image is tagged + var resolveWarning string + switch { + case service.TaskTemplate.ContainerSpec != nil: + if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" { + service.TaskTemplate.ContainerSpec.Image = taggedImg + } + if options.QueryRegistry { + resolveWarning = resolveContainerSpecImage(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth) + } + case service.TaskTemplate.PluginSpec != nil: + if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" { + service.TaskTemplate.PluginSpec.Remote = taggedImg + } + if options.QueryRegistry { + resolveWarning = resolvePluginSpecRemote(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth) + } + } + + resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers) + defer ensureReaderClosed(resp) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + if resolveWarning != "" { + response.Warnings = append(response.Warnings, resolveWarning) + } + + return response, err +} diff --git a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go new file mode 100644 index 0000000000..19f59dd582 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types" +) + +// SwarmGetUnlockKey retrieves the swarm's unlock key. +func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) { + serverResp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return types.SwarmUnlockKeyResponse{}, err + } + + var response types.SwarmUnlockKeyResponse + err = json.NewDecoder(serverResp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/swarm_init.go b/vendor/github.com/docker/docker/client/swarm_init.go new file mode 100644 index 0000000000..da3c1637ef --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_init.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types/swarm" +) + +// SwarmInit initializes the swarm. 
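+//
+// A usage sketch (assumes an initialized *Client named cli; the returned
+// string is the node ID of the new manager):
+//
+//    nodeID, err := cli.SwarmInit(ctx, swarm.InitRequest{ListenAddr: "0.0.0.0:2377"})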
+func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) { + serverResp, err := cli.post(ctx, "/swarm/init", nil, req, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return "", err + } + + var response string + err = json.NewDecoder(serverResp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/swarm_inspect.go b/vendor/github.com/docker/docker/client/swarm_inspect.go new file mode 100644 index 0000000000..b52b67a884 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_inspect.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types/swarm" +) + +// SwarmInspect inspects the swarm. +func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) { + serverResp, err := cli.get(ctx, "/swarm", nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return swarm.Swarm{}, err + } + + var response swarm.Swarm + err = json.NewDecoder(serverResp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/swarm_join.go b/vendor/github.com/docker/docker/client/swarm_join.go new file mode 100644 index 0000000000..a1cf0455d2 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_join.go @@ -0,0 +1,14 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + + "github.com/docker/docker/api/types/swarm" +) + +// SwarmJoin joins the swarm. +func (cli *Client) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error { + resp, err := cli.post(ctx, "/swarm/join", nil, req, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/swarm_leave.go b/vendor/github.com/docker/docker/client/swarm_leave.go new file mode 100644 index 0000000000..90ca84b363 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_leave.go @@ -0,0 +1,17 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" +) + +// SwarmLeave leaves the swarm. +func (cli *Client) SwarmLeave(ctx context.Context, force bool) error { + query := url.Values{} + if force { + query.Set("force", "1") + } + resp, err := cli.post(ctx, "/swarm/leave", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/swarm_unlock.go b/vendor/github.com/docker/docker/client/swarm_unlock.go new file mode 100644 index 0000000000..d2412f7d44 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_unlock.go @@ -0,0 +1,14 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + + "github.com/docker/docker/api/types/swarm" +) + +// SwarmUnlock unlocks locked swarm. 
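+//
+// A usage sketch (assumes an initialized *Client named cli and an unlock key
+// saved earlier, e.g. from SwarmGetUnlockKey):
+//
+//    err := cli.SwarmUnlock(ctx, swarm.UnlockRequest{UnlockKey: key})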
+func (cli *Client) SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error { + serverResp, err := cli.post(ctx, "/swarm/unlock", nil, req, nil) + ensureReaderClosed(serverResp) + return err +} diff --git a/vendor/github.com/docker/docker/client/swarm_update.go b/vendor/github.com/docker/docker/client/swarm_update.go new file mode 100644 index 0000000000..9fde7d75ee --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_update.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" +) + +// SwarmUpdate updates the swarm. +func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error { + query := url.Values{} + query.Set("version", version.String()) + query.Set("rotateWorkerToken", strconv.FormatBool(flags.RotateWorkerToken)) + query.Set("rotateManagerToken", strconv.FormatBool(flags.RotateManagerToken)) + query.Set("rotateManagerUnlockKey", strconv.FormatBool(flags.RotateManagerUnlockKey)) + resp, err := cli.post(ctx, "/swarm/update", query, swarm, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/task_inspect.go b/vendor/github.com/docker/docker/client/task_inspect.go new file mode 100644 index 0000000000..dde1f6c59d --- /dev/null +++ b/vendor/github.com/docker/docker/client/task_inspect.go @@ -0,0 +1,32 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io" + + "github.com/docker/docker/api/types/swarm" +) + +// TaskInspectWithRaw returns the task information and its raw representation. +func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) { + if taskID == "" { + return swarm.Task{}, nil, objectNotFoundError{object: "task", id: taskID} + } + serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return swarm.Task{}, nil, err + } + + body, err := io.ReadAll(serverResp.body) + if err != nil { + return swarm.Task{}, nil, err + } + + var response swarm.Task + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/docker/docker/client/task_list.go b/vendor/github.com/docker/docker/client/task_list.go new file mode 100644 index 0000000000..4869b44493 --- /dev/null +++ b/vendor/github.com/docker/docker/client/task_list.go @@ -0,0 +1,35 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" +) + +// TaskList returns the list of tasks. 
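+//
+// A usage sketch (assumes an initialized *Client named cli; filters the
+// tasks of a hypothetical service named "web"):
+//
+//    f := filters.NewArgs()
+//    f.Add("service", "web")
+//    tasks, err := cli.TaskList(ctx, types.TaskListOptions{Filters: f})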
+func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToJSON(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/tasks", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return nil, err + } + + var tasks []swarm.Task + err = json.NewDecoder(resp.body).Decode(&tasks) + return tasks, err +} diff --git a/vendor/github.com/docker/docker/client/task_logs.go b/vendor/github.com/docker/docker/client/task_logs.go new file mode 100644 index 0000000000..6222fab577 --- /dev/null +++ b/vendor/github.com/docker/docker/client/task_logs.go @@ -0,0 +1,51 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + "time" + + "github.com/docker/docker/api/types" + timetypes "github.com/docker/docker/api/types/time" +) + +// TaskLogs returns the logs generated by a task in an io.ReadCloser. +// It's up to the caller to close the stream. +func (cli *Client) TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/tasks/"+taskID+"/logs", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/transport.go b/vendor/github.com/docker/docker/client/transport.go new file mode 100644 index 0000000000..5541344366 --- /dev/null +++ b/vendor/github.com/docker/docker/client/transport.go @@ -0,0 +1,17 @@ +package client // import "github.com/docker/docker/client" + +import ( + "crypto/tls" + "net/http" +) + +// resolveTLSConfig attempts to resolve the TLS configuration from the +// RoundTripper. +func resolveTLSConfig(transport http.RoundTripper) *tls.Config { + switch tr := transport.(type) { + case *http.Transport: + return tr.TLSClientConfig + default: + return nil + } +} diff --git a/vendor/github.com/docker/docker/client/utils.go b/vendor/github.com/docker/docker/client/utils.go new file mode 100644 index 0000000000..7f3ff44eb8 --- /dev/null +++ b/vendor/github.com/docker/docker/client/utils.go @@ -0,0 +1,34 @@ +package client // import "github.com/docker/docker/client" + +import ( + "net/url" + "regexp" + + "github.com/docker/docker/api/types/filters" +) + +var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`) + +// getDockerOS returns the operating system based on the server header from the daemon. +func getDockerOS(serverHeader string) string { + var osType string + matches := headerRegexp.FindStringSubmatch(serverHeader) + if len(matches) > 0 { + osType = matches[1] + } + return osType +} + +// getFiltersQuery returns a url query with "filters" query term, based on the +// filters provided. 
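+//
+// A sketch of the resulting query (the exact JSON shape comes from
+// filters.ToJSON):
+//
+//    f := filters.NewArgs()
+//    f.Add("dangling", "true")
+//    q, _ := getFiltersQuery(f)
+//    // q.Encode() yields filters=%7B%22dangling%22%3A%7B%22true%22%3Atrue%7D%7D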
+func getFiltersQuery(f filters.Args) (url.Values, error) { + query := url.Values{} + if f.Len() > 0 { + filterJSON, err := filters.ToJSON(f) + if err != nil { + return query, err + } + query.Set("filters", filterJSON) + } + return query, nil +} diff --git a/vendor/github.com/docker/docker/client/version.go b/vendor/github.com/docker/docker/client/version.go new file mode 100644 index 0000000000..8f17ff4e87 --- /dev/null +++ b/vendor/github.com/docker/docker/client/version.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types" +) + +// ServerVersion returns information of the docker client and server host. +func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) { + resp, err := cli.get(ctx, "/version", nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return types.Version{}, err + } + + var server types.Version + err = json.NewDecoder(resp.body).Decode(&server) + return server, err +} diff --git a/vendor/github.com/docker/docker/client/volume_create.go b/vendor/github.com/docker/docker/client/volume_create.go new file mode 100644 index 0000000000..b3b182437b --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_create.go @@ -0,0 +1,20 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types/volume" +) + +// VolumeCreate creates a volume in the docker host. +func (cli *Client) VolumeCreate(ctx context.Context, options volume.CreateOptions) (volume.Volume, error) { + var vol volume.Volume + resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) + defer ensureReaderClosed(resp) + if err != nil { + return vol, err + } + err = json.NewDecoder(resp.body).Decode(&vol) + return vol, err +} diff --git a/vendor/github.com/docker/docker/client/volume_inspect.go b/vendor/github.com/docker/docker/client/volume_inspect.go new file mode 100644 index 0000000000..b3ba4e6046 --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_inspect.go @@ -0,0 +1,38 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io" + + "github.com/docker/docker/api/types/volume" +) + +// VolumeInspect returns the information about a specific volume in the docker host. 
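+//
+// A usage sketch (assumes an initialized *Client named cli and a volume
+// named "mydata"):
+//
+//    vol, err := cli.VolumeInspect(ctx, "mydata")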
+func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (volume.Volume, error) { + vol, _, err := cli.VolumeInspectWithRaw(ctx, volumeID) + return vol, err +} + +// VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation +func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (volume.Volume, []byte, error) { + if volumeID == "" { + return volume.Volume{}, nil, objectNotFoundError{object: "volume", id: volumeID} + } + + var vol volume.Volume + resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return vol, nil, err + } + + body, err := io.ReadAll(resp.body) + if err != nil { + return vol, nil, err + } + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&vol) + return vol, body, err +} diff --git a/vendor/github.com/docker/docker/client/volume_list.go b/vendor/github.com/docker/docker/client/volume_list.go new file mode 100644 index 0000000000..d5ea9827c7 --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_list.go @@ -0,0 +1,33 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/volume" +) + +// VolumeList returns the volumes configured in the docker host. +func (cli *Client) VolumeList(ctx context.Context, options volume.ListOptions) (volume.ListResponse, error) { + var volumes volume.ListResponse + query := url.Values{} + + if options.Filters.Len() > 0 { + //nolint:staticcheck // ignore SA1019 for old code + filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) + if err != nil { + return volumes, err + } + query.Set("filters", filterJSON) + } + resp, err := cli.get(ctx, "/volumes", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return volumes, err + } + + err = json.NewDecoder(resp.body).Decode(&volumes) + return volumes, err +} diff --git a/vendor/github.com/docker/docker/client/volume_prune.go b/vendor/github.com/docker/docker/client/volume_prune.go new file mode 100644 index 0000000000..6e324708f2 --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_prune.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// VolumesPrune requests the daemon to delete unused data +func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (types.VolumesPruneReport, error) { + var report types.VolumesPruneReport + + if err := cli.NewVersionError("1.25", "volume prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/volumes/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return report, err + } + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving volume prune report: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/docker/docker/client/volume_remove.go b/vendor/github.com/docker/docker/client/volume_remove.go new file mode 100644 index 0000000000..1f26438360 --- /dev/null +++ 
b/vendor/github.com/docker/docker/client/volume_remove.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types/versions" +) + +// VolumeRemove removes a volume from the docker host. +func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool) error { + query := url.Values{} + if versions.GreaterThanOrEqualTo(cli.version, "1.25") { + if force { + query.Set("force", "1") + } + } + resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil) + defer ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/volume_update.go b/vendor/github.com/docker/docker/client/volume_update.go new file mode 100644 index 0000000000..33bd31e531 --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_update.go @@ -0,0 +1,24 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/volume" +) + +// VolumeUpdate updates a volume. This only works for Cluster Volumes, and +// only some fields can be updated. +func (cli *Client) VolumeUpdate(ctx context.Context, volumeID string, version swarm.Version, options volume.UpdateOptions) error { + if err := cli.NewVersionError("1.42", "volume update"); err != nil { + return err + } + + query := url.Values{} + query.Set("version", version.String()) + + resp, err := cli.put(ctx, "/volumes/"+volumeID, query, options, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/errdefs/defs.go b/vendor/github.com/docker/docker/errdefs/defs.go new file mode 100644 index 0000000000..61e7456b4e --- /dev/null +++ b/vendor/github.com/docker/docker/errdefs/defs.go @@ -0,0 +1,69 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +// ErrNotFound signals that the requested object doesn't exist +type ErrNotFound interface { + NotFound() +} + +// ErrInvalidParameter signals that the user input is invalid +type ErrInvalidParameter interface { + InvalidParameter() +} + +// ErrConflict signals that some internal state conflicts with the requested action and can't be performed. +// A change in state should be able to clear this error. +type ErrConflict interface { + Conflict() +} + +// ErrUnauthorized is used to signify that the user is not authorized to perform a specific action +type ErrUnauthorized interface { + Unauthorized() +} + +// ErrUnavailable signals that the requested action/subsystem is not available. +type ErrUnavailable interface { + Unavailable() +} + +// ErrForbidden signals that the requested action cannot be performed under any circumstances. +// When a ErrForbidden is returned, the caller should never retry the action. +type ErrForbidden interface { + Forbidden() +} + +// ErrSystem signals that some internal error occurred. +// An example of this would be a failed mount request. +type ErrSystem interface { + System() +} + +// ErrNotModified signals that an action can't be performed because it's already in the desired state +type ErrNotModified interface { + NotModified() +} + +// ErrNotImplemented signals that the requested action/feature is not implemented on the system as configured. +type ErrNotImplemented interface { + NotImplemented() +} + +// ErrUnknown signals that the kind of error that occurred is not known. 
+type ErrUnknown interface { + Unknown() +} + +// ErrCancelled signals that the action was cancelled. +type ErrCancelled interface { + Cancelled() +} + +// ErrDeadline signals that the deadline was reached before the action completed. +type ErrDeadline interface { + DeadlineExceeded() +} + +// ErrDataLoss indicates that data was lost or there is data corruption. +type ErrDataLoss interface { + DataLoss() +} diff --git a/vendor/github.com/docker/docker/errdefs/doc.go b/vendor/github.com/docker/docker/errdefs/doc.go new file mode 100644 index 0000000000..c211f174fc --- /dev/null +++ b/vendor/github.com/docker/docker/errdefs/doc.go @@ -0,0 +1,8 @@ +// Package errdefs defines a set of error interfaces that packages should use for communicating classes of errors. +// Errors that cross the package boundary should implement one (and only one) of these interfaces. +// +// Packages should not reference these interfaces directly, only implement them. +// To check if a particular error implements one of these interfaces, there are helper +// functions provided (e.g. `Is`) which can be used rather than asserting the interfaces directly. +// If you must assert on these interfaces, be sure to check the causal chain (`err.Cause()`). +package errdefs // import "github.com/docker/docker/errdefs" diff --git a/vendor/github.com/docker/docker/errdefs/helpers.go b/vendor/github.com/docker/docker/errdefs/helpers.go new file mode 100644 index 0000000000..fe06fb6f70 --- /dev/null +++ b/vendor/github.com/docker/docker/errdefs/helpers.go @@ -0,0 +1,279 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +import "context" + +type errNotFound struct{ error } + +func (errNotFound) NotFound() {} + +func (e errNotFound) Cause() error { + return e.error +} + +func (e errNotFound) Unwrap() error { + return e.error +} + +// NotFound is a helper to create an error of the class with the same name from any error type +func NotFound(err error) error { + if err == nil || IsNotFound(err) { + return err + } + return errNotFound{err} +} + +type errInvalidParameter struct{ error } + +func (errInvalidParameter) InvalidParameter() {} + +func (e errInvalidParameter) Cause() error { + return e.error +} + +func (e errInvalidParameter) Unwrap() error { + return e.error +} + +// InvalidParameter is a helper to create an error of the class with the same name from any error type +func InvalidParameter(err error) error { + if err == nil || IsInvalidParameter(err) { + return err + } + return errInvalidParameter{err} +} + +type errConflict struct{ error } + +func (errConflict) Conflict() {} + +func (e errConflict) Cause() error { + return e.error +} + +func (e errConflict) Unwrap() error { + return e.error +} + +// Conflict is a helper to create an error of the class with the same name from any error type +func Conflict(err error) error { + if err == nil || IsConflict(err) { + return err + } + return errConflict{err} +} + +type errUnauthorized struct{ error } + +func (errUnauthorized) Unauthorized() {} + +func (e errUnauthorized) Cause() error { + return e.error +} + +func (e errUnauthorized) Unwrap() error { + return e.error +} + +// Unauthorized is a helper to create an error of the class with the same name from any error type +func Unauthorized(err error) error { + if err == nil || IsUnauthorized(err) { + return err + } + return errUnauthorized{err} +} + +type errUnavailable struct{ error } + +func (errUnavailable) Unavailable() {} + +func (e errUnavailable) 
Cause() error { + return e.error +} + +func (e errUnavailable) Unwrap() error { + return e.error +} + +// Unavailable is a helper to create an error of the class with the same name from any error type +func Unavailable(err error) error { + if err == nil || IsUnavailable(err) { + return err + } + return errUnavailable{err} +} + +type errForbidden struct{ error } + +func (errForbidden) Forbidden() {} + +func (e errForbidden) Cause() error { + return e.error +} + +func (e errForbidden) Unwrap() error { + return e.error +} + +// Forbidden is a helper to create an error of the class with the same name from any error type +func Forbidden(err error) error { + if err == nil || IsForbidden(err) { + return err + } + return errForbidden{err} +} + +type errSystem struct{ error } + +func (errSystem) System() {} + +func (e errSystem) Cause() error { + return e.error +} + +func (e errSystem) Unwrap() error { + return e.error +} + +// System is a helper to create an error of the class with the same name from any error type +func System(err error) error { + if err == nil || IsSystem(err) { + return err + } + return errSystem{err} +} + +type errNotModified struct{ error } + +func (errNotModified) NotModified() {} + +func (e errNotModified) Cause() error { + return e.error +} + +func (e errNotModified) Unwrap() error { + return e.error +} + +// NotModified is a helper to create an error of the class with the same name from any error type +func NotModified(err error) error { + if err == nil || IsNotModified(err) { + return err + } + return errNotModified{err} +} + +type errNotImplemented struct{ error } + +func (errNotImplemented) NotImplemented() {} + +func (e errNotImplemented) Cause() error { + return e.error +} + +func (e errNotImplemented) Unwrap() error { + return e.error +} + +// NotImplemented is a helper to create an error of the class with the same name from any error type +func NotImplemented(err error) error { + if err == nil || IsNotImplemented(err) { + return err + } + return errNotImplemented{err} +} + +type errUnknown struct{ error } + +func (errUnknown) Unknown() {} + +func (e errUnknown) Cause() error { + return e.error +} + +func (e errUnknown) Unwrap() error { + return e.error +} + +// Unknown is a helper to create an error of the class with the same name from any error type +func Unknown(err error) error { + if err == nil || IsUnknown(err) { + return err + } + return errUnknown{err} +} + +type errCancelled struct{ error } + +func (errCancelled) Cancelled() {} + +func (e errCancelled) Cause() error { + return e.error +} + +func (e errCancelled) Unwrap() error { + return e.error +} + +// Cancelled is a helper to create an error of the class with the same name from any error type +func Cancelled(err error) error { + if err == nil || IsCancelled(err) { + return err + } + return errCancelled{err} +} + +type errDeadline struct{ error } + +func (errDeadline) DeadlineExceeded() {} + +func (e errDeadline) Cause() error { + return e.error +} + +func (e errDeadline) Unwrap() error { + return e.error +} + +// Deadline is a helper to create an error of the class with the same name from any error type +func Deadline(err error) error { + if err == nil || IsDeadline(err) { + return err + } + return errDeadline{err} +} + +type errDataLoss struct{ error } + +func (errDataLoss) DataLoss() {} + +func (e errDataLoss) Cause() error { + return e.error +} + +func (e errDataLoss) Unwrap() error { + return e.error +} + +// DataLoss is a helper to create an error of the class with the same name from any error type 
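+//
+// All of these helpers follow the same wrap-and-classify pattern; a sketch
+// (the error text and variable name are illustrative):
+//
+//    err := DataLoss(fmt.Errorf("volume %s: checksum mismatch", name))
+//    IsDataLoss(err)    // true
+//    errors.Unwrap(err) // yields the original error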
+func DataLoss(err error) error {
+    if err == nil || IsDataLoss(err) {
+        return err
+    }
+    return errDataLoss{err}
+}
+
+// FromContext returns the error class from the passed in context
+func FromContext(ctx context.Context) error {
+    e := ctx.Err()
+    if e == nil {
+        return nil
+    }
+
+    if e == context.Canceled {
+        return Cancelled(e)
+    }
+    if e == context.DeadlineExceeded {
+        return Deadline(e)
+    }
+    return Unknown(e)
+}
diff --git a/vendor/github.com/docker/docker/errdefs/http_helpers.go b/vendor/github.com/docker/docker/errdefs/http_helpers.go
new file mode 100644
index 0000000000..77bda389d1
--- /dev/null
+++ b/vendor/github.com/docker/docker/errdefs/http_helpers.go
@@ -0,0 +1,46 @@
+package errdefs // import "github.com/docker/docker/errdefs"
+
+import (
+    "net/http"
+)
+
+// FromStatusCode creates an errdef error, based on the provided HTTP status-code
+func FromStatusCode(err error, statusCode int) error {
+    if err == nil {
+        return nil
+    }
+    switch statusCode {
+    case http.StatusNotFound:
+        err = NotFound(err)
+    case http.StatusBadRequest:
+        err = InvalidParameter(err)
+    case http.StatusConflict:
+        err = Conflict(err)
+    case http.StatusUnauthorized:
+        err = Unauthorized(err)
+    case http.StatusServiceUnavailable:
+        err = Unavailable(err)
+    case http.StatusForbidden:
+        err = Forbidden(err)
+    case http.StatusNotModified:
+        err = NotModified(err)
+    case http.StatusNotImplemented:
+        err = NotImplemented(err)
+    case http.StatusInternalServerError:
+        if !IsSystem(err) && !IsUnknown(err) && !IsDataLoss(err) && !IsDeadline(err) && !IsCancelled(err) {
+            err = System(err)
+        }
+    default:
+        switch {
+        case statusCode >= 200 && statusCode < 400:
+            // success or redirect: not an error, leave it untouched
+        case statusCode >= 400 && statusCode < 500:
+            err = InvalidParameter(err)
+        case statusCode >= 500 && statusCode < 600:
+            err = System(err)
+        default:
+            err = Unknown(err)
+        }
+    }
+    return err
+}
diff --git a/vendor/github.com/docker/docker/errdefs/is.go b/vendor/github.com/docker/docker/errdefs/is.go
new file mode 100644
index 0000000000..3abf07d0c3
--- /dev/null
+++ b/vendor/github.com/docker/docker/errdefs/is.go
@@ -0,0 +1,107 @@
+package errdefs // import "github.com/docker/docker/errdefs"
+
+type causer interface {
+    Cause() error
+}
+
+func getImplementer(err error) error {
+    switch e := err.(type) {
+    case
+        ErrNotFound,
+        ErrInvalidParameter,
+        ErrConflict,
+        ErrUnauthorized,
+        ErrUnavailable,
+        ErrForbidden,
+        ErrSystem,
+        ErrNotModified,
+        ErrNotImplemented,
+        ErrCancelled,
+        ErrDeadline,
+        ErrDataLoss,
+        ErrUnknown:
+        return err
+    case causer:
+        return getImplementer(e.Cause())
+    default:
+        return err
+    }
+}
+
+// IsNotFound returns if the passed in error is an ErrNotFound
+func IsNotFound(err error) bool {
+    _, ok := getImplementer(err).(ErrNotFound)
+    return ok
+}
+
+// IsInvalidParameter returns if the passed in error is an ErrInvalidParameter
+func IsInvalidParameter(err error) bool {
+    _, ok := getImplementer(err).(ErrInvalidParameter)
+    return ok
+}
+
+// IsConflict returns if the passed in error is an ErrConflict
+func IsConflict(err error) bool {
+    _, ok := getImplementer(err).(ErrConflict)
+    return ok
+}
+
+// IsUnauthorized returns if the passed in error is an ErrUnauthorized
+func IsUnauthorized(err error) bool {
+    _, ok := getImplementer(err).(ErrUnauthorized)
+    return ok
+}
+
+// IsUnavailable returns if the passed in error is an ErrUnavailable
+func IsUnavailable(err error) bool {
+    _, ok := getImplementer(err).(ErrUnavailable)
+    return ok
+} + +// IsForbidden returns if the passed in error is an ErrForbidden +func IsForbidden(err error) bool { + _, ok := getImplementer(err).(ErrForbidden) + return ok +} + +// IsSystem returns if the passed in error is an ErrSystem +func IsSystem(err error) bool { + _, ok := getImplementer(err).(ErrSystem) + return ok +} + +// IsNotModified returns if the passed in error is a NotModified error +func IsNotModified(err error) bool { + _, ok := getImplementer(err).(ErrNotModified) + return ok +} + +// IsNotImplemented returns if the passed in error is an ErrNotImplemented +func IsNotImplemented(err error) bool { + _, ok := getImplementer(err).(ErrNotImplemented) + return ok +} + +// IsUnknown returns if the passed in error is an ErrUnknown +func IsUnknown(err error) bool { + _, ok := getImplementer(err).(ErrUnknown) + return ok +} + +// IsCancelled returns if the passed in error is an ErrCancelled +func IsCancelled(err error) bool { + _, ok := getImplementer(err).(ErrCancelled) + return ok +} + +// IsDeadline returns if the passed in error is an ErrDeadline +func IsDeadline(err error) bool { + _, ok := getImplementer(err).(ErrDeadline) + return ok +} + +// IsDataLoss returns if the passed in error is an ErrDataLoss +func IsDataLoss(err error) bool { + _, ok := getImplementer(err).(ErrDataLoss) + return ok +} diff --git a/vendor/github.com/docker/go-connections/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go new file mode 100644 index 0000000000..bb7e4e3369 --- /dev/null +++ b/vendor/github.com/docker/go-connections/nat/nat.go @@ -0,0 +1,242 @@ +// Package nat is a convenience package for manipulation of strings describing network ports. +package nat + +import ( + "fmt" + "net" + "strconv" + "strings" +) + +const ( + // portSpecTemplate is the expected format for port specifications + portSpecTemplate = "ip:hostPort:containerPort" +) + +// PortBinding represents a binding between a Host IP address and a Host Port +type PortBinding struct { + // HostIP is the host IP Address + HostIP string `json:"HostIp"` + // HostPort is the host port number + HostPort string +} + +// PortMap is a collection of PortBinding indexed by Port +type PortMap map[Port][]PortBinding + +// PortSet is a collection of structs indexed by Port +type PortSet map[Port]struct{} + +// Port is a string containing port number and protocol in the format "80/tcp" +type Port string + +// NewPort creates a new instance of a Port given a protocol and port number or port range +func NewPort(proto, port string) (Port, error) { + // Check for parsing issues on "port" now so we can avoid having + // to check it later on. 
+ + portStartInt, portEndInt, err := ParsePortRangeToInt(port) + if err != nil { + return "", err + } + + if portStartInt == portEndInt { + return Port(fmt.Sprintf("%d/%s", portStartInt, proto)), nil + } + return Port(fmt.Sprintf("%d-%d/%s", portStartInt, portEndInt, proto)), nil +} + +// ParsePort parses the port number string and returns an int +func ParsePort(rawPort string) (int, error) { + if len(rawPort) == 0 { + return 0, nil + } + port, err := strconv.ParseUint(rawPort, 10, 16) + if err != nil { + return 0, err + } + return int(port), nil +} + +// ParsePortRangeToInt parses the port range string and returns start/end ints +func ParsePortRangeToInt(rawPort string) (int, int, error) { + if len(rawPort) == 0 { + return 0, 0, nil + } + start, end, err := ParsePortRange(rawPort) + if err != nil { + return 0, 0, err + } + return int(start), int(end), nil +} + +// Proto returns the protocol of a Port +func (p Port) Proto() string { + proto, _ := SplitProtoPort(string(p)) + return proto +} + +// Port returns the port number of a Port +func (p Port) Port() string { + _, port := SplitProtoPort(string(p)) + return port +} + +// Int returns the port number of a Port as an int +func (p Port) Int() int { + portStr := p.Port() + // We don't need to check for an error because we're going to + // assume that any error would have been found, and reported, in NewPort() + port, _ := ParsePort(portStr) + return port +} + +// Range returns the start/end port numbers of a Port range as ints +func (p Port) Range() (int, int, error) { + return ParsePortRangeToInt(p.Port()) +} + +// SplitProtoPort splits a port in the format of proto/port +func SplitProtoPort(rawPort string) (string, string) { + parts := strings.Split(rawPort, "/") + l := len(parts) + if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 { + return "", "" + } + if l == 1 { + return "tcp", rawPort + } + if len(parts[1]) == 0 { + return "tcp", parts[0] + } + return parts[1], parts[0] +} + +func validateProto(proto string) bool { + for _, availableProto := range []string{"tcp", "udp", "sctp"} { + if availableProto == proto { + return true + } + } + return false +} + +// ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses +// these in to the internal types +func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) { + var ( + exposedPorts = make(map[Port]struct{}, len(ports)) + bindings = make(map[Port][]PortBinding) + ) + for _, rawPort := range ports { + portMappings, err := ParsePortSpec(rawPort) + if err != nil { + return nil, nil, err + } + + for _, portMapping := range portMappings { + port := portMapping.Port + if _, exists := exposedPorts[port]; !exists { + exposedPorts[port] = struct{}{} + } + bslice, exists := bindings[port] + if !exists { + bslice = []PortBinding{} + } + bindings[port] = append(bslice, portMapping.Binding) + } + } + return exposedPorts, bindings, nil +} + +// PortMapping is a data object mapping a Port to a PortBinding +type PortMapping struct { + Port Port + Binding PortBinding +} + +func splitParts(rawport string) (string, string, string) { + parts := strings.Split(rawport, ":") + n := len(parts) + containerport := parts[n-1] + + switch n { + case 1: + return "", "", containerport + case 2: + return "", parts[0], containerport + case 3: + return parts[0], parts[1], containerport + default: + return strings.Join(parts[:n-2], ":"), parts[n-2], containerport + } +} + +// ParsePortSpec parses a port specification string into a slice of PortMappings +func 
ParsePortSpec(rawPort string) ([]PortMapping, error) { + var proto string + rawIP, hostPort, containerPort := splitParts(rawPort) + proto, containerPort = SplitProtoPort(containerPort) + + // Strip [] from IPV6 addresses + ip, _, err := net.SplitHostPort(rawIP + ":") + if err != nil { + return nil, fmt.Errorf("Invalid ip address %v: %s", rawIP, err) + } + if ip != "" && net.ParseIP(ip) == nil { + return nil, fmt.Errorf("Invalid ip address: %s", ip) + } + if containerPort == "" { + return nil, fmt.Errorf("No port specified: %s", rawPort) + } + + startPort, endPort, err := ParsePortRange(containerPort) + if err != nil { + return nil, fmt.Errorf("Invalid containerPort: %s", containerPort) + } + + var startHostPort, endHostPort uint64 = 0, 0 + if len(hostPort) > 0 { + startHostPort, endHostPort, err = ParsePortRange(hostPort) + if err != nil { + return nil, fmt.Errorf("Invalid hostPort: %s", hostPort) + } + } + + if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) { + // Allow host port range iff containerPort is not a range. + // In this case, use the host port range as the dynamic + // host port range to allocate into. + if endPort != startPort { + return nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort) + } + } + + if !validateProto(strings.ToLower(proto)) { + return nil, fmt.Errorf("Invalid proto: %s", proto) + } + + ports := []PortMapping{} + for i := uint64(0); i <= (endPort - startPort); i++ { + containerPort = strconv.FormatUint(startPort+i, 10) + if len(hostPort) > 0 { + hostPort = strconv.FormatUint(startHostPort+i, 10) + } + // Set hostPort to a range only if there is a single container port + // and a dynamic host port. + if startPort == endPort && startHostPort != endHostPort { + hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10)) + } + port, err := NewPort(strings.ToLower(proto), containerPort) + if err != nil { + return nil, err + } + + binding := PortBinding{ + HostIP: ip, + HostPort: hostPort, + } + ports = append(ports, PortMapping{Port: port, Binding: binding}) + } + return ports, nil +} diff --git a/vendor/github.com/docker/go-connections/nat/parse.go b/vendor/github.com/docker/go-connections/nat/parse.go new file mode 100644 index 0000000000..892adf8c66 --- /dev/null +++ b/vendor/github.com/docker/go-connections/nat/parse.go @@ -0,0 +1,57 @@ +package nat + +import ( + "fmt" + "strconv" + "strings" +) + +// PartParser parses and validates the specified string (data) using the specified template +// e.g. ip:public:private -> 192.168.0.1:80:8000 +// DEPRECATED: do not use, this function may be removed in a future version +func PartParser(template, data string) (map[string]string, error) { + // ip:public:private + var ( + templateParts = strings.Split(template, ":") + parts = strings.Split(data, ":") + out = make(map[string]string, len(templateParts)) + ) + if len(parts) != len(templateParts) { + return nil, fmt.Errorf("Invalid format to parse. 
%s should match template %s", data, template) + } + + for i, t := range templateParts { + value := "" + if len(parts) > i { + value = parts[i] + } + out[t] = value + } + return out, nil +} + +// ParsePortRange parses and validates the specified string as a port-range (8000-9000) +func ParsePortRange(ports string) (uint64, uint64, error) { + if ports == "" { + return 0, 0, fmt.Errorf("Empty string specified for ports.") + } + if !strings.Contains(ports, "-") { + start, err := strconv.ParseUint(ports, 10, 16) + end := start + return start, end, err + } + + parts := strings.Split(ports, "-") + start, err := strconv.ParseUint(parts[0], 10, 16) + if err != nil { + return 0, 0, err + } + end, err := strconv.ParseUint(parts[1], 10, 16) + if err != nil { + return 0, 0, err + } + if end < start { + return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports) + } + return start, end, nil +} diff --git a/vendor/github.com/docker/go-connections/nat/sort.go b/vendor/github.com/docker/go-connections/nat/sort.go new file mode 100644 index 0000000000..ce950171e3 --- /dev/null +++ b/vendor/github.com/docker/go-connections/nat/sort.go @@ -0,0 +1,96 @@ +package nat + +import ( + "sort" + "strings" +) + +type portSorter struct { + ports []Port + by func(i, j Port) bool +} + +func (s *portSorter) Len() int { + return len(s.ports) +} + +func (s *portSorter) Swap(i, j int) { + s.ports[i], s.ports[j] = s.ports[j], s.ports[i] +} + +func (s *portSorter) Less(i, j int) bool { + ip := s.ports[i] + jp := s.ports[j] + + return s.by(ip, jp) +} + +// Sort sorts a list of ports using the provided predicate +// This function should compare `i` and `j`, returning true if `i` is +// considered to be less than `j` +func Sort(ports []Port, predicate func(i, j Port) bool) { + s := &portSorter{ports, predicate} + sort.Sort(s) +} + +type portMapEntry struct { + port Port + binding PortBinding +} + +type portMapSorter []portMapEntry + +func (s portMapSorter) Len() int { return len(s) } +func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// sort the port so that the order is: +// 1. port with larger specified bindings +// 2. larger port +// 3. port with tcp protocol +func (s portMapSorter) Less(i, j int) bool { + pi, pj := s[i].port, s[j].port + hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort) + return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp") +} + +// SortPortMap sorts the list of ports and their respected mapping. The ports +// will explicit HostPort will be placed first. 
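+// (That is, ports with an explicitly bound HostPort sort ahead of unbound
+// ones.) A minimal usage sketch, using only this package's types:
+//
+//	ports := []Port{"80/tcp", "443/tcp"}
+//	bindings := PortMap{"443/tcp": {{HostPort: "8443"}}}
+//	SortPortMap(ports, bindings)
+//	// ports is now []Port{"443/tcp", "80/tcp"}: the bound port sorts first.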
+func SortPortMap(ports []Port, bindings PortMap) { + s := portMapSorter{} + for _, p := range ports { + if binding, ok := bindings[p]; ok { + for _, b := range binding { + s = append(s, portMapEntry{port: p, binding: b}) + } + bindings[p] = []PortBinding{} + } else { + s = append(s, portMapEntry{port: p}) + } + } + + sort.Sort(s) + var ( + i int + pm = make(map[Port]struct{}) + ) + // reorder ports + for _, entry := range s { + if _, ok := pm[entry.port]; !ok { + ports[i] = entry.port + pm[entry.port] = struct{}{} + i++ + } + // reorder bindings for this port + if _, ok := bindings[entry.port]; ok { + bindings[entry.port] = append(bindings[entry.port], entry.binding) + } + } +} + +func toInt(s string) uint64 { + i, _, err := ParsePortRange(s) + if err != nil { + i = 0 + } + return i +} diff --git a/vendor/github.com/docker/go-connections/sockets/README.md b/vendor/github.com/docker/go-connections/sockets/README.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/vendor/github.com/docker/go-connections/sockets/inmem_socket.go b/vendor/github.com/docker/go-connections/sockets/inmem_socket.go new file mode 100644 index 0000000000..99846ffddb --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/inmem_socket.go @@ -0,0 +1,81 @@ +package sockets + +import ( + "errors" + "net" + "sync" +) + +var errClosed = errors.New("use of closed network connection") + +// InmemSocket implements net.Listener using in-memory only connections. +type InmemSocket struct { + chConn chan net.Conn + chClose chan struct{} + addr string + mu sync.Mutex +} + +// dummyAddr is used to satisfy net.Addr for the in-mem socket +// it is just stored as a string and returns the string for all calls +type dummyAddr string + +// NewInmemSocket creates an in-memory only net.Listener +// The addr argument can be any string, but is used to satisfy the `Addr()` part +// of the net.Listener interface +func NewInmemSocket(addr string, bufSize int) *InmemSocket { + return &InmemSocket{ + chConn: make(chan net.Conn, bufSize), + chClose: make(chan struct{}), + addr: addr, + } +} + +// Addr returns the socket's addr string to satisfy net.Listener +func (s *InmemSocket) Addr() net.Addr { + return dummyAddr(s.addr) +} + +// Accept implements the Accept method in the Listener interface; it waits for the next call and returns a generic Conn. +func (s *InmemSocket) Accept() (net.Conn, error) { + select { + case conn := <-s.chConn: + return conn, nil + case <-s.chClose: + return nil, errClosed + } +} + +// Close closes the listener. It will be unavailable for use once closed. 
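+// Close may be called more than once; calls after the first are no-ops.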
+func (s *InmemSocket) Close() error { + s.mu.Lock() + defer s.mu.Unlock() + select { + case <-s.chClose: + default: + close(s.chClose) + } + return nil +} + +// Dial is used to establish a connection with the in-mem server +func (s *InmemSocket) Dial(network, addr string) (net.Conn, error) { + srvConn, clientConn := net.Pipe() + select { + case s.chConn <- srvConn: + case <-s.chClose: + return nil, errClosed + } + + return clientConn, nil +} + +// Network returns the addr string, satisfies net.Addr +func (a dummyAddr) Network() string { + return string(a) +} + +// String returns the string form +func (a dummyAddr) String() string { + return string(a) +} diff --git a/vendor/github.com/docker/go-connections/sockets/proxy.go b/vendor/github.com/docker/go-connections/sockets/proxy.go new file mode 100644 index 0000000000..98e9a1dc61 --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/proxy.go @@ -0,0 +1,51 @@ +package sockets + +import ( + "net" + "net/url" + "os" + "strings" + + "golang.org/x/net/proxy" +) + +// GetProxyEnv allows access to the uppercase and the lowercase forms of +// proxy-related variables. See the Go specification for details on these +// variables. https://golang.org/pkg/net/http/ +func GetProxyEnv(key string) string { + proxyValue := os.Getenv(strings.ToUpper(key)) + if proxyValue == "" { + return os.Getenv(strings.ToLower(key)) + } + return proxyValue +} + +// DialerFromEnvironment takes in a "direct" *net.Dialer and returns a +// proxy.Dialer which will route the connections through the proxy using the +// given dialer. +func DialerFromEnvironment(direct *net.Dialer) (proxy.Dialer, error) { + allProxy := GetProxyEnv("all_proxy") + if len(allProxy) == 0 { + return direct, nil + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return direct, err + } + + proxyFromURL, err := proxy.FromURL(proxyURL, direct) + if err != nil { + return direct, err + } + + noProxy := GetProxyEnv("no_proxy") + if len(noProxy) == 0 { + return proxyFromURL, nil + } + + perHost := proxy.NewPerHost(proxyFromURL, direct) + perHost.AddFromString(noProxy) + + return perHost, nil +} diff --git a/vendor/github.com/docker/go-connections/sockets/sockets.go b/vendor/github.com/docker/go-connections/sockets/sockets.go new file mode 100644 index 0000000000..a1d7beb4d8 --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/sockets.go @@ -0,0 +1,38 @@ +// Package sockets provides helper functions to create and configure Unix or TCP sockets. +package sockets + +import ( + "errors" + "net" + "net/http" + "time" +) + +// Why 32? See https://github.com/docker/docker/pull/8035. +const defaultTimeout = 32 * time.Second + +// ErrProtocolNotAvailable is returned when a given transport protocol is not provided by the operating system. +var ErrProtocolNotAvailable = errors.New("protocol not available") + +// ConfigureTransport configures the specified Transport according to the +// specified proto and addr. +// If the proto is unix (using a unix socket to communicate) or npipe the +// compression is disabled. 
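+//
+// A minimal usage sketch (the socket path below is illustrative only):
+//
+//	tr := &http.Transport{}
+//	if err := sockets.ConfigureTransport(tr, "unix", "/var/run/docker.sock"); err != nil {
+//		// handle the error
+//	}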
+func ConfigureTransport(tr *http.Transport, proto, addr string) error { + switch proto { + case "unix": + return configureUnixTransport(tr, proto, addr) + case "npipe": + return configureNpipeTransport(tr, proto, addr) + default: + tr.Proxy = http.ProxyFromEnvironment + dialer, err := DialerFromEnvironment(&net.Dialer{ + Timeout: defaultTimeout, + }) + if err != nil { + return err + } + tr.Dial = dialer.Dial + } + return nil +} diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go new file mode 100644 index 0000000000..386cf0dbbd --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go @@ -0,0 +1,35 @@ +// +build !windows + +package sockets + +import ( + "fmt" + "net" + "net/http" + "syscall" + "time" +) + +const maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path) + +func configureUnixTransport(tr *http.Transport, proto, addr string) error { + if len(addr) > maxUnixSocketPathSize { + return fmt.Errorf("Unix socket path %q is too long", addr) + } + // No need for compression in local communications. + tr.DisableCompression = true + tr.Dial = func(_, _ string) (net.Conn, error) { + return net.DialTimeout(proto, addr, defaultTimeout) + } + return nil +} + +func configureNpipeTransport(tr *http.Transport, proto, addr string) error { + return ErrProtocolNotAvailable +} + +// DialPipe connects to a Windows named pipe. +// This is not supported on other OSes. +func DialPipe(_ string, _ time.Duration) (net.Conn, error) { + return nil, syscall.EAFNOSUPPORT +} diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go new file mode 100644 index 0000000000..5c21644e1f --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go @@ -0,0 +1,27 @@ +package sockets + +import ( + "net" + "net/http" + "time" + + "github.com/Microsoft/go-winio" +) + +func configureUnixTransport(tr *http.Transport, proto, addr string) error { + return ErrProtocolNotAvailable +} + +func configureNpipeTransport(tr *http.Transport, proto, addr string) error { + // No need for compression in local communications. + tr.DisableCompression = true + tr.Dial = func(_, _ string) (net.Conn, error) { + return DialPipe(addr, defaultTimeout) + } + return nil +} + +// DialPipe connects to a Windows named pipe. +func DialPipe(addr string, timeout time.Duration) (net.Conn, error) { + return winio.DialPipe(addr, &timeout) +} diff --git a/vendor/github.com/docker/go-connections/sockets/tcp_socket.go b/vendor/github.com/docker/go-connections/sockets/tcp_socket.go new file mode 100644 index 0000000000..53cbb6c79e --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/tcp_socket.go @@ -0,0 +1,22 @@ +// Package sockets provides helper functions to create and configure Unix or TCP sockets. +package sockets + +import ( + "crypto/tls" + "net" +) + +// NewTCPSocket creates a TCP socket listener with the specified address and +// the specified tls configuration. If TLSConfig is set, will encapsulate the +// TCP listener inside a TLS one. 
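+//
+// For example (an illustrative sketch; the address is hypothetical):
+//
+//	l, err := sockets.NewTCPSocket("127.0.0.1:2376", tlsConfig)
+//
+// Note that a non-nil tlsConfig is mutated: NextProtos is pinned to
+// "http/1.1" before the listener is wrapped.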
+func NewTCPSocket(addr string, tlsConfig *tls.Config) (net.Listener, error) { + l, err := net.Listen("tcp", addr) + if err != nil { + return nil, err + } + if tlsConfig != nil { + tlsConfig.NextProtos = []string{"http/1.1"} + l = tls.NewListener(l, tlsConfig) + } + return l, nil +} diff --git a/vendor/github.com/docker/go-connections/sockets/unix_socket.go b/vendor/github.com/docker/go-connections/sockets/unix_socket.go new file mode 100644 index 0000000000..a8b5dbb6fd --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/unix_socket.go @@ -0,0 +1,32 @@ +// +build !windows + +package sockets + +import ( + "net" + "os" + "syscall" +) + +// NewUnixSocket creates a unix socket with the specified path and group. +func NewUnixSocket(path string, gid int) (net.Listener, error) { + if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) { + return nil, err + } + mask := syscall.Umask(0777) + defer syscall.Umask(mask) + + l, err := net.Listen("unix", path) + if err != nil { + return nil, err + } + if err := os.Chown(path, 0, gid); err != nil { + l.Close() + return nil, err + } + if err := os.Chmod(path, 0660); err != nil { + l.Close() + return nil, err + } + return l, nil +} diff --git a/vendor/github.com/gogo/protobuf/AUTHORS b/vendor/github.com/gogo/protobuf/AUTHORS new file mode 100644 index 0000000000..3d97fc7a29 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of GoGo authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS file, which +# lists people. For example, employees are listed in CONTRIBUTORS, +# but not in AUTHORS, because the employer holds the copyright. + +# Names should be added to this file as one of +# Organization's name +# Individual's name +# Individual's name + +# Please keep the list sorted. + +Sendgrid, Inc +Vastech SA (PTY) LTD +Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/CONTRIBUTORS b/vendor/github.com/gogo/protobuf/CONTRIBUTORS new file mode 100644 index 0000000000..1b4f6c208a --- /dev/null +++ b/vendor/github.com/gogo/protobuf/CONTRIBUTORS @@ -0,0 +1,23 @@ +Anton Povarov +Brian Goff +Clayton Coleman +Denis Smirnov +DongYun Kang +Dwayne Schultz +Georg Apitz +Gustav Paul +Johan Brandhorst +John Shahid +John Tuley +Laurent +Patrick Lee +Peter Edge +Roger Johansson +Sam Nguyen +Sergio Arbeo +Stephen J Day +Tamir Duberstein +Todd Eisenberger +Tormod Erevik Lea +Vyacheslav Kim +Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/LICENSE b/vendor/github.com/gogo/protobuf/LICENSE new file mode 100644 index 0000000000..f57de90da8 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/LICENSE @@ -0,0 +1,35 @@ +Copyright (c) 2013, The GoGo Authors. All rights reserved. + +Protocol Buffers for Go with Gadgets + +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. +https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. 
+ * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/gogo/protobuf/proto/Makefile b/vendor/github.com/gogo/protobuf/proto/Makefile new file mode 100644 index 0000000000..00d65f3277 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/Makefile @@ -0,0 +1,43 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +install: + go install + +test: install generate-test-pbs + go test + + +generate-test-pbs: + make install + make -C test_proto + make -C proto3_proto + make diff --git a/vendor/github.com/gogo/protobuf/proto/clone.go b/vendor/github.com/gogo/protobuf/proto/clone.go new file mode 100644 index 0000000000..a26b046d94 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/clone.go @@ -0,0 +1,258 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer deep copy and merge. +// TODO: RawMessage. + +package proto + +import ( + "fmt" + "log" + "reflect" + "strings" +) + +// Clone returns a deep copy of a protocol buffer. +func Clone(src Message) Message { + in := reflect.ValueOf(src) + if in.IsNil() { + return src + } + out := reflect.New(in.Type().Elem()) + dst := out.Interface().(Message) + Merge(dst, src) + return dst +} + +// Merger is the interface representing objects that can merge messages of the same type. +type Merger interface { + // Merge merges src into this message. + // Required and optional fields that are set in src will be set to that value in dst. + // Elements of repeated fields will be appended. + // + // Merge may panic if called with a different argument type than the receiver. + Merge(src Message) +} + +// generatedMerger is the custom merge method that generated protos will have. +// We must add this method since a generate Merge method will conflict with +// many existing protos that have a Merge data field already defined. +type generatedMerger interface { + XXX_Merge(src Message) +} + +// Merge merges src into dst. +// Required and optional fields that are set in src will be set to that value in dst. +// Elements of repeated fields will be appended. +// Merge panics if src and dst are not the same type, or if dst is nil. 
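+//
+// A sketch of typical use, assuming a generated message type *pb.T:
+//
+//	dst := &pb.T{}
+//	proto.Merge(dst, src)
+//	// dst now carries every field that was set in src.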
+func Merge(dst, src Message) { + if m, ok := dst.(Merger); ok { + m.Merge(src) + return + } + + in := reflect.ValueOf(src) + out := reflect.ValueOf(dst) + if out.IsNil() { + panic("proto: nil destination") + } + if in.Type() != out.Type() { + panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) + } + if in.IsNil() { + return // Merge from nil src is a noop + } + if m, ok := dst.(generatedMerger); ok { + m.XXX_Merge(src) + return + } + mergeStruct(out.Elem(), in.Elem()) +} + +func mergeStruct(out, in reflect.Value) { + sprop := GetProperties(in.Type()) + for i := 0; i < in.NumField(); i++ { + f := in.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) + } + + if emIn, ok := in.Addr().Interface().(extensionsBytes); ok { + emOut := out.Addr().Interface().(extensionsBytes) + bIn := emIn.GetExtensions() + bOut := emOut.GetExtensions() + *bOut = append(*bOut, *bIn...) + } else if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + uf := in.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return + } + uin := uf.Bytes() + if len(uin) > 0 { + out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) + } +} + +// mergeAny performs a merge between two values of the same type. +// viaPtr indicates whether the values were indirected through a pointer (implying proto2). +// prop is set if this is a struct field (it may be nil). +func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { + if in.Type() == protoMessageType { + if !in.IsNil() { + if out.IsNil() { + out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) + } else { + Merge(out.Interface().(Message), in.Interface().(Message)) + } + } + return + } + switch in.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + if !viaPtr && isProto3Zero(in) { + return + } + out.Set(in) + case reflect.Interface: + // Probably a oneof field; copy non-nil values. + if in.IsNil() { + return + } + // Allocate destination if it is not set, or set to a different type. + // Otherwise we will merge as normal. + if out.IsNil() || out.Elem().Type() != in.Elem().Type() { + out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) + } + mergeAny(out.Elem(), in.Elem(), false, nil) + case reflect.Map: + if in.Len() == 0 { + return + } + if out.IsNil() { + out.Set(reflect.MakeMap(in.Type())) + } + // For maps with value types of *T or []byte we need to deep copy each value. + elemKind := in.Type().Elem().Kind() + for _, key := range in.MapKeys() { + var val reflect.Value + switch elemKind { + case reflect.Ptr: + val = reflect.New(in.Type().Elem().Elem()) + mergeAny(val, in.MapIndex(key), false, nil) + case reflect.Slice: + val = in.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + default: + val = in.MapIndex(key) + } + out.SetMapIndex(key, val) + } + case reflect.Ptr: + if in.IsNil() { + return + } + if out.IsNil() { + out.Set(reflect.New(in.Elem().Type())) + } + mergeAny(out.Elem(), in.Elem(), true, nil) + case reflect.Slice: + if in.IsNil() { + return + } + if in.Type().Elem().Kind() == reflect.Uint8 { + // []byte is a scalar bytes field, not a repeated field. 
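+			// It is therefore deep-copied below as a single value rather
+			// than appended to element by element.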
+ + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value, and should not + // be merged. + if prop != nil && prop.proto3 && in.Len() == 0 { + return + } + + // Make a deep copy. + // Append to []byte{} instead of []byte(nil) so that we never end up + // with a nil result. + out.SetBytes(append([]byte{}, in.Bytes()...)) + return + } + n := in.Len() + if out.IsNil() { + out.Set(reflect.MakeSlice(in.Type(), 0, n)) + } + switch in.Type().Elem().Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(reflect.AppendSlice(out, in)) + default: + for i := 0; i < n; i++ { + x := reflect.Indirect(reflect.New(in.Type().Elem())) + mergeAny(x, in.Index(i), false, nil) + out.Set(reflect.Append(out, x)) + } + } + case reflect.Struct: + mergeStruct(out, in) + default: + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to copy %v", in) + } +} + +func mergeExtension(out, in map[int32]Extension) { + for extNum, eIn := range in { + eOut := Extension{desc: eIn.desc} + if eIn.value != nil { + v := reflect.New(reflect.TypeOf(eIn.value)).Elem() + mergeAny(v, reflect.ValueOf(eIn.value), false, nil) + eOut.value = v.Interface() + } + if eIn.enc != nil { + eOut.enc = make([]byte, len(eIn.enc)) + copy(eOut.enc, eIn.enc) + } + + out[extNum] = eOut + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/custom_gogo.go b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go new file mode 100644 index 0000000000..24552483c6 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go @@ -0,0 +1,39 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import "reflect" + +type custom interface { + Marshal() ([]byte, error) + Unmarshal(data []byte) error + Size() int +} + +var customType = reflect.TypeOf((*custom)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go new file mode 100644 index 0000000000..63b0f08bef --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/decode.go @@ -0,0 +1,427 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for decoding protocol buffer data to construct in-memory representations. + */ + +import ( + "errors" + "fmt" + "io" +) + +// errOverflow is returned when an integer is too large to be represented. +var errOverflow = errors.New("proto: integer overflow") + +// ErrInternalBadWireType is returned by generated code when an incorrect +// wire type is encountered. It does not get returned to user code. +var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") + +// DecodeVarint reads a varint-encoded integer from the slice. +// It returns the integer and the number of bytes consumed, or +// zero if there is not enough. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func DecodeVarint(buf []byte) (x uint64, n int) { + for shift := uint(0); shift < 64; shift += 7 { + if n >= len(buf) { + return 0, 0 + } + b := uint64(buf[n]) + n++ + x |= (b & 0x7F) << shift + if (b & 0x80) == 0 { + return x, n + } + } + + // The number is too large to represent in a 64-bit value. 
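+	// A zero byte count tells the caller that decoding failed.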
+ return 0, 0 +} + +func (p *Buffer) decodeVarintSlow() (x uint64, err error) { + i := p.index + l := len(p.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := p.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + p.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) DecodeVarint() (x uint64, err error) { + i := p.index + buf := p.buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + p.index++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return p.decodeVarintSlow() + } + + var b uint64 + // we already checked the first byte + x = uint64(buf[i]) - 0x80 + i++ + + b = uint64(buf[i]) + i++ + x += b << 7 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 7 + + b = uint64(buf[i]) + i++ + x += b << 14 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 14 + + b = uint64(buf[i]) + i++ + x += b << 21 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 21 + + b = uint64(buf[i]) + i++ + x += b << 28 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 28 + + b = uint64(buf[i]) + i++ + x += b << 35 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 35 + + b = uint64(buf[i]) + i++ + x += b << 42 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 42 + + b = uint64(buf[i]) + i++ + x += b << 49 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 49 + + b = uint64(buf[i]) + i++ + x += b << 56 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 56 + + b = uint64(buf[i]) + i++ + x += b << 63 + if b&0x80 == 0 { + goto done + } + + return 0, errOverflow + +done: + p.index = i + return x, nil +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := p.index + 8 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-8]) + x |= uint64(p.buf[i-7]) << 8 + x |= uint64(p.buf[i-6]) << 16 + x |= uint64(p.buf[i-5]) << 24 + x |= uint64(p.buf[i-4]) << 32 + x |= uint64(p.buf[i-3]) << 40 + x |= uint64(p.buf[i-2]) << 48 + x |= uint64(p.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := p.index + 4 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-4]) + x |= uint64(p.buf[i-3]) << 8 + x |= uint64(p.buf[i-2]) << 16 + x |= uint64(p.buf[i-1]) << 24 + return +} + +// DecodeZigzag64 reads a zigzag-encoded 64-bit integer +// from the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) DecodeZigzag64() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) + return +} + +// DecodeZigzag32 reads a zigzag-encoded 32-bit integer +// from the Buffer. +// This is the format used for the sint32 protocol buffer type. 
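+// Zigzag interleaves signs so that small magnitudes stay small:
+// 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, and so on.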
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) + return +} + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := p.DecodeVarint() + if err != nil { + return nil, err + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := p.index + nb + if end < p.index || end > len(p.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + // todo: check if can get more uses of alloc=false + buf = p.buf[p.index:end] + p.index += nb + return + } + + buf = make([]byte, nb) + copy(buf, p.buf[p.index:]) + p.index += nb + return +} + +// DecodeStringBytes reads an encoded string from the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) DecodeStringBytes() (s string, err error) { + buf, err := p.DecodeRawBytes(false) + if err != nil { + return + } + return string(buf), nil +} + +// Unmarshaler is the interface representing objects that can +// unmarshal themselves. The argument points to data that may be +// overwritten, so implementations should not keep references to the +// buffer. +// Unmarshal implementations should not clear the receiver. +// Any unmarshaled data should be merged into the receiver. +// Callers of Unmarshal that do not want to retain existing data +// should Reset the receiver before calling Unmarshal. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// newUnmarshaler is the interface representing objects that can +// unmarshal themselves. The semantics are identical to Unmarshaler. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newUnmarshaler interface { + XXX_Unmarshal([]byte) error +} + +// Unmarshal parses the protocol buffer representation in buf and places the +// decoded result in pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// Unmarshal resets pb before starting to unmarshal, so any +// existing data in pb is always removed. Use UnmarshalMerge +// to preserve and append to existing data. +func Unmarshal(buf []byte, pb Message) error { + pb.Reset() + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// UnmarshalMerge parses the protocol buffer representation in buf and +// writes the decoded result to pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// UnmarshalMerge merges into existing data in pb. +// Most code should use Unmarshal instead. +func UnmarshalMerge(buf []byte, pb Message) error { + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. 
+ // + // See https://github.com/golang/protobuf/issues/424 + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// DecodeMessage reads a count-delimited message from the Buffer. +func (p *Buffer) DecodeMessage(pb Message) error { + enc, err := p.DecodeRawBytes(false) + if err != nil { + return err + } + return NewBuffer(enc).Unmarshal(pb) +} + +// DecodeGroup reads a tag-delimited group from the Buffer. +// StartGroup tag is already consumed. This function consumes +// EndGroup tag. +func (p *Buffer) DecodeGroup(pb Message) error { + b := p.buf[p.index:] + x, y := findEndGroup(b) + if x < 0 { + return io.ErrUnexpectedEOF + } + err := Unmarshal(b[:x], pb) + p.index += y + return err +} + +// Unmarshal parses the protocol buffer representation in the +// Buffer and places the decoded result in pb. If the struct +// underlying pb does not match the data in the buffer, the results can be +// unpredictable. +// +// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. +func (p *Buffer) Unmarshal(pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(newUnmarshaler); ok { + err := u.XXX_Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + + // Slow workaround for messages that aren't Unmarshalers. + // This includes some hand-coded .pb.go files and + // bootstrap protos. + // TODO: fix all of those and then add Unmarshal to + // the Message interface. Then: + // The cast above and code below can be deleted. + // The old unmarshaler can be deleted. + // Clients can call Unmarshal directly (can already do that, actually). + var info InternalMessageInfo + err := info.Unmarshal(pb, p.buf[p.index:]) + p.index = len(p.buf) + return err +} diff --git a/vendor/github.com/gogo/protobuf/proto/deprecated.go b/vendor/github.com/gogo/protobuf/proto/deprecated.go new file mode 100644 index 0000000000..35b882c09a --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/deprecated.go @@ -0,0 +1,63 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import "errors" + +// Deprecated: do not use. +type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } + +// Deprecated: do not use. +func GetStats() Stats { return Stats{} } + +// Deprecated: do not use. +func MarshalMessageSet(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSet([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func MarshalMessageSetJSON(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSetJSON([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func RegisterMessageSetType(Message, int32, string) {} diff --git a/vendor/github.com/gogo/protobuf/proto/discard.go b/vendor/github.com/gogo/protobuf/proto/discard.go new file mode 100644 index 0000000000..fe1bd7d904 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/discard.go @@ -0,0 +1,350 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2017 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +type generatedDiscarder interface { + XXX_DiscardUnknown() +} + +// DiscardUnknown recursively discards all unknown fields from this message +// and all embedded messages. +// +// When unmarshaling a message with unrecognized fields, the tags and values +// of such fields are preserved in the Message. This allows a later call to +// marshal to be able to produce a message that continues to have those +// unrecognized fields. To avoid this, DiscardUnknown is used to +// explicitly clear the unknown fields after unmarshaling. +// +// For proto2 messages, the unknown fields of message extensions are only +// discarded from messages that have been accessed via GetExtension. +func DiscardUnknown(m Message) { + if m, ok := m.(generatedDiscarder); ok { + m.XXX_DiscardUnknown() + return + } + // TODO: Dynamically populate a InternalMessageInfo for legacy messages, + // but the master branch has no implementation for InternalMessageInfo, + // so it would be more work to replicate that approach. + discardLegacy(m) +} + +// DiscardUnknown recursively discards all unknown fields. +func (a *InternalMessageInfo) DiscardUnknown(m Message) { + di := atomicLoadDiscardInfo(&a.discard) + if di == nil { + di = getDiscardInfo(reflect.TypeOf(m).Elem()) + atomicStoreDiscardInfo(&a.discard, di) + } + di.discard(toPointer(&m)) +} + +type discardInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []discardFieldInfo + unrecognized field +} + +type discardFieldInfo struct { + field field // Offset of field, guaranteed to be valid + discard func(src pointer) +} + +var ( + discardInfoMap = map[reflect.Type]*discardInfo{} + discardInfoLock sync.Mutex +) + +func getDiscardInfo(t reflect.Type) *discardInfo { + discardInfoLock.Lock() + defer discardInfoLock.Unlock() + di := discardInfoMap[t] + if di == nil { + di = &discardInfo{typ: t} + discardInfoMap[t] = di + } + return di +} + +func (di *discardInfo) discard(src pointer) { + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&di.initialized) == 0 { + di.computeDiscardInfo() + } + + for _, fi := range di.fields { + sfp := src.offset(fi.field) + fi.discard(sfp) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { + // Ignore lock since DiscardUnknown is not concurrency safe. 
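+		// Only extensions already decoded via GetExtension carry a
+		// message value to recurse into here.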
+ emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + DiscardUnknown(m) + } + } + } + + if di.unrecognized.IsValid() { + *src.offset(di.unrecognized).toBytes() = nil + } +} + +func (di *discardInfo) computeDiscardInfo() { + di.lock.Lock() + defer di.lock.Unlock() + if di.initialized != 0 { + return + } + t := di.typ + n := t.NumField() + + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + dfi := discardFieldInfo{field: toField(&f)} + tf := f.Type + + // Unwrap tf to get its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) + case isSlice: // E.g., []*pb.T + discardInfo := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sps := src.getPointerSlice() + for _, sp := range sps { + if !sp.isNil() { + discardInfo.discard(sp) + } + } + } + default: // E.g., *pb.T + discardInfo := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sp := src.getPointer() + if !sp.isNil() { + discardInfo.discard(sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) + default: // E.g., map[K]V + if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) + dfi.discard = func(src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + DiscardUnknown(val.Interface().(Message)) + } + } + } else { + dfi.discard = func(pointer) {} // Noop + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) + default: // E.g., interface{} + // TODO: Make this faster? + dfi.discard = func(src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + DiscardUnknown(sv.Interface().(Message)) + } + } + } + } + default: + continue + } + di.fields = append(di.fields, dfi) + } + + di.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + di.unrecognized = toField(&f) + } + + atomic.StoreInt32(&di.initialized, 1) +} + +func discardLegacy(m Message) { + v := reflect.ValueOf(m) + if v.Kind() != reflect.Ptr || v.IsNil() { + return + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return + } + t := v.Type() + + for i := 0; i < v.NumField(); i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + vf := v.Field(i) + tf := f.Type + + // Unwrap tf to get its most basic type. 
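+		// Peel off one slice level and one pointer level, mirroring
+		// computeDiscardInfo above.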
+ var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) + case isSlice: // E.g., []*pb.T + for j := 0; j < vf.Len(); j++ { + discardLegacy(vf.Index(j).Interface().(Message)) + } + default: // E.g., *pb.T + discardLegacy(vf.Interface().(Message)) + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) + default: // E.g., map[K]V + tv := vf.Type().Elem() + if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) + for _, key := range vf.MapKeys() { + val := vf.MapIndex(key) + discardLegacy(val.Interface().(Message)) + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) + default: // E.g., test_proto.isCommunique_Union interface + if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { + vf = vf.Elem() // E.g., *test_proto.Communique_Msg + if !vf.IsNil() { + vf = vf.Elem() // E.g., test_proto.Communique_Msg + vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value + if vf.Kind() == reflect.Ptr { + discardLegacy(vf.Interface().(Message)) + } + } + } + } + } + } + + if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { + if vf.Type() != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + vf.Set(reflect.ValueOf([]byte(nil))) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(m); err == nil { + // Ignore lock since discardLegacy is not concurrency safe. + emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + discardLegacy(m) + } + } + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/duration.go b/vendor/github.com/gogo/protobuf/proto/duration.go new file mode 100644 index 0000000000..93464c91cf --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/duration.go @@ -0,0 +1,100 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Range of a Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the Duration is valid according to the +// definition in google/protobuf/duration.proto. A valid Duration +// may still be too large to fit into a time.Duration (the range of Duration +// is about 10,000 years, and the range of time.Duration is about 290). +func validateDuration(d *duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %#v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %#v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d) + } + return nil +} + +// DurationFromProto converts a Duration to a time.Duration. DurationFromProto +// returns an error if the Duration is invalid or is too large to be +// represented in a time.Duration. +func durationFromProto(p *duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a Duration. +func durationProto(d time.Duration) *duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go new file mode 100644 index 0000000000..e748e1730e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go @@ -0,0 +1,49 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" + "time" +) + +var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem() + +type duration struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *duration) Reset() { *m = duration{} } +func (*duration) ProtoMessage() {} +func (*duration) String() string { return "duration" } + +func init() { + RegisterType((*duration)(nil), "gogo.protobuf.proto.duration") +} diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go new file mode 100644 index 0000000000..9581ccd304 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/encode.go @@ -0,0 +1,205 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "errors" + "reflect" +) + +var ( + // errRepeatedHasNil is the error returned if Marshal is called with + // a struct with a repeated field containing a nil element. + errRepeatedHasNil = errors.New("proto: repeated field has nil element") + + // errOneofHasNil is the error returned if Marshal is called with + // a struct with a oneof field containing a nil element. + errOneofHasNil = errors.New("proto: oneof field has nil value") + + // ErrNil is the error returned if Marshal is called with nil. + ErrNil = errors.New("proto: Marshal called with nil") + + // ErrTooLarge is the error returned if Marshal is called with a + // message that encodes to >2GB. + ErrTooLarge = errors.New("proto: message encodes to over 2 GB") +) + +// The fundamental encoders that put bytes on the wire. +// Those that take integer types all accept uint64 and are +// therefore of type valueEncoder. + +const maxVarintBytes = 10 // maximum length of a varint + +// EncodeVarint returns the varint encoding of x. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +// Not used by the package itself, but helpful to clients +// wishing to use the same encoding. +func EncodeVarint(x uint64) []byte { + var buf [maxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return buf[0:n] +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + p.buf = append(p.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + p.buf = append(p.buf, uint8(x)) + return nil +} + +// SizeVarint returns the varint encoding size of an integer. +func SizeVarint(x uint64) int { + switch { + case x < 1<<7: + return 1 + case x < 1<<14: + return 2 + case x < 1<<21: + return 3 + case x < 1<<28: + return 4 + case x < 1<<35: + return 5 + case x < 1<<42: + return 6 + case x < 1<<49: + return 7 + case x < 1<<56: + return 8 + case x < 1<<63: + return 9 + } + return 10 +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) EncodeFixed64(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) EncodeFixed32(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +// EncodeZigzag64 writes a zigzag-encoded 64-bit integer +// to the Buffer. 
+// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) EncodeZigzag64(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +// EncodeZigzag32 writes a zigzag-encoded 32-bit integer +// to the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) EncodeZigzag32(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) EncodeRawBytes(b []byte) error { + p.EncodeVarint(uint64(len(b))) + p.buf = append(p.buf, b...) + return nil +} + +// EncodeStringBytes writes an encoded string to the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) EncodeStringBytes(s string) error { + p.EncodeVarint(uint64(len(s))) + p.buf = append(p.buf, s...) + return nil +} + +// Marshaler is the interface representing objects that can marshal themselves. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// EncodeMessage writes the protocol buffer to the Buffer, +// prefixed by a varint-encoded length. +func (p *Buffer) EncodeMessage(pb Message) error { + siz := Size(pb) + sizVar := SizeVarint(uint64(siz)) + p.grow(siz + sizVar) + p.EncodeVarint(uint64(siz)) + return p.Marshal(pb) +} + +// All protocol buffer fields are nillable, but be careful. +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + return false +} diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go new file mode 100644 index 0000000000..0f5fb173e9 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go @@ -0,0 +1,33 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
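+
+// Editorial illustration (not part of the upstream source): the Buffer
+// primitives defined in encode.go above compose like this for a caller:
+//
+//	var b proto.Buffer
+//	_ = b.EncodeVarint(300)            // appends [0xac, 0x02]
+//	_ = b.EncodeZigzag32(0xffffffff)   // sint32(-1) zigzags to varint 1
+//	_ = b.EncodeRawBytes([]byte("hi")) // appends [0x02, 'h', 'i']
+//	wire := b.Bytes()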
+ +package proto + +func NewRequiredNotSetError(field string) *RequiredNotSetError { + return &RequiredNotSetError{field} +} diff --git a/vendor/github.com/gogo/protobuf/proto/equal.go b/vendor/github.com/gogo/protobuf/proto/equal.go new file mode 100644 index 0000000000..d4db5a1c14 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/equal.go @@ -0,0 +1,300 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer comparison. + +package proto + +import ( + "bytes" + "log" + "reflect" + "strings" +) + +/* +Equal returns true iff protocol buffers a and b are equal. +The arguments must both be pointers to protocol buffer structs. + +Equality is defined in this way: + - Two messages are equal iff they are the same type, + corresponding fields are equal, unknown field sets + are equal, and extensions sets are equal. + - Two set scalar fields are equal iff their values are equal. + If the fields are of a floating-point type, remember that + NaN != x for all x, including NaN. If the message is defined + in a proto3 .proto file, fields are not "set"; specifically, + zero length proto3 "bytes" fields are equal (nil == {}). + - Two repeated fields are equal iff their lengths are the same, + and their corresponding elements are equal. Note a "bytes" field, + although represented by []byte, is not a repeated field and the + rule for the scalar fields described above applies. + - Two unset fields are equal. + - Two unknown field sets are equal if their current + encoded state is equal. + - Two extension sets are equal iff they have corresponding + elements that are pairwise equal. + - Two map fields are equal iff their lengths are the same, + and they contain the same set of elements. Zero-length map + fields are equal. + - Every other combination of things are not equal. + +The return value is undefined if a and b are not protocol buffers. 
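+
+Illustrative example (editorial addition; pb.Test stands in for any
+generated message type):
+
+	a := &pb.Test{Label: proto.String("x")}
+	b := &pb.Test{Label: proto.String("x")}
+	_ = proto.Equal(a, b) // true
+	b.Label = proto.String("y")
+	_ = proto.Equal(a, b) // false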
+*/ +func Equal(a, b Message) bool { + if a == nil || b == nil { + return a == b + } + v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) + if v1.Type() != v2.Type() { + return false + } + if v1.Kind() == reflect.Ptr { + if v1.IsNil() { + return v2.IsNil() + } + if v2.IsNil() { + return false + } + v1, v2 = v1.Elem(), v2.Elem() + } + if v1.Kind() != reflect.Struct { + return false + } + return equalStruct(v1, v2) +} + +// v1 and v2 are known to have the same type. +func equalStruct(v1, v2 reflect.Value) bool { + sprop := GetProperties(v1.Type()) + for i := 0; i < v1.NumField(); i++ { + f := v1.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + f1, f2 := v1.Field(i), v2.Field(i) + if f.Type.Kind() == reflect.Ptr { + if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { + // both unset + continue + } else if n1 != n2 { + // set/unset mismatch + return false + } + f1, f2 = f1.Elem(), f2.Elem() + } + if !equalAny(f1, f2, sprop.Prop[i]) { + return false + } + } + + if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_InternalExtensions") + if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { + return false + } + } + + if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_extensions") + if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { + return false + } + } + + uf := v1.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return true + } + + u1 := uf.Bytes() + u2 := v2.FieldByName("XXX_unrecognized").Bytes() + return bytes.Equal(u1, u2) +} + +// v1 and v2 are known to have the same type. +// prop may be nil. +func equalAny(v1, v2 reflect.Value, prop *Properties) bool { + if v1.Type() == protoMessageType { + m1, _ := v1.Interface().(Message) + m2, _ := v2.Interface().(Message) + return Equal(m1, m2) + } + switch v1.Kind() { + case reflect.Bool: + return v1.Bool() == v2.Bool() + case reflect.Float32, reflect.Float64: + return v1.Float() == v2.Float() + case reflect.Int32, reflect.Int64: + return v1.Int() == v2.Int() + case reflect.Interface: + // Probably a oneof field; compare the inner values. + n1, n2 := v1.IsNil(), v2.IsNil() + if n1 || n2 { + return n1 == n2 + } + e1, e2 := v1.Elem(), v2.Elem() + if e1.Type() != e2.Type() { + return false + } + return equalAny(e1, e2, nil) + case reflect.Map: + if v1.Len() != v2.Len() { + return false + } + for _, key := range v1.MapKeys() { + val2 := v2.MapIndex(key) + if !val2.IsValid() { + // This key was not found in the second map. + return false + } + if !equalAny(v1.MapIndex(key), val2, nil) { + return false + } + } + return true + case reflect.Ptr: + // Maps may have nil values in them, so check for nil. + if v1.IsNil() && v2.IsNil() { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return equalAny(v1.Elem(), v2.Elem(), prop) + case reflect.Slice: + if v1.Type().Elem().Kind() == reflect.Uint8 { + // short circuit: []byte + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value. 
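+			// (Editorial note: nil and []byte{} therefore compare as equal
+			// for proto3 bytes fields.)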
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) + } + + if v1.Len() != v2.Len() { + return false + } + for i := 0; i < v1.Len(); i++ { + if !equalAny(v1.Index(i), v2.Index(i), prop) { + return false + } + } + return true + case reflect.String: + return v1.Interface().(string) == v2.Interface().(string) + case reflect.Struct: + return equalStruct(v1, v2) + case reflect.Uint32, reflect.Uint64: + return v1.Uint() == v2.Uint() + } + + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to compare %v", v1) + return false +} + +// base is the struct type that the extensions are based on. +// x1 and x2 are InternalExtensions. +func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { + em1, _ := x1.extensionsRead() + em2, _ := x2.extensionsRead() + return equalExtMap(base, em1, em2) +} + +func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { + if len(em1) != len(em2) { + return false + } + + for extNum, e1 := range em1 { + e2, ok := em2[extNum] + if !ok { + return false + } + + m1, m2 := e1.value, e2.value + + if m1 == nil && m2 == nil { + // Both have only encoded form. + if bytes.Equal(e1.enc, e2.enc) { + continue + } + // The bytes are different, but the extensions might still be + // equal. We need to decode them to compare. + } + + if m1 != nil && m2 != nil { + // Both are unencoded. + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + continue + } + + // At least one is encoded. To do a semantically correct comparison + // we need to unmarshal them first. + var desc *ExtensionDesc + if m := extensionMaps[base]; m != nil { + desc = m[extNum] + } + if desc == nil { + // If both have only encoded form and the bytes are the same, + // it is handled above. We get here when the bytes are different. + // We don't know how to decode it, so just compare them as byte + // slices. + log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) + return false + } + var err error + if m1 == nil { + m1, err = decodeExtension(e1.enc, desc) + } + if m2 == nil && err == nil { + m2, err = decodeExtension(e2.enc, desc) + } + if err != nil { + // The encoded form is invalid. + log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) + return false + } + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + } + + return true +} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go new file mode 100644 index 0000000000..341c6f57f5 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/extensions.go @@ -0,0 +1,605 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Types and routines for supporting protocol buffer extensions. + */ + +import ( + "errors" + "fmt" + "io" + "reflect" + "strconv" + "sync" +) + +// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. +var ErrMissingExtension = errors.New("proto: missing extension") + +// ExtensionRange represents a range of message extensions for a protocol buffer. +// Used in code generated by the protocol compiler. +type ExtensionRange struct { + Start, End int32 // both inclusive +} + +// extendableProto is an interface implemented by any protocol buffer generated by the current +// proto compiler that may be extended. +type extendableProto interface { + Message + ExtensionRangeArray() []ExtensionRange + extensionsWrite() map[int32]Extension + extensionsRead() (map[int32]Extension, sync.Locker) +} + +// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous +// version of the proto compiler that may be extended. +type extendableProtoV1 interface { + Message + ExtensionRangeArray() []ExtensionRange + ExtensionMap() map[int32]Extension +} + +// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. +type extensionAdapter struct { + extendableProtoV1 +} + +func (e extensionAdapter) extensionsWrite() map[int32]Extension { + return e.ExtensionMap() +} + +func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + return e.ExtensionMap(), notLocker{} +} + +// notLocker is a sync.Locker whose Lock and Unlock methods are nops. +type notLocker struct{} + +func (n notLocker) Lock() {} +func (n notLocker) Unlock() {} + +// extendable returns the extendableProto interface for the given generated proto message. +// If the proto message has the old extension format, it returns a wrapper that implements +// the extendableProto interface. +func extendable(p interface{}) (extendableProto, error) { + switch p := p.(type) { + case extendableProto: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return p, nil + case extendableProtoV1: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return extensionAdapter{p}, nil + case extensionsBytes: + return slowExtensionAdapter{p}, nil + } + // Don't allocate a specific error containing %T: + // this is the hot path for Clone and MarshalText. 
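+	// (Editorial note: callers such as HasExtension and ClearExtension treat
+	// this error as "message has no extensions" rather than as a failure.)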
+ return nil, errNotExtendable +} + +var errNotExtendable = errors.New("proto: not an extendable proto.Message") + +func isNilPtr(x interface{}) bool { + v := reflect.ValueOf(x) + return v.Kind() == reflect.Ptr && v.IsNil() +} + +// XXX_InternalExtensions is an internal representation of proto extensions. +// +// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, +// thus gaining the unexported 'extensions' method, which can be called only from the proto package. +// +// The methods of XXX_InternalExtensions are not concurrency safe in general, +// but calls to logically read-only methods such as has and get may be executed concurrently. +type XXX_InternalExtensions struct { + // The struct must be indirect so that if a user inadvertently copies a + // generated message and its embedded XXX_InternalExtensions, they + // avoid the mayhem of a copied mutex. + // + // The mutex serializes all logically read-only operations to p.extensionMap. + // It is up to the client to ensure that write operations to p.extensionMap are + // mutually exclusive with other accesses. + p *struct { + mu sync.Mutex + extensionMap map[int32]Extension + } +} + +// extensionsWrite returns the extension map, creating it on first use. +func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { + if e.p == nil { + e.p = new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }) + e.p.extensionMap = make(map[int32]Extension) + } + return e.p.extensionMap +} + +// extensionsRead returns the extensions map for read-only use. It may be nil. +// The caller must hold the returned mutex's lock when accessing Elements within the map. +func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { + if e.p == nil { + return nil, nil + } + return e.p.extensionMap, &e.p.mu +} + +// ExtensionDesc represents an extension specification. +// Used in generated code from the protocol compiler. +type ExtensionDesc struct { + ExtendedType Message // nil pointer to the type that is being extended + ExtensionType interface{} // nil pointer to the extension type + Field int32 // field number + Name string // fully-qualified name of extension, for text formatting + Tag string // protobuf tag style + Filename string // name of the file in which the extension is defined +} + +func (ed *ExtensionDesc) repeated() bool { + t := reflect.TypeOf(ed.ExtensionType) + return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 +} + +// Extension represents an extension in a message. +type Extension struct { + // When an extension is stored in a message using SetExtension + // only desc and value are set. When the message is marshaled + // enc will be set to the encoded form of the message. + // + // When a message is unmarshaled and contains extensions, each + // extension will have only enc set. When such an extension is + // accessed using GetExtension (or GetExtensions) desc and value + // will be set. + desc *ExtensionDesc + value interface{} + enc []byte +} + +// SetRawExtension is for testing only. +func SetRawExtension(base Message, id int32, b []byte) { + if ebase, ok := base.(extensionsBytes); ok { + clearExtension(base, id) + ext := ebase.GetExtensions() + *ext = append(*ext, b...) + return + } + epb, err := extendable(base) + if err != nil { + return + } + extmap := epb.extensionsWrite() + extmap[id] = Extension{enc: b} +} + +// isExtensionField returns true iff the given field number is in an extension range. 
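+// For example, if ExtensionRangeArray returns [{Start: 100, End: 200}],
+// field 150 is an extension field and field 99 is not. (Editorial example.)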
+func isExtensionField(pb extendableProto, field int32) bool { + for _, er := range pb.ExtensionRangeArray() { + if er.Start <= field && field <= er.End { + return true + } + } + return false +} + +// checkExtensionTypes checks that the given extension is valid for pb. +func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { + var pbi interface{} = pb + // Check the extended type. + if ea, ok := pbi.(extensionAdapter); ok { + pbi = ea.extendableProtoV1 + } + if ea, ok := pbi.(slowExtensionAdapter); ok { + pbi = ea.extensionsBytes + } + if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { + return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) + } + // Check the range. + if !isExtensionField(pb, extension.Field) { + return errors.New("proto: bad extension number; not in declared ranges") + } + return nil +} + +// extPropKey is sufficient to uniquely identify an extension. +type extPropKey struct { + base reflect.Type + field int32 +} + +var extProp = struct { + sync.RWMutex + m map[extPropKey]*Properties +}{ + m: make(map[extPropKey]*Properties), +} + +func extensionProperties(ed *ExtensionDesc) *Properties { + key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} + + extProp.RLock() + if prop, ok := extProp.m[key]; ok { + extProp.RUnlock() + return prop + } + extProp.RUnlock() + + extProp.Lock() + defer extProp.Unlock() + // Check again. + if prop, ok := extProp.m[key]; ok { + return prop + } + + prop := new(Properties) + prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) + extProp.m[key] = prop + return prop +} + +// HasExtension returns whether the given extension is present in pb. +func HasExtension(pb Message, extension *ExtensionDesc) bool { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + buf := *ext + o := 0 + for o < len(buf) { + tag, n := DecodeVarint(buf[o:]) + fieldNum := int32(tag >> 3) + if int32(fieldNum) == extension.Field { + return true + } + wireType := int(tag & 0x7) + o += n + l, err := size(buf[o:], wireType) + if err != nil { + return false + } + o += l + } + return false + } + // TODO: Check types, field numbers, etc.? + epb, err := extendable(pb) + if err != nil { + return false + } + extmap, mu := epb.extensionsRead() + if extmap == nil { + return false + } + mu.Lock() + _, ok := extmap[extension.Field] + mu.Unlock() + return ok +} + +// ClearExtension removes the given extension from pb. +func ClearExtension(pb Message, extension *ExtensionDesc) { + clearExtension(pb, extension.Field) +} + +func clearExtension(pb Message, fieldNum int32) { + if epb, ok := pb.(extensionsBytes); ok { + offset := 0 + for offset != -1 { + offset = deleteExtension(epb, fieldNum, offset) + } + return + } + epb, err := extendable(pb) + if err != nil { + return + } + // TODO: Check types, field numbers, etc.? + extmap := epb.extensionsWrite() + delete(extmap, fieldNum) +} + +// GetExtension retrieves a proto2 extended field from pb. +// +// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), +// then GetExtension parses the encoded field and returns a Go value of the specified type. +// If the field is not present, then the default value is returned (if one is specified), +// otherwise ErrMissingExtension is reported. +// +// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes of the field extension. 
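+//
+// Illustrative usage (editorial sketch; E_UserId is a hypothetical registered
+// *ExtensionDesc whose ExtensionType is (*int32)(nil)):
+//
+//	if HasExtension(msg, E_UserId) {
+//		v, err := GetExtension(msg, E_UserId)
+//		if err != nil {
+//			return err
+//		}
+//		id := *(v.(*int32))
+//		_ = id
+//	}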
+func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + return decodeExtensionFromBytes(extension, *ext) + } + + epb, err := extendable(pb) + if err != nil { + return nil, err + } + + if extension.ExtendedType != nil { + // can only check type if this is a complete descriptor + if cerr := checkExtensionTypes(epb, extension); cerr != nil { + return nil, cerr + } + } + + emap, mu := epb.extensionsRead() + if emap == nil { + return defaultExtensionValue(extension) + } + mu.Lock() + defer mu.Unlock() + e, ok := emap[extension.Field] + if !ok { + // defaultExtensionValue returns the default value or + // ErrMissingExtension if there is no default. + return defaultExtensionValue(extension) + } + + if e.value != nil { + // Already decoded. Check the descriptor, though. + if e.desc != extension { + // This shouldn't happen. If it does, it means that + // GetExtension was called twice with two different + // descriptors with the same field number. + return nil, errors.New("proto: descriptor conflict") + } + return e.value, nil + } + + if extension.ExtensionType == nil { + // incomplete descriptor + return e.enc, nil + } + + v, err := decodeExtension(e.enc, extension) + if err != nil { + return nil, err + } + + // Remember the decoded version and drop the encoded version. + // That way it is safe to mutate what we return. + e.value = v + e.desc = extension + e.enc = nil + emap[extension.Field] = e + return e.value, nil +} + +// defaultExtensionValue returns the default value for extension. +// If no default for an extension is defined ErrMissingExtension is returned. +func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + if extension.ExtensionType == nil { + // incomplete descriptor, so no default + return nil, ErrMissingExtension + } + + t := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + + sf, _, err := fieldDefault(t, props) + if err != nil { + return nil, err + } + + if sf == nil || sf.value == nil { + // There is no default value. + return nil, ErrMissingExtension + } + + if t.Kind() != reflect.Ptr { + // We do not need to return a Ptr, we can directly return sf.value. + return sf.value, nil + } + + // We need to return an interface{} that is a pointer to sf.value. + value := reflect.New(t).Elem() + value.Set(reflect.New(value.Type().Elem())) + if sf.kind == reflect.Int32 { + // We may have an int32 or an enum, but the underlying data is int32. + // Since we can't set an int32 into a non int32 reflect.value directly + // set it as a int32. + value.Elem().SetInt(int64(sf.value.(int32))) + } else { + value.Elem().Set(reflect.ValueOf(sf.value)) + } + return value.Interface(), nil +} + +// decodeExtension decodes an extension encoded in b. +func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { + t := reflect.TypeOf(extension.ExtensionType) + unmarshal := typeUnmarshaler(t, extension.Tag) + + // t is a pointer to a struct, pointer to basic type or a slice. + // Allocate space to store the pointer/slice. 
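+	// (Editorial note: for a repeated extension t is a slice type and each
+	// iteration of the loop below appends one decoded element.)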
+ value := reflect.New(t).Elem() + + var err error + for { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + wire := int(x) & 7 + + b, err = unmarshal(b, valToPointer(value.Addr()), wire) + if err != nil { + return nil, err + } + + if len(b) == 0 { + break + } + } + return value.Interface(), nil +} + +// GetExtensions returns a slice of the extensions present in pb that are also listed in es. +// The returned slice has the same length as es; missing extensions will appear as nil elements. +func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + extensions = make([]interface{}, len(es)) + for i, e := range es { + extensions[i], err = GetExtension(epb, e) + if err == ErrMissingExtension { + err = nil + } + if err != nil { + return + } + } + return +} + +// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. +// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing +// just the Field field, which defines the extension's field number. +func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + registeredExtensions := RegisteredExtensions(pb) + + emap, mu := epb.extensionsRead() + if emap == nil { + return nil, nil + } + mu.Lock() + defer mu.Unlock() + extensions := make([]*ExtensionDesc, 0, len(emap)) + for extid, e := range emap { + desc := e.desc + if desc == nil { + desc = registeredExtensions[extid] + if desc == nil { + desc = &ExtensionDesc{Field: extid} + } + } + + extensions = append(extensions, desc) + } + return extensions, nil +} + +// SetExtension sets the specified extension of pb to the specified value. +func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { + if epb, ok := pb.(extensionsBytes); ok { + ClearExtension(pb, extension) + newb, err := encodeExtension(extension, value) + if err != nil { + return err + } + bb := epb.GetExtensions() + *bb = append(*bb, newb...) + return nil + } + epb, err := extendable(pb) + if err != nil { + return err + } + if err := checkExtensionTypes(epb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) + } + // nil extension values need to be caught early, because the + // encoder can't distinguish an ErrNil due to a nil extension + // from an ErrNil due to a missing field. Extensions are + // always optional, so the encoder would just swallow the error + // and drop all the extensions from the encoded message. + if reflect.ValueOf(value).IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + } + + extmap := epb.extensionsWrite() + extmap[extension.Field] = Extension{desc: extension, value: value} + return nil +} + +// ClearAllExtensions clears all extensions from pb. +func ClearAllExtensions(pb Message) { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + *ext = []byte{} + return + } + epb, err := extendable(pb) + if err != nil { + return + } + m := epb.extensionsWrite() + for k := range m { + delete(m, k) + } +} + +// A global registry of extensions. +// The generated code will register the generated descriptors by calling RegisterExtension. 
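+//
+// A generated file typically registers its descriptors like this (editorial
+// illustration; the names are hypothetical):
+//
+//	var E_UserId = &proto.ExtensionDesc{
+//		ExtendedType:  (*MyMessage)(nil),
+//		ExtensionType: (*int32)(nil),
+//		Field:         100,
+//		Name:          "example.user_id",
+//		Tag:           "varint,100,opt,name=user_id",
+//	}
+//
+//	func init() { proto.RegisterExtension(E_UserId) }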
+ +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. +func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. +func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go new file mode 100644 index 0000000000..6f1ae120ec --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go @@ -0,0 +1,389 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
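+
+// Editorial note (not upstream text): this file carries gogo-specific
+// extension support, notably for messages that store extensions in a raw
+// []byte field (the extensionsBytes interface below) instead of a
+// map[int32]Extension.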
+ +package proto + +import ( + "bytes" + "errors" + "fmt" + "io" + "reflect" + "sort" + "strings" + "sync" +) + +type extensionsBytes interface { + Message + ExtensionRangeArray() []ExtensionRange + GetExtensions() *[]byte +} + +type slowExtensionAdapter struct { + extensionsBytes +} + +func (s slowExtensionAdapter) extensionsWrite() map[int32]Extension { + panic("Please report a bug to github.com/gogo/protobuf if you see this message: Writing extensions is not supported for extensions stored in a byte slice field.") +} + +func (s slowExtensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + b := s.GetExtensions() + m, err := BytesToExtensionsMap(*b) + if err != nil { + panic(err) + } + return m, notLocker{} +} + +func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool { + if reflect.ValueOf(pb).IsNil() { + return ifnotset + } + value, err := GetExtension(pb, extension) + if err != nil { + return ifnotset + } + if value == nil { + return ifnotset + } + if value.(*bool) == nil { + return ifnotset + } + return *(value.(*bool)) +} + +func (this *Extension) Equal(that *Extension) bool { + if err := this.Encode(); err != nil { + return false + } + if err := that.Encode(); err != nil { + return false + } + return bytes.Equal(this.enc, that.enc) +} + +func (this *Extension) Compare(that *Extension) int { + if err := this.Encode(); err != nil { + return 1 + } + if err := that.Encode(); err != nil { + return -1 + } + return bytes.Compare(this.enc, that.enc) +} + +func SizeOfInternalExtension(m extendableProto) (n int) { + info := getMarshalInfo(reflect.TypeOf(m)) + return info.sizeV1Extensions(m.extensionsWrite()) +} + +type sortableMapElem struct { + field int32 + ext Extension +} + +func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions { + s := make(sortableExtensions, 0, len(m)) + for k, v := range m { + s = append(s, &sortableMapElem{field: k, ext: v}) + } + return s +} + +type sortableExtensions []*sortableMapElem + +func (this sortableExtensions) Len() int { return len(this) } + +func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] } + +func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field } + +func (this sortableExtensions) String() string { + sort.Sort(this) + ss := make([]string, len(this)) + for i := range this { + ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext) + } + return "map[" + strings.Join(ss, ",") + "]" +} + +func StringFromInternalExtension(m extendableProto) string { + return StringFromExtensionsMap(m.extensionsWrite()) +} + +func StringFromExtensionsMap(m map[int32]Extension) string { + return newSortableExtensionsFromMap(m).String() +} + +func StringFromExtensionsBytes(ext []byte) string { + m, err := BytesToExtensionsMap(ext) + if err != nil { + panic(err) + } + return StringFromExtensionsMap(m) +} + +func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) { + return EncodeExtensionMap(m.extensionsWrite(), data) +} + +func EncodeInternalExtensionBackwards(m extendableProto, data []byte) (n int, err error) { + return EncodeExtensionMapBackwards(m.extensionsWrite(), data) +} + +func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { + o := 0 + for _, e := range m { + if err := e.Encode(); err != nil { + return 0, err + } + n := copy(data[o:], e.enc) + if n != len(e.enc) { + return 0, io.ErrShortBuffer + } + o += n + } + return o, nil +} + +func EncodeExtensionMapBackwards(m 
map[int32]Extension, data []byte) (n int, err error) { + o := 0 + end := len(data) + for _, e := range m { + if err := e.Encode(); err != nil { + return 0, err + } + n := copy(data[end-len(e.enc):], e.enc) + if n != len(e.enc) { + return 0, io.ErrShortBuffer + } + end -= n + o += n + } + return o, nil +} + +func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) { + e := m[id] + if err := e.Encode(); err != nil { + return nil, err + } + return e.enc, nil +} + +func size(buf []byte, wire int) (int, error) { + switch wire { + case WireVarint: + _, n := DecodeVarint(buf) + return n, nil + case WireFixed64: + return 8, nil + case WireBytes: + v, n := DecodeVarint(buf) + return int(v) + n, nil + case WireFixed32: + return 4, nil + case WireStartGroup: + offset := 0 + for { + u, n := DecodeVarint(buf[offset:]) + fwire := int(u & 0x7) + offset += n + if fwire == WireEndGroup { + return offset, nil + } + s, err := size(buf[offset:], wire) + if err != nil { + return 0, err + } + offset += s + } + } + return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire) +} + +func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) { + m := make(map[int32]Extension) + i := 0 + for i < len(buf) { + tag, n := DecodeVarint(buf[i:]) + if n <= 0 { + return nil, fmt.Errorf("unable to decode varint") + } + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + l, err := size(buf[i+n:], wireType) + if err != nil { + return nil, err + } + end := i + int(l) + n + m[int32(fieldNum)] = Extension{enc: buf[i:end]} + i = end + } + return m, nil +} + +func NewExtension(e []byte) Extension { + ee := Extension{enc: make([]byte, len(e))} + copy(ee.enc, e) + return ee +} + +func AppendExtension(e Message, tag int32, buf []byte) { + if ee, eok := e.(extensionsBytes); eok { + ext := ee.GetExtensions() + *ext = append(*ext, buf...) + return + } + if ee, eok := e.(extendableProto); eok { + m := ee.extensionsWrite() + ext := m[int32(tag)] // may be missing + ext.enc = append(ext.enc, buf...) 
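+		// (Editorial note: ext is a copy of the map entry, so the appended
+		// value must be stored back into the map.)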
+ m[int32(tag)] = ext + } +} + +func encodeExtension(extension *ExtensionDesc, value interface{}) ([]byte, error) { + u := getMarshalInfo(reflect.TypeOf(extension.ExtendedType)) + ei := u.getExtElemInfo(extension) + v := value + p := toAddrPointer(&v, ei.isptr) + siz := ei.sizer(p, SizeVarint(ei.wiretag)) + buf := make([]byte, 0, siz) + return ei.marshaler(buf, p, ei.wiretag, false) +} + +func decodeExtensionFromBytes(extension *ExtensionDesc, buf []byte) (interface{}, error) { + o := 0 + for o < len(buf) { + tag, n := DecodeVarint((buf)[o:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + if o+n > len(buf) { + return nil, fmt.Errorf("unable to decode extension") + } + l, err := size((buf)[o+n:], wireType) + if err != nil { + return nil, err + } + if int32(fieldNum) == extension.Field { + if o+n+l > len(buf) { + return nil, fmt.Errorf("unable to decode extension") + } + v, err := decodeExtension((buf)[o:o+n+l], extension) + if err != nil { + return nil, err + } + return v, nil + } + o += n + l + } + return defaultExtensionValue(extension) +} + +func (this *Extension) Encode() error { + if this.enc == nil { + var err error + this.enc, err = encodeExtension(this.desc, this.value) + if err != nil { + return err + } + } + return nil +} + +func (this Extension) GoString() string { + if err := this.Encode(); err != nil { + return fmt.Sprintf("error encoding extension: %v", err) + } + return fmt.Sprintf("proto.NewExtension(%#v)", this.enc) +} + +func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error { + typ := reflect.TypeOf(pb).Elem() + ext, ok := extensionMaps[typ] + if !ok { + return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) + } + desc, ok := ext[fieldNum] + if !ok { + return errors.New("proto: bad extension number; not in declared ranges") + } + return SetExtension(pb, desc, value) +} + +func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) { + typ := reflect.TypeOf(pb).Elem() + ext, ok := extensionMaps[typ] + if !ok { + return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) + } + desc, ok := ext[fieldNum] + if !ok { + return nil, fmt.Errorf("unregistered field number %d", fieldNum) + } + return GetExtension(pb, desc) +} + +func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions { + x := &XXX_InternalExtensions{ + p: new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }), + } + x.p.extensionMap = m + return *x +} + +func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension { + pb := extendable.(extendableProto) + return pb.extensionsWrite() +} + +func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { + ext := pb.GetExtensions() + for offset < len(*ext) { + tag, n1 := DecodeVarint((*ext)[offset:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + n2, err := size((*ext)[offset+n1:], wireType) + if err != nil { + panic(err) + } + newOffset := offset + n1 + n2 + if fieldNum == theFieldNum { + *ext = append((*ext)[:offset], (*ext)[newOffset:]...) + return offset + } + offset = newOffset + } + return -1 +} diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go new file mode 100644 index 0000000000..80db1c155b --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/lib.go @@ -0,0 +1,973 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package proto converts data structures to and from the wire format of +protocol buffers. It works in concert with the Go source code generated +for .proto files by the protocol compiler. + +A summary of the properties of the protocol buffer interface +for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. + - There are getters that return a field's value if set, + and return the field's default value if unset. + The getters work even if the receiver is a nil message. + - The zero value for a struct is its correct initialization state. + All desired fields must be set before marshaling. + - A Reset() method will restore a protobuf struct to its zero state. + - Non-repeated fields are pointers to the values; nil means unset. + That is, optional or required field int32 f becomes F *int32. + - Repeated fields are slices. + - Helper functions are available to aid the setting of fields. + msg.Foo = proto.String("hello") // set field + - Constants are defined to hold the default values of all fields that + have them. They have the form Default_StructName_FieldName. + Because the getter methods handle defaulted values, + direct use of these constants should be rare. + - Enums are given type names and maps from names to values. + Enum values are prefixed by the enclosing message's name, or by the + enum's type name if it is a top-level enum. Enum types have a String + method, and a Enum method to assist in message construction. + - Nested messages, groups and enums have type names prefixed with the name of + the surrounding message type. + - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any) followed by the CamelCased name of the + extension field itself. 
HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Oneof field sets are given a single field in their message, + with distinguished wrapper types for each possible field value. + - Marshal and Unmarshal are functions to encode and decode the wire format. + +When the .proto file specifies `syntax="proto3"`, there are some differences: + + - Non-repeated fields of non-message type are values instead of pointers. + - Enum types do not get an Enum method. + +The simplest way to describe this is to see an example. +Given file test.proto, containing + + package example; + + enum FOO { X = 17; } + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + oneof union { + int32 number = 6; + string name = 7; + } + } + +The resulting file, test.pb.go, is: + + package example + + import proto "github.com/gogo/protobuf/proto" + import math "math" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err + } + *x = FOO(value) + return nil + } + + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + // Types that are valid to be assigned to Union: + // *Test_Number + // *Test_Name + Union isTest_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` + } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} + + type isTest_Union interface { + isTest_Union() + } + + type Test_Number struct { + Number int32 `protobuf:"varint,6,opt,name=number"` + } + type Test_Name struct { + Name string `protobuf:"bytes,7,opt,name=name"` + } + + func (*Test_Number) isTest_Union() {} + func (*Test_Name) isTest_Union() {} + + func (m *Test) GetUnion() isTest_Union { + if m != nil { + return m.Union + } + return nil + } + const Default_Test_Type int32 = 77 + + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" + } + + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_Test_Type + } + + func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + } + func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } + + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" + } + + func (m *Test) GetNumber() int32 { + if x, ok := m.GetUnion().(*Test_Number); ok { + return x.Number + } + return 0 + } + + 
+	func (m *Test) GetName() string {
+		if x, ok := m.GetUnion().(*Test_Name); ok {
+			return x.Name
+		}
+		return ""
+	}
+
+	func init() {
+		proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
+	}
+
+To create and play with a Test object:
+
+	package main
+
+	import (
+		"log"
+
+		"github.com/gogo/protobuf/proto"
+		pb "./example.pb"
+	)
+
+	func main() {
+		test := &pb.Test{
+			Label: proto.String("hello"),
+			Type:  proto.Int32(17),
+			Reps:  []int64{1, 2, 3},
+			Optionalgroup: &pb.Test_OptionalGroup{
+				RequiredField: proto.String("good bye"),
+			},
+			Union: &pb.Test_Name{"fred"},
+		}
+		data, err := proto.Marshal(test)
+		if err != nil {
+			log.Fatal("marshaling error: ", err)
+		}
+		newTest := &pb.Test{}
+		err = proto.Unmarshal(data, newTest)
+		if err != nil {
+			log.Fatal("unmarshaling error: ", err)
+		}
+		// Now test and newTest contain the same data.
+		if test.GetLabel() != newTest.GetLabel() {
+			log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+		}
+		// Use a type switch to determine which oneof was set.
+		switch u := test.Union.(type) {
+		case *pb.Test_Number: // u.Number contains the number.
+		case *pb.Test_Name: // u.Name contains the string.
+		}
+		// etc.
+	}
+*/
+package proto
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"reflect"
+	"sort"
+	"strconv"
+	"sync"
+)
+
+// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
+// Marshal reports this when a required field is not initialized.
+// Unmarshal reports this when a required field is missing from the wire data.
+type RequiredNotSetError struct{ field string }
+
+func (e *RequiredNotSetError) Error() string {
+	if e.field == "" {
+		return fmt.Sprintf("proto: required field not set")
+	}
+	return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+func (e *RequiredNotSetError) RequiredNotSet() bool {
+	return true
+}
+
+type invalidUTF8Error struct{ field string }
+
+func (e *invalidUTF8Error) Error() string {
+	if e.field == "" {
+		return "proto: invalid UTF-8 detected"
+	}
+	return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
+}
+func (e *invalidUTF8Error) InvalidUTF8() bool {
+	return true
+}
+
+// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
+// This error should not be exposed to the external API as such errors should
+// be recreated with the field information.
+var errInvalidUTF8 = &invalidUTF8Error{}
+
+// isNonFatal reports whether the error is either a RequiredNotSet error
+// or an InvalidUTF8 error.
+func isNonFatal(err error) bool {
+	if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
+		return true
+	}
+	if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
+		return true
+	}
+	return false
+}
+
+type nonFatal struct{ E error }
+
+// Merge merges err into nf and reports whether it was successful.
+// Otherwise it returns false for any fatal non-nil errors.
+func (nf *nonFatal) Merge(err error) (ok bool) {
+	if err == nil {
+		return true // not an error
+	}
+	if !isNonFatal(err) {
+		return false // fatal error
+	}
+	if nf.E == nil {
+		nf.E = err // store first instance of non-fatal error
+	}
+	return true
+}
+
+// Message is implemented by generated protocol buffer messages.
+type Message interface {
+	Reset()
+	String() string
+	ProtoMessage()
+}
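+
+// A usage sketch (illustrative, not part of the upstream file): callers can
+// distinguish the non-fatal required-not-set case from fatal errors. It is
+// assumed here that Marshal still returns the partial encoding alongside a
+// *RequiredNotSetError:
+//
+//	data, err := proto.Marshal(msg)
+//	if _, soft := err.(*proto.RequiredNotSetError); err != nil && !soft {
+//		log.Fatal("marshaling error: ", err) // fatal
+//	}
+//	_ = data // with only a RequiredNotSetError, data holds the fields that were set
+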
+// A Buffer is a buffer manager for marshaling and unmarshaling
+// protocol buffers. It may be reused between invocations to
+// reduce memory usage. It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
+type Buffer struct {
+	buf   []byte // encode/decode byte stream
+	index int    // read point
+
+	deterministic bool
+}
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+func NewBuffer(e []byte) *Buffer {
+	return &Buffer{buf: e}
+}
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+func (p *Buffer) Reset() {
+	p.buf = p.buf[0:0] // for reading/writing
+	p.index = 0        // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+	p.buf = s
+	p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
+
+// SetDeterministic sets whether to use deterministic serialization.
+//
+// Deterministic serialization guarantees that for a given binary, equal
+// messages will always be serialized to the same bytes. This implies:
+//
+//   - Repeated serialization of a message will return the same bytes.
+//   - Different processes of the same binary (which may be executing on
+//     different machines) will serialize equal messages to the same bytes.
+//
+// Note that the deterministic serialization is NOT canonical across
+// languages. It is not guaranteed to remain stable over time. It is unstable
+// across different builds with schema changes due to unknown fields.
+// Users who need canonical serialization (e.g., persistent storage in a
+// canonical form, fingerprinting, etc.) should define their own
+// canonicalization specification and implement their own serializer rather
+// than relying on this API.
+//
+// If deterministic serialization is requested, map entries will be sorted
+// by keys in lexicographical order. This is an implementation detail and
+// subject to change.
+func (p *Buffer) SetDeterministic(deterministic bool) {
+	p.deterministic = deterministic
+}
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+	return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+	return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+	p := new(int32)
+	*p = int32(v)
+	return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+	return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+	return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+	return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+	return &v
+}
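+
+// A short illustration (not part of the upstream file), using the Test
+// message from the package documentation above: optional proto2 scalar
+// fields are pointers in Go, so literals cannot be assigned directly and
+// these helpers do the boxing:
+//
+//	msg := &pb.Test{
+//		Label: proto.String("hello"), // *string field
+//		Type:  proto.Int32(17),       // *int32 field
+//	}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.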
+func Uint64(v uint64) *uint64 { + return &v +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { + return &v +} + +// EnumName is a helper function to simplify printing protocol buffer enums +// by name. Given an enum map and a value, it returns a useful string. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// UnmarshalJSONEnum is a helper function to simplify recovering enum int values +// from their JSON-encoded representation. Given a map from the enum's symbolic +// names to its int values, and a byte buffer containing the JSON-encoded +// value, it returns an int32 that can be cast to the enum type by the caller. +// +// The function can deal with both JSON representations, numeric and symbolic. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// DebugPrint dumps the encoded data in b in a debugging format with a header +// including the string s. Used in testing but made available for general debugging. +func (p *Buffer) DebugPrint(s string, b []byte) { + var u uint64 + + obuf := p.buf + sindex := p.index + p.buf = b + p.index = 0 + depth := 0 + + fmt.Printf("\n--- %s ---\n", s) + +out: + for { + for i := 0; i < depth; i++ { + fmt.Print(" ") + } + + index := p.index + if index == len(p.buf) { + break + } + + op, err := p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: fetching op err %v\n", index, err) + break out + } + tag := op >> 3 + wire := op & 7 + + switch wire { + default: + fmt.Printf("%3d: t=%3d unknown wire=%d\n", + index, tag, wire) + break out + + case WireBytes: + var r []byte + + r, err = p.DecodeRawBytes(false) + if err != nil { + break out + } + fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) + if len(r) <= 6 { + for i := 0; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } else { + for i := 0; i < 3; i++ { + fmt.Printf(" %.2x", r[i]) + } + fmt.Printf(" ..") + for i := len(r) - 3; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } + fmt.Printf("\n") + + case WireFixed32: + u, err = p.DecodeFixed32() + if err != nil { + fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) + + case WireFixed64: + u, err = p.DecodeFixed64() + if err != nil { + fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) + + case WireVarint: + u, err = p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) + + case WireStartGroup: + fmt.Printf("%3d: t=%3d start\n", index, tag) + depth++ + + case WireEndGroup: + depth-- + fmt.Printf("%3d: t=%3d end\n", index, tag) + } + } + + if depth != 0 { + fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) + } + fmt.Printf("\n") + + p.buf = obuf + p.index = sindex +} + +// SetDefaults 
sets unset protocol buffer fields to their default values. +// It only modifies fields that are both unset and have defined defaults. +// It recursively sets default values in any non-nil sub-messages. +func SetDefaults(pb Message) { + setDefaults(reflect.ValueOf(pb), true, false) +} + +// v is a struct. +func setDefaults(v reflect.Value, recur, zeros bool) { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + + defaultMu.RLock() + dm, ok := defaults[v.Type()] + defaultMu.RUnlock() + if !ok { + dm = buildDefaultMessage(v.Type()) + defaultMu.Lock() + defaults[v.Type()] = dm + defaultMu.Unlock() + } + + for _, sf := range dm.scalars { + f := v.Field(sf.index) + if !f.IsNil() { + // field already set + continue + } + dv := sf.value + if dv == nil && !zeros { + // no explicit default, and don't want to set zeros + continue + } + fptr := f.Addr().Interface() // **T + // TODO: Consider batching the allocations we do here. + switch sf.kind { + case reflect.Bool: + b := new(bool) + if dv != nil { + *b = dv.(bool) + } + *(fptr.(**bool)) = b + case reflect.Float32: + f := new(float32) + if dv != nil { + *f = dv.(float32) + } + *(fptr.(**float32)) = f + case reflect.Float64: + f := new(float64) + if dv != nil { + *f = dv.(float64) + } + *(fptr.(**float64)) = f + case reflect.Int32: + // might be an enum + if ft := f.Type(); ft != int32PtrType { + // enum + f.Set(reflect.New(ft.Elem())) + if dv != nil { + f.Elem().SetInt(int64(dv.(int32))) + } + } else { + // int32 field + i := new(int32) + if dv != nil { + *i = dv.(int32) + } + *(fptr.(**int32)) = i + } + case reflect.Int64: + i := new(int64) + if dv != nil { + *i = dv.(int64) + } + *(fptr.(**int64)) = i + case reflect.String: + s := new(string) + if dv != nil { + *s = dv.(string) + } + *(fptr.(**string)) = s + case reflect.Uint8: + // exceptional case: []byte + var b []byte + if dv != nil { + db := dv.([]byte) + b = make([]byte, len(db)) + copy(b, db) + } else { + b = []byte{} + } + *(fptr.(*[]byte)) = b + case reflect.Uint32: + u := new(uint32) + if dv != nil { + *u = dv.(uint32) + } + *(fptr.(**uint32)) = u + case reflect.Uint64: + u := new(uint64) + if dv != nil { + *u = dv.(uint64) + } + *(fptr.(**uint64)) = u + default: + log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) + } + } + + for _, ni := range dm.nested { + f := v.Field(ni) + // f is *T or T or []*T or []T + switch f.Kind() { + case reflect.Struct: + setDefaults(f, recur, zeros) + + case reflect.Ptr: + if f.IsNil() { + continue + } + setDefaults(f, recur, zeros) + + case reflect.Slice: + for i := 0; i < f.Len(); i++ { + e := f.Index(i) + if e.Kind() == reflect.Ptr && e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + + case reflect.Map: + for _, k := range f.MapKeys() { + e := f.MapIndex(k) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + } + } +} + +var ( + // defaults maps a protocol buffer struct type to a slice of the fields, + // with its scalar fields set to their proto-declared non-zero default values. + defaultMu sync.RWMutex + defaults = make(map[reflect.Type]defaultMessage) + + int32PtrType = reflect.TypeOf((*int32)(nil)) +) + +// defaultMessage represents information about the default values of a message. 
+type defaultMessage struct { + scalars []scalarField + nested []int // struct field index of nested messages +} + +type scalarField struct { + index int // struct field index + kind reflect.Kind // element type (the T in *T or []T) + value interface{} // the proto-declared default value, or nil +} + +// t is a struct type. +func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { + sprop := GetProperties(t) + for _, prop := range sprop.Prop { + fi, ok := sprop.decoderTags.get(prop.Tag) + if !ok { + // XXX_unrecognized + continue + } + ft := t.Field(fi).Type + + sf, nested, err := fieldDefault(ft, prop) + switch { + case err != nil: + log.Print(err) + case nested: + dm.nested = append(dm.nested, fi) + case sf != nil: + sf.index = fi + dm.scalars = append(dm.scalars, *sf) + } + } + + return dm +} + +// fieldDefault returns the scalarField for field type ft. +// sf will be nil if the field can not have a default. +// nestedMessage will be true if this is a nested message. +// Note that sf.index is not set on return. +func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { + var canHaveDefault bool + switch ft.Kind() { + case reflect.Struct: + nestedMessage = true // non-nullable + + case reflect.Ptr: + if ft.Elem().Kind() == reflect.Struct { + nestedMessage = true + } else { + canHaveDefault = true // proto2 scalar field + } + + case reflect.Slice: + switch ft.Elem().Kind() { + case reflect.Ptr, reflect.Struct: + nestedMessage = true // repeated message + case reflect.Uint8: + canHaveDefault = true // bytes field + } + + case reflect.Map: + if ft.Elem().Kind() == reflect.Ptr { + nestedMessage = true // map with message values + } + } + + if !canHaveDefault { + if nestedMessage { + return nil, true, nil + } + return nil, false, nil + } + + // We now know that ft is a pointer or slice. 
+ sf = &scalarField{kind: ft.Elem().Kind()} + + // scalar fields without defaults + if !prop.HasDefault { + return sf, false, nil + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.String: + sf.value = prop.Default + case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) + } + sf.value = x + default: + return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) + } + + return sf, false, nil +} + +// mapKeys returns a sort.Interface to be used for sorting the map keys. +// Map fields may have key types of non-float scalars, strings and enums. +func mapKeys(vs []reflect.Value) sort.Interface { + s := mapKeySorter{vs: vs} + + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. + if len(vs) == 0 { + return s + } + switch vs[0].Kind() { + case reflect.Int32, reflect.Int64: + s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } + case reflect.Uint32, reflect.Uint64: + s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + case reflect.Bool: + s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true + case reflect.String: + s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } + default: + panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) + } + + return s +} + +type mapKeySorter struct { + vs []reflect.Value + less func(a, b reflect.Value) bool +} + +func (s mapKeySorter) Len() int { return len(s.vs) } +func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } +func (s mapKeySorter) Less(i, j int) bool { + return s.less(s.vs[i], s.vs[j]) +} + +// isProto3Zero reports whether v is a zero proto3 value. 
+func isProto3Zero(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint32, reflect.Uint64:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.String:
+		return v.String() == ""
+	}
+	return false
+}
+
+const (
+	// GoGoProtoPackageIsVersion3 is referenced from generated protocol buffer files
+	// to assert that that code is compatible with this version of the proto package.
+	GoGoProtoPackageIsVersion3 = true
+
+	// GoGoProtoPackageIsVersion2 is referenced from generated protocol buffer files
+	// to assert that that code is compatible with this version of the proto package.
+	GoGoProtoPackageIsVersion2 = true
+
+	// GoGoProtoPackageIsVersion1 is referenced from generated protocol buffer files
+	// to assert that that code is compatible with this version of the proto package.
+	GoGoProtoPackageIsVersion1 = true
+)
+
+// InternalMessageInfo is a type used internally by generated .pb.go files.
+// This type is not intended to be used by non-generated code.
+// This type is not subject to any compatibility guarantee.
+type InternalMessageInfo struct {
+	marshal   *marshalInfo
+	unmarshal *unmarshalInfo
+	merge     *mergeInfo
+	discard   *discardInfo
+}
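+
+// Usage sketch (illustrative, not part of the upstream file): deterministic
+// output is opted into per Buffer; Buffer.Marshal is assumed to honor the
+// flag set by SetDeterministic:
+//
+//	buf := proto.NewBuffer(nil)
+//	buf.SetDeterministic(true)
+//	if err := buf.Marshal(msg); err != nil {
+//		log.Fatal(err)
+//	}
+//	data := buf.Bytes() // equal messages yield identical bytes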
diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go
new file mode 100644
index 0000000000..b3aa39190a
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go
@@ -0,0 +1,50 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"encoding/json"
+	"strconv"
+)
+
+// Sizer is implemented by messages that report the size in bytes of their
+// encoded form via a Size method.
+type Sizer interface {
+	Size() int
+}
+
+// ProtoSizer is implemented by messages that report the size in bytes of
+// their encoded form via a ProtoSize method.
+type ProtoSizer interface {
+	ProtoSize() int
+}
+
+// MarshalJSONEnum encodes an enum value as JSON, using the symbolic name
+// from m when one is defined and the decimal value otherwise.
+func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) {
+	s, ok := m[value]
+	if !ok {
+		s = strconv.Itoa(int(value))
+	}
+	return json.Marshal(s)
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go
new file mode 100644
index 0000000000..f48a756761
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/message_set.go
@@ -0,0 +1,181 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Support for message sets.
+ */
+
+import (
+	"errors"
+)
+
+// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
+// A message type ID is required for storing a protocol buffer in a message set.
+var errNoMessageTypeID = errors.New("proto does not have a message type ID")
+
+// The first two types (_MessageSet_Item and messageSet)
+// model what the protocol compiler produces for the following protocol message:
+//   message MessageSet {
+//     repeated group Item = 1 {
+//       required int32 type_id = 2;
+//       required string message = 3;
+//     };
+//   }
+// That is the MessageSet wire format. We can't use a proto to generate these
+// because that would introduce a circular dependency between it and this package.
+
+type _MessageSet_Item struct {
+	TypeId  *int32 `protobuf:"varint,2,req,name=type_id"`
+	Message []byte `protobuf:"bytes,3,req,name=message"`
+}
+
+type messageSet struct {
+	Item             []*_MessageSet_Item `protobuf:"group,1,rep"`
+	XXX_unrecognized []byte
+	// TODO: caching?
+}
+
+// Make sure messageSet is a Message.
+var _ Message = (*messageSet)(nil)
+
+// messageTypeIder is an interface satisfied by a protocol buffer type
+// that may be stored in a MessageSet.
+type messageTypeIder interface { + MessageTypeId() int32 +} + +func (ms *messageSet) find(pb Message) *_MessageSet_Item { + mti, ok := pb.(messageTypeIder) + if !ok { + return nil + } + id := mti.MessageTypeId() + for _, item := range ms.Item { + if *item.TypeId == id { + return item + } + } + return nil +} + +func (ms *messageSet) Has(pb Message) bool { + return ms.find(pb) != nil +} + +func (ms *messageSet) Unmarshal(pb Message) error { + if item := ms.find(pb); item != nil { + return Unmarshal(item.Message, pb) + } + if _, ok := pb.(messageTypeIder); !ok { + return errNoMessageTypeID + } + return nil // TODO: return error instead? +} + +func (ms *messageSet) Marshal(pb Message) error { + msg, err := Marshal(pb) + if err != nil { + return err + } + if item := ms.find(pb); item != nil { + // reuse existing item + item.Message = msg + return nil + } + + mti, ok := pb.(messageTypeIder) + if !ok { + return errNoMessageTypeID + } + + mtid := mti.MessageTypeId() + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: &mtid, + Message: msg, + }) + return nil +} + +func (ms *messageSet) Reset() { *ms = messageSet{} } +func (ms *messageSet) String() string { return CompactTextString(ms) } +func (*messageSet) ProtoMessage() {} + +// Support for the message_set_wire_format message option. + +func skipVarint(buf []byte) []byte { + i := 0 + for ; buf[i]&0x80 != 0; i++ { + } + return buf[i+1:] +} + +// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +func unmarshalMessageSet(buf []byte, exts interface{}) error { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m = exts.extensionsWrite() + case map[int32]Extension: + m = exts + default: + return errors.New("proto: not an extension map") + } + + ms := new(messageSet) + if err := Unmarshal(buf, ms); err != nil { + return err + } + for _, item := range ms.Item { + id := *item.TypeId + msg := item.Message + + // Restore wire type and field number varint, plus length varint. + // Be careful to preserve duplicate items. + b := EncodeVarint(uint64(id)<<3 | WireBytes) + if ext, ok := m[id]; ok { + // Existing data; rip off the tag and length varint + // so we join the new data correctly. + // We can assume that ext.enc is set because we are unmarshaling. + o := ext.enc[len(b):] // skip wire type and field number + _, n := DecodeVarint(o) // calculate length of length varint + o = o[n:] // skip length varint + msg = append(o, msg...) // join old data and new data + } + b = append(b, EncodeVarint(uint64(len(msg)))...) + b = append(b, msg...) + + m[id] = Extension{enc: b} + } + return nil +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go new file mode 100644 index 0000000000..b6cad90834 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go @@ -0,0 +1,357 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build purego appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "reflect" + "sync" +) + +const unsafeAllowed = false + +// A field identifies a field in a struct, accessible from a pointer. +// In this implementation, a field is identified by the sequence of field indices +// passed to reflect's FieldByIndex. +type field []int + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return f.Index +} + +// invalidField is an invalid field identifier. +var invalidField = field(nil) + +// zeroField is a noop when calling pointer.offset. +var zeroField = field([]int{}) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { return f != nil } + +// The pointer type is for the table-driven decoder. +// The implementation here uses a reflect.Value of pointer type to +// create a generic pointer. In pointer_unsafe.go we use unsafe +// instead of reflect to implement the same (but faster) interface. +type pointer struct { + v reflect.Value +} + +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + return pointer{v: reflect.ValueOf(*i)} +} + +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + v := reflect.ValueOf(*i) + u := reflect.New(v.Type()) + u.Elem().Set(v) + return pointer{v: u} +} + +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{v: v} +} + +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} +} + +func (p pointer) isNil() bool { + return p.v.IsNil() +} + +// grow updates the slice s in place to make it one element longer. +// s must be addressable. +// Returns the (addressable) new element. 
+func grow(s reflect.Value) reflect.Value { + n, m := s.Len(), s.Cap() + if n < m { + s.SetLen(n + 1) + } else { + s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) + } + return s.Index(n) +} + +func (p pointer) toInt64() *int64 { + return p.v.Interface().(*int64) +} +func (p pointer) toInt64Ptr() **int64 { + return p.v.Interface().(**int64) +} +func (p pointer) toInt64Slice() *[]int64 { + return p.v.Interface().(*[]int64) +} + +var int32ptr = reflect.TypeOf((*int32)(nil)) + +func (p pointer) toInt32() *int32 { + return p.v.Convert(int32ptr).Interface().(*int32) +} + +// The toInt32Ptr/Slice methods don't work because of enums. +// Instead, we must use set/get methods for the int32ptr/slice case. +/* + func (p pointer) toInt32Ptr() **int32 { + return p.v.Interface().(**int32) +} + func (p pointer) toInt32Slice() *[]int32 { + return p.v.Interface().(*[]int32) +} +*/ +func (p pointer) getInt32Ptr() *int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().(*int32) + } + // an enum + return p.v.Elem().Convert(int32PtrType).Interface().(*int32) +} +func (p pointer) setInt32Ptr(v int32) { + // Allocate value in a *int32. Possibly convert that to a *enum. + // Then assign it to a **int32 or **enum. + // Note: we can convert *int32 to *enum, but we can't convert + // **int32 to **enum! + p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) +} + +// getInt32Slice copies []int32 from p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getInt32Slice() []int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().([]int32) + } + // an enum + // Allocate a []int32, then assign []enum's values into it. + // Note: we can't convert []enum to []int32. + slice := p.v.Elem() + s := make([]int32, slice.Len()) + for i := 0; i < slice.Len(); i++ { + s[i] = int32(slice.Index(i).Int()) + } + return s +} + +// setInt32Slice copies []int32 into p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setInt32Slice(v []int32) { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + p.v.Elem().Set(reflect.ValueOf(v)) + return + } + // an enum + // Allocate a []enum, then assign []int32's values into it. + // Note: we can't convert []enum to []int32. 
+ slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) + for i, x := range v { + slice.Index(i).SetInt(int64(x)) + } + p.v.Elem().Set(slice) +} +func (p pointer) appendInt32Slice(v int32) { + grow(p.v.Elem()).SetInt(int64(v)) +} + +func (p pointer) toUint64() *uint64 { + return p.v.Interface().(*uint64) +} +func (p pointer) toUint64Ptr() **uint64 { + return p.v.Interface().(**uint64) +} +func (p pointer) toUint64Slice() *[]uint64 { + return p.v.Interface().(*[]uint64) +} +func (p pointer) toUint32() *uint32 { + return p.v.Interface().(*uint32) +} +func (p pointer) toUint32Ptr() **uint32 { + return p.v.Interface().(**uint32) +} +func (p pointer) toUint32Slice() *[]uint32 { + return p.v.Interface().(*[]uint32) +} +func (p pointer) toBool() *bool { + return p.v.Interface().(*bool) +} +func (p pointer) toBoolPtr() **bool { + return p.v.Interface().(**bool) +} +func (p pointer) toBoolSlice() *[]bool { + return p.v.Interface().(*[]bool) +} +func (p pointer) toFloat64() *float64 { + return p.v.Interface().(*float64) +} +func (p pointer) toFloat64Ptr() **float64 { + return p.v.Interface().(**float64) +} +func (p pointer) toFloat64Slice() *[]float64 { + return p.v.Interface().(*[]float64) +} +func (p pointer) toFloat32() *float32 { + return p.v.Interface().(*float32) +} +func (p pointer) toFloat32Ptr() **float32 { + return p.v.Interface().(**float32) +} +func (p pointer) toFloat32Slice() *[]float32 { + return p.v.Interface().(*[]float32) +} +func (p pointer) toString() *string { + return p.v.Interface().(*string) +} +func (p pointer) toStringPtr() **string { + return p.v.Interface().(**string) +} +func (p pointer) toStringSlice() *[]string { + return p.v.Interface().(*[]string) +} +func (p pointer) toBytes() *[]byte { + return p.v.Interface().(*[]byte) +} +func (p pointer) toBytesSlice() *[][]byte { + return p.v.Interface().(*[][]byte) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return p.v.Interface().(*XXX_InternalExtensions) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return p.v.Interface().(*map[int32]Extension) +} +func (p pointer) getPointer() pointer { + return pointer{v: p.v.Elem()} +} +func (p pointer) setPointer(q pointer) { + p.v.Elem().Set(q.v) +} +func (p pointer) appendPointer(q pointer) { + grow(p.v.Elem()).Set(q.v) +} + +// getPointerSlice copies []*T from p as a new []pointer. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getPointerSlice() []pointer { + if p.v.IsNil() { + return nil + } + n := p.v.Elem().Len() + s := make([]pointer, n) + for i := 0; i < n; i++ { + s[i] = pointer{v: p.v.Elem().Index(i)} + } + return s +} + +// setPointerSlice copies []pointer into p as a new []*T. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setPointerSlice(v []pointer) { + if v == nil { + p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) + return + } + s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) + for _, p := range v { + s = reflect.Append(s, p.v) + } + p.v.Elem().Set(s) +} + +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + if p.v.Elem().IsNil() { + return pointer{v: p.v.Elem()} + } + return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct +} + +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + // TODO: check that p.v.Type().Elem() == t? 
+ return p.v +} + +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} + +var atomicLock sync.Mutex diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go new file mode 100644 index 0000000000..7ffd3c29d9 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go @@ -0,0 +1,59 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build purego appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "reflect" +) + +// TODO: untested, so probably incorrect. 
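+//
+// A minimal sketch (illustrative only, not upstream commentary): in this
+// build the pointer type wraps a reflect.Value, so a field access such as
+// pointer.offset reduces to ordinary reflection, roughly:
+//
+//	v := reflect.ValueOf(msg).Elem() // struct behind the pointer
+//	f := v.FieldByIndex(idx).Addr()  // *T for the field selected by idx
+//
+// where idx is the []int index sequence produced by toField.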
+ +func (p pointer) getRef() pointer { + return pointer{v: p.v.Addr()} +} + +func (p pointer) appendRef(v pointer, typ reflect.Type) { + slice := p.getSlice(typ) + elem := v.asPointerTo(typ).Elem() + newSlice := reflect.Append(slice, elem) + slice.Set(newSlice) +} + +func (p pointer) getSlice(typ reflect.Type) reflect.Value { + sliceTyp := reflect.SliceOf(typ) + slice := p.asPointerTo(sliceTyp) + slice = slice.Elem() + return slice +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go new file mode 100644 index 0000000000..d55a335d94 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go @@ -0,0 +1,308 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !purego,!appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. + +package proto + +import ( + "reflect" + "sync/atomic" + "unsafe" +) + +const unsafeAllowed = true + +// A field identifies a field in a struct, accessible from a pointer. +// In this implementation, a field is identified by its byte offset from the start of the struct. +type field uintptr + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return field(f.Offset) +} + +// invalidField is an invalid field identifier. +const invalidField = ^field(0) + +// zeroField is a noop when calling pointer.offset. +const zeroField = field(0) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { + return f != invalidField +} + +// The pointer type below is for the new table-driven encoder/decoder. +// The implementation here uses unsafe.Pointer to create a generic pointer. +// In pointer_reflect.go we use reflect instead of unsafe to implement +// the same (but slower) interface. 
+type pointer struct { + p unsafe.Pointer +} + +// size of pointer +var ptrSize = unsafe.Sizeof(uintptr(0)) + +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + // Super-tricky - read pointer out of data word of interface value. + // Saves ~25ns over the equivalent: + // return valToPointer(reflect.ValueOf(*i)) + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} +} + +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + // Super-tricky - read or get the address of data word of interface value. + if isptr { + // The interface is of pointer type, thus it is a direct interface. + // The data word is the pointer data itself. We take its address. + return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} + } + // The interface is not of pointer type. The data word is the pointer + // to the data. + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} +} + +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{p: unsafe.Pointer(v.Pointer())} +} + +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + // For safety, we should panic if !f.IsValid, however calling panic causes + // this to no longer be inlineable, which is a serious performance cost. + /* + if !f.IsValid() { + panic("invalid field") + } + */ + return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} +} + +func (p pointer) isNil() bool { + return p.p == nil +} + +func (p pointer) toInt64() *int64 { + return (*int64)(p.p) +} +func (p pointer) toInt64Ptr() **int64 { + return (**int64)(p.p) +} +func (p pointer) toInt64Slice() *[]int64 { + return (*[]int64)(p.p) +} +func (p pointer) toInt32() *int32 { + return (*int32)(p.p) +} + +// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. +/* + func (p pointer) toInt32Ptr() **int32 { + return (**int32)(p.p) + } + func (p pointer) toInt32Slice() *[]int32 { + return (*[]int32)(p.p) + } +*/ +func (p pointer) getInt32Ptr() *int32 { + return *(**int32)(p.p) +} +func (p pointer) setInt32Ptr(v int32) { + *(**int32)(p.p) = &v +} + +// getInt32Slice loads a []int32 from p. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getInt32Slice() []int32 { + return *(*[]int32)(p.p) +} + +// setInt32Slice stores a []int32 to p. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setInt32Slice(v []int32) { + *(*[]int32)(p.p) = v +} + +// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? 
+func (p pointer) appendInt32Slice(v int32) { + s := (*[]int32)(p.p) + *s = append(*s, v) +} + +func (p pointer) toUint64() *uint64 { + return (*uint64)(p.p) +} +func (p pointer) toUint64Ptr() **uint64 { + return (**uint64)(p.p) +} +func (p pointer) toUint64Slice() *[]uint64 { + return (*[]uint64)(p.p) +} +func (p pointer) toUint32() *uint32 { + return (*uint32)(p.p) +} +func (p pointer) toUint32Ptr() **uint32 { + return (**uint32)(p.p) +} +func (p pointer) toUint32Slice() *[]uint32 { + return (*[]uint32)(p.p) +} +func (p pointer) toBool() *bool { + return (*bool)(p.p) +} +func (p pointer) toBoolPtr() **bool { + return (**bool)(p.p) +} +func (p pointer) toBoolSlice() *[]bool { + return (*[]bool)(p.p) +} +func (p pointer) toFloat64() *float64 { + return (*float64)(p.p) +} +func (p pointer) toFloat64Ptr() **float64 { + return (**float64)(p.p) +} +func (p pointer) toFloat64Slice() *[]float64 { + return (*[]float64)(p.p) +} +func (p pointer) toFloat32() *float32 { + return (*float32)(p.p) +} +func (p pointer) toFloat32Ptr() **float32 { + return (**float32)(p.p) +} +func (p pointer) toFloat32Slice() *[]float32 { + return (*[]float32)(p.p) +} +func (p pointer) toString() *string { + return (*string)(p.p) +} +func (p pointer) toStringPtr() **string { + return (**string)(p.p) +} +func (p pointer) toStringSlice() *[]string { + return (*[]string)(p.p) +} +func (p pointer) toBytes() *[]byte { + return (*[]byte)(p.p) +} +func (p pointer) toBytesSlice() *[][]byte { + return (*[][]byte)(p.p) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return (*XXX_InternalExtensions)(p.p) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return (*map[int32]Extension)(p.p) +} + +// getPointerSlice loads []*T from p as a []pointer. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getPointerSlice() []pointer { + // Super-tricky - p should point to a []*T where T is a + // message type. We load it as []pointer. + return *(*[]pointer)(p.p) +} + +// setPointerSlice stores []pointer into p as a []*T. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setPointerSlice(v []pointer) { + // Super-tricky - p should point to a []*T where T is a + // message type. We store it as []pointer. + *(*[]pointer)(p.p) = v +} + +// getPointer loads the pointer at p and returns it. +func (p pointer) getPointer() pointer { + return pointer{p: *(*unsafe.Pointer)(p.p)} +} + +// setPointer stores the pointer q at p. +func (p pointer) setPointer(q pointer) { + *(*unsafe.Pointer)(p.p) = q.p +} + +// append q to the slice pointed to by p. +func (p pointer) appendPointer(q pointer) { + s := (*[]unsafe.Pointer)(p.p) + *s = append(*s, q.p) +} + +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + // Super-tricky - read pointer out of data word of interface value. + return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} +} + +// asPointerTo returns a reflect.Value that is a pointer to an +// object of type t stored at p. 
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + return reflect.NewAt(t, p.p) +} + +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go new file mode 100644 index 0000000000..aca8eed02a --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go @@ -0,0 +1,56 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !purego,!appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. 
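+//
+// A minimal sketch (illustrative only, not upstream commentary): with
+// package unsafe available, a field access is raw address arithmetic rather
+// than reflection, roughly:
+//
+//	fp := unsafe.Pointer(uintptr(p.p) + uintptr(f)) // base + field offset
+//	n := (*int64)(fp)                               // view the field as *int64
+//
+// which is what pointer.offset and the to* accessors in pointer_unsafe.go do.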
+ +package proto + +import ( + "reflect" + "unsafe" +) + +func (p pointer) getRef() pointer { + return pointer{p: (unsafe.Pointer)(&p.p)} +} + +func (p pointer) appendRef(v pointer, typ reflect.Type) { + slice := p.getSlice(typ) + elem := v.asPointerTo(typ).Elem() + newSlice := reflect.Append(slice, elem) + slice.Set(newSlice) +} + +func (p pointer) getSlice(typ reflect.Type) reflect.Value { + sliceTyp := reflect.SliceOf(typ) + slice := p.asPointerTo(sliceTyp) + slice = slice.Elem() + return slice +} diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go new file mode 100644 index 0000000000..28da1475fb --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/properties.go @@ -0,0 +1,610 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "fmt" + "log" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. +const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. 
+const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. +type StructProperties struct { + Prop []*Properties // properties for each field + reqCount int // required count + decoderTags tagMap // map from proto tag to struct field number + decoderOrigNames map[string]int // map from original name to struct field number + order []int // list of struct field numbers in tag order + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the original name of a field. + OneofTypes map[string]*OneofProperties +} + +// OneofProperties represents information about a specific field in a oneof. +type OneofProperties struct { + Type reflect.Type // pointer to generated struct type for this oneof field + Field int // struct field number of the containing oneof in the message + Prop *Properties +} + +// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. +// See encode.go, (*Buffer).enc_struct. + +func (sp *StructProperties) Len() int { return len(sp.order) } +func (sp *StructProperties) Less(i, j int) bool { + return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag +} +func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } + +// Properties represents the protocol-specific behavior of a single struct field. +type Properties struct { + Name string // name of the field, for error messages + OrigName string // original name before protocol compiler (always set) + JSONName string // name to use for JSON; determined by protoc + Wire string + WireType int + Tag int + Required bool + Optional bool + Repeated bool + Packed bool // relevant for repeated primitives only + Enum string // set for enum types only + proto3 bool // whether this is known to be a proto3 field + oneof bool // whether this is a oneof field + + Default string // default value + HasDefault bool // whether an explicit default was provided + CustomType string + CastType string + StdTime bool + StdDuration bool + WktPointer bool + + stype reflect.Type // set for struct types only + ctype reflect.Type // set for custom types only + sprop *StructProperties // set for struct types only + + mtype reflect.Type // set for map types only + MapKeyProp *Properties // set for map types only + MapValProp *Properties // set for map types only +} + +// String formats the properties in the protobuf struct field tag style. 
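+// For example, a required varint field named "foo" with tag 1 renders as
+// "varint,1,req,name=foo".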
+func (p *Properties) String() string { + s := p.Wire + s += "," + s += strconv.Itoa(p.Tag) + if p.Required { + s += ",req" + } + if p.Optional { + s += ",opt" + } + if p.Repeated { + s += ",rep" + } + if p.Packed { + s += ",packed" + } + s += ",name=" + p.OrigName + if p.JSONName != p.OrigName { + s += ",json=" + p.JSONName + } + if p.proto3 { + s += ",proto3" + } + if p.oneof { + s += ",oneof" + } + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if p.HasDefault { + s += ",def=" + p.Default + } + return s +} + +// Parse populates p by parsing a string in the protobuf struct field tag style. +func (p *Properties) Parse(s string) { + // "bytes,49,opt,name=foo,def=hello!" + fields := strings.Split(s, ",") // breaks def=, but handled below. + if len(fields) < 2 { + log.Printf("proto: tag has too few fields: %q", s) + return + } + + p.Wire = fields[0] + switch p.Wire { + case "varint": + p.WireType = WireVarint + case "fixed32": + p.WireType = WireFixed32 + case "fixed64": + p.WireType = WireFixed64 + case "zigzag32": + p.WireType = WireVarint + case "zigzag64": + p.WireType = WireVarint + case "bytes", "group": + p.WireType = WireBytes + // no numeric converter for non-numeric types + default: + log.Printf("proto: tag has unknown wire type: %q", s) + return + } + + var err error + p.Tag, err = strconv.Atoi(fields[1]) + if err != nil { + return + } + +outer: + for i := 2; i < len(fields); i++ { + f := fields[i] + switch { + case f == "req": + p.Required = true + case f == "opt": + p.Optional = true + case f == "rep": + p.Repeated = true + case f == "packed": + p.Packed = true + case strings.HasPrefix(f, "name="): + p.OrigName = f[5:] + case strings.HasPrefix(f, "json="): + p.JSONName = f[5:] + case strings.HasPrefix(f, "enum="): + p.Enum = f[5:] + case f == "proto3": + p.proto3 = true + case f == "oneof": + p.oneof = true + case strings.HasPrefix(f, "def="): + p.HasDefault = true + p.Default = f[4:] // rest of string + if i+1 < len(fields) { + // Commas aren't escaped, and def is always last. + p.Default += "," + strings.Join(fields[i+1:], ",") + break outer + } + case strings.HasPrefix(f, "embedded="): + p.OrigName = strings.Split(f, "=")[1] + case strings.HasPrefix(f, "customtype="): + p.CustomType = strings.Split(f, "=")[1] + case strings.HasPrefix(f, "casttype="): + p.CastType = strings.Split(f, "=")[1] + case f == "stdtime": + p.StdTime = true + case f == "stdduration": + p.StdDuration = true + case f == "wktptr": + p.WktPointer = true + } + } +} + +var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() + +// setFieldProps initializes the field properties for submessages and maps. 
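+// It unwraps pointer and slice types to find an underlying struct type, and
+// for maps it builds key and value sub-properties from the protobuf_key and
+// protobuf_val struct tags.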
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { + isMap := typ.Kind() == reflect.Map + if len(p.CustomType) > 0 && !isMap { + p.ctype = typ + p.setTag(lockGetProp) + return + } + if p.StdTime && !isMap { + p.setTag(lockGetProp) + return + } + if p.StdDuration && !isMap { + p.setTag(lockGetProp) + return + } + if p.WktPointer && !isMap { + p.setTag(lockGetProp) + return + } + switch t1 := typ; t1.Kind() { + case reflect.Struct: + p.stype = typ + case reflect.Ptr: + if t1.Elem().Kind() == reflect.Struct { + p.stype = t1.Elem() + } + case reflect.Slice: + switch t2 := t1.Elem(); t2.Kind() { + case reflect.Ptr: + switch t3 := t2.Elem(); t3.Kind() { + case reflect.Struct: + p.stype = t3 + } + case reflect.Struct: + p.stype = t2 + } + + case reflect.Map: + + p.mtype = t1 + p.MapKeyProp = &Properties{} + p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.MapValProp = &Properties{} + vtype := p.mtype.Elem() + if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { + // The value type is not a message (*T) or bytes ([]byte), + // so we need encoders for the pointer to this type. + vtype = reflect.PtrTo(vtype) + } + + p.MapValProp.CustomType = p.CustomType + p.MapValProp.StdDuration = p.StdDuration + p.MapValProp.StdTime = p.StdTime + p.MapValProp.WktPointer = p.WktPointer + p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + } + p.setTag(lockGetProp) +} + +func (p *Properties) setTag(lockGetProp bool) { + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() +) + +// Init populates the properties from a protocol buffer struct tag. +func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if tag == "" { + return + } + p.Parse(tag) + p.setFieldProps(typ, f, lockGetProp) +} + +var ( + propertiesMu sync.RWMutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. +// t must represent a generated struct type of a protocol message. +func GetProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic("proto: type must have kind struct") + } + + // Most calls to GetProperties in a long-running program will be + // retrieving details for types we have seen before. + propertiesMu.RLock() + sprop, ok := propertiesMap[t] + propertiesMu.RUnlock() + if ok { + return sprop + } + + propertiesMu.Lock() + sprop = getPropertiesLocked(t) + propertiesMu.Unlock() + return sprop +} + +type ( + oneofFuncsIface interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + oneofWrappersIface interface { + XXX_OneofWrappers() []interface{} + } +) + +// getPropertiesLocked requires that propertiesMu is held. +func getPropertiesLocked(t reflect.Type) *StructProperties { + if prop, ok := propertiesMap[t]; ok { + return prop + } + + prop := new(StructProperties) + // in case of recursive protos, fill this in now. 
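+	// Storing the partially built entry before descending into the field
+	// types means a self-referential message finds it here instead of
+	// recursing forever.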
+ propertiesMap[t] = prop + + // build properties + prop.Prop = make([]*Properties, t.NumField()) + prop.order = make([]int, t.NumField()) + + isOneofMessage := false + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + p := new(Properties) + name := f.Name + p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + + oneof := f.Tag.Get("protobuf_oneof") // special case + if oneof != "" { + isOneofMessage = true + // Oneof fields don't use the traditional protobuf tag. + p.OrigName = oneof + } + prop.Prop[i] = p + prop.order[i] = i + if debug { + print(i, " ", f.Name, " ", t.String(), " ") + if p.Tag > 0 { + print(p.String()) + } + print("\n") + } + } + + // Re-order prop.order. + sort.Sort(prop) + + if isOneofMessage { + var oots []interface{} + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oots = m.XXX_OneofFuncs() + case oneofWrappersIface: + oots = m.XXX_OneofWrappers() + } + if len(oots) > 0 { + // Interpret oneof metadata. + prop.OneofTypes = make(map[string]*OneofProperties) + for _, oot := range oots { + oop := &OneofProperties{ + Type: reflect.ValueOf(oot).Type(), // *T + Prop: new(Properties), + } + sft := oop.Type.Elem().Field(0) + oop.Prop.Name = sft.Name + oop.Prop.Parse(sft.Tag.Get("protobuf")) + // There will be exactly one interface field that + // this new value is assignable to. + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type.Kind() != reflect.Interface { + continue + } + if !oop.Type.AssignableTo(f.Type) { + continue + } + oop.Field = i + break + } + prop.OneofTypes[oop.Prop.OrigName] = oop + } + } + } + + // build required counts + // build tags + reqCount := 0 + prop.decoderOrigNames = make(map[string]int) + for i, p := range prop.Prop { + if strings.HasPrefix(p.Name, "XXX_") { + // Internal fields should not appear in tags/origNames maps. + // They are handled specially when encoding and decoding. + continue + } + if p.Required { + reqCount++ + } + prop.decoderTags.put(p.Tag, i) + prop.decoderOrigNames[p.OrigName] = i + } + prop.reqCount = reqCount + + return prop +} + +// A global registry of enum types. +// The generated code will register the generated maps by calling RegisterEnum. + +var enumValueMaps = make(map[string]map[string]int32) +var enumStringMaps = make(map[string]map[int32]string) + +// RegisterEnum is called from the generated code to install the enum descriptor +// maps into the global table to aid parsing text format protocol buffers. +func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { + if _, ok := enumValueMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumValueMaps[typeName] = valueMap + if _, ok := enumStringMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumStringMaps[typeName] = unusedNameMap +} + +// EnumValueMap returns the mapping from names to integers of the +// enum type enumType, or a nil if not found. +func EnumValueMap(enumType string) map[string]int32 { + return enumValueMaps[enumType] +} + +// A registry of all linked message types. +// The string is a fully-qualified proto name ("pkg.Message"). 
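+// RegisterType and RegisterMapType fill these tables from generated code;
+// MessageName and MessageType perform the reverse lookups.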
+var ( + protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers + protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types + revProtoTypes = make(map[reflect.Type]string) +) + +// RegisterType is called from generated code and maps from the fully qualified +// proto name to the type (pointer to struct) of the protocol buffer. +func RegisterType(x Message, name string) { + if _, ok := protoTypedNils[name]; ok { + // TODO: Some day, make this a panic. + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { + // Generated code always calls RegisterType with nil x. + // This check is just for extra safety. + protoTypedNils[name] = x + } else { + protoTypedNils[name] = reflect.Zero(t).Interface().(Message) + } + revProtoTypes[t] = name +} + +// RegisterMapType is called from generated code and maps from the fully qualified +// proto name to the native map type of the proto map definition. +func RegisterMapType(x interface{}, name string) { + if reflect.TypeOf(x).Kind() != reflect.Map { + panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) + } + if _, ok := protoMapTypes[name]; ok { + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoMapTypes[name] = t + revProtoTypes[t] = name +} + +// MessageName returns the fully-qualified proto name for the given message type. +func MessageName(x Message) string { + type xname interface { + XXX_MessageName() string + } + if m, ok := x.(xname); ok { + return m.XXX_MessageName() + } + return revProtoTypes[reflect.TypeOf(x)] +} + +// MessageType returns the message type (pointer to struct) for a named message. +// The type is not guaranteed to implement proto.Message if the name refers to a +// map entry. +func MessageType(name string) reflect.Type { + if t, ok := protoTypedNils[name]; ok { + return reflect.TypeOf(t) + } + return protoMapTypes[name] +} + +// A registry of all linked proto files. +var ( + protoFiles = make(map[string][]byte) // file name => fileDescriptor +) + +// RegisterFile is called from generated code and maps from the +// full file name of a .proto file to its compressed FileDescriptorProto. +func RegisterFile(filename string, fileDescriptor []byte) { + protoFiles[filename] = fileDescriptor +} + +// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. +func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go new file mode 100644 index 0000000000..40ea3dd935 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go @@ -0,0 +1,36 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" +) + +var sizerType = reflect.TypeOf((*Sizer)(nil)).Elem() +var protosizerType = reflect.TypeOf((*ProtoSizer)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go new file mode 100644 index 0000000000..5a5fd93f7c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go @@ -0,0 +1,119 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "fmt" + "io" +) + +func Skip(data []byte) (n int, err error) { + l := len(data) + index := 0 + for index < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + index++ + if data[index-1] < 0x80 { + break + } + } + return index, nil + case 1: + index += 8 + return index, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + index += length + return index, nil + case 3: + for { + var innerWire uint64 + var start int = index + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := Skip(data[start:]) + if err != nil { + return 0, err + } + index = start + next + } + return index, nil + case 4: + return index, nil + case 5: + index += 4 + return index, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/vendor/github.com/gogo/protobuf/proto/table_marshal.go new file mode 100644 index 0000000000..f8babdefab --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_marshal.go @@ -0,0 +1,3009 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+package proto
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"unicode/utf8"
+)
+
+// a sizer takes a pointer to a field and the size of its tag, and computes the
+// size of the encoded data.
+type sizer func(pointer, int) int
+
+// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format),
+// marshals the field to the end of the slice, and returns the slice and error (if any).
+type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error)
+
+// marshalInfo is the information used for marshaling a message.
+type marshalInfo struct {
+	typ          reflect.Type
+	fields       []*marshalFieldInfo
+	unrecognized field                      // offset of XXX_unrecognized
+	extensions   field                      // offset of XXX_InternalExtensions
+	v1extensions field                      // offset of XXX_extensions
+	sizecache    field                      // offset of XXX_sizecache
+	initialized  int32                      // 0 -- only typ is set, 1 -- fully initialized
+	messageset   bool                       // uses message set wire format
+	hasmarshaler bool                       // has custom marshaler
+	sync.RWMutex                            // protect extElems map, also for initialization
+	extElems     map[int32]*marshalElemInfo // info of extension elements
+
+	hassizer      bool // has custom sizer
+	hasprotosizer bool // has custom protosizer
+
+	bytesExtensions field // offset of XXX_extensions where the field type is []byte
+}
+
+// marshalFieldInfo is the information used for marshaling a field of a message.
+type marshalFieldInfo struct {
+	field      field
+	wiretag    uint64 // tag in wire format
+	tagsize    int    // size of tag in wire format
+	sizer      sizer
+	marshaler  marshaler
+	isPointer  bool
+	required   bool                              // field is required
+	name       string                            // name of the field, for error reporting
+	oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements
+}
+
+// marshalElemInfo is the information used for marshaling an extension or oneof element.
+type marshalElemInfo struct {
+	wiretag   uint64 // tag in wire format
+	tagsize   int    // size of tag in wire format
+	sizer     sizer
+	marshaler marshaler
+	isptr     bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
+}
+
+var (
+	marshalInfoMap  = map[reflect.Type]*marshalInfo{}
+	marshalInfoLock sync.Mutex
+
+	uint8SliceType = reflect.TypeOf(([]uint8)(nil)).Kind()
+)
+
+// getMarshalInfo returns the information to marshal a given type of message.
+// The info it returns is not necessarily initialized.
+// t is the type of the message (NOT the pointer to it).
+func getMarshalInfo(t reflect.Type) *marshalInfo {
+	marshalInfoLock.Lock()
+	u, ok := marshalInfoMap[t]
+	if !ok {
+		u = &marshalInfo{typ: t}
+		marshalInfoMap[t] = u
+	}
+	marshalInfoLock.Unlock()
+	return u
+}
+
+// Size is the entry point from generated code,
+// and should be ONLY called by generated code.
+// It computes the size of encoded data of msg.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Size(msg Message) int {
+	u := getMessageMarshalInfo(msg, a)
+	ptr := toPointer(&msg)
+	if ptr.isNil() {
+		// We get here if msg is a typed nil ((*SomeMessage)(nil)),
+		// so it satisfies the interface, and msg == nil wouldn't
+		// catch it. We don't want to crash in this case.
+		return 0
+	}
+	return u.size(ptr)
+}
+
+// Marshal is the entry point from generated code,
+// and should be ONLY called by generated code.
+// It marshals msg to the end of b.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) {
+	u := getMessageMarshalInfo(msg, a)
+	ptr := toPointer(&msg)
+	if ptr.isNil() {
+		// We get here if msg is a typed nil ((*SomeMessage)(nil)),
+		// so it satisfies the interface, and msg == nil wouldn't
+		// catch it. We don't want to crash in this case.
+		return b, ErrNil
+	}
+	return u.marshal(b, ptr, deterministic)
+}
+
+func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo {
+	// u := a.marshal, but atomically.
+	// We use an atomic here to ensure memory consistency.
+	u := atomicLoadMarshalInfo(&a.marshal)
+	if u == nil {
+		// Get marshal information from type of message.
+		t := reflect.ValueOf(msg).Type()
+		if t.Kind() != reflect.Ptr {
+			panic(fmt.Sprintf("cannot handle non-pointer message type %v", t))
+		}
+		u = getMarshalInfo(t.Elem())
+		// Store it in the cache for later users.
+		// a.marshal = u, but atomically.
+		atomicStoreMarshalInfo(&a.marshal, u)
+	}
+	return u
+}
+
+// size is the main function to compute the size of the encoded data of a message.
+// ptr is the pointer to the message.
+func (u *marshalInfo) size(ptr pointer) int {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeMarshalInfo()
+	}
+
+	// If the message can marshal itself, let it do it, for compatibility.
+	// NOTE: This is not efficient.
+	if u.hasmarshaler {
+		// Uses the message's Size method if available
+		if u.hassizer {
+			s := ptr.asPointerTo(u.typ).Interface().(Sizer)
+			return s.Size()
+		}
+		// Uses the message's ProtoSize method if available
+		if u.hasprotosizer {
+			s := ptr.asPointerTo(u.typ).Interface().(ProtoSizer)
+			return s.ProtoSize()
+		}
+
+		m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+		b, _ := m.Marshal()
+		return len(b)
+	}
+
+	n := 0
+	for _, f := range u.fields {
+		if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+			// nil pointer always marshals to nothing
+			continue
+		}
+		n += f.sizer(ptr.offset(f.field), f.tagsize)
+	}
+	if u.extensions.IsValid() {
+		e := ptr.offset(u.extensions).toExtensions()
+		if u.messageset {
+			n += u.sizeMessageSet(e)
+		} else {
+			n += u.sizeExtensions(e)
+		}
+	}
+	if u.v1extensions.IsValid() {
+		m := *ptr.offset(u.v1extensions).toOldExtensions()
+		n += u.sizeV1Extensions(m)
+	}
+	if u.bytesExtensions.IsValid() {
+		s := *ptr.offset(u.bytesExtensions).toBytes()
+		n += len(s)
+	}
+	if u.unrecognized.IsValid() {
+		s := *ptr.offset(u.unrecognized).toBytes()
+		n += len(s)
+	}
+
+	// cache the result for use in marshal
+	if u.sizecache.IsValid() {
+		atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n))
+	}
+	return n
+}
+
+// cachedsize gets the size from the cache. If there is no cache (i.e. the message
+// is not generated), it falls back to computing the size.
+func (u *marshalInfo) cachedsize(ptr pointer) int {
+	if u.sizecache.IsValid() {
+		return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32()))
+	}
+	return u.size(ptr)
+}
+
+// marshal is the main function to marshal a message. It takes a byte slice, appends
+// the encoded data to the end of the slice, and returns the slice and error (if any).
+// ptr is the pointer to the message.
+// If deterministic is true, maps are marshaled in deterministic order.
+func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeMarshalInfo()
+	}
+
+	// If the message can marshal itself, let it do it, for compatibility.
+	// NOTE: This is not efficient.
+ if u.hasmarshaler { + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b1, err := m.Marshal() + b = append(b, b1...) + return b, err + } + + var err, errLater error + // The old marshaler encodes extensions at beginning. + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + b, err = u.appendMessageSet(b, e, deterministic) + } else { + b, err = u.appendExtensions(b, e, deterministic) + } + if err != nil { + return b, err + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + b, err = u.appendV1Extensions(b, m, deterministic) + if err != nil { + return b, err + } + } + if u.bytesExtensions.IsValid() { + s := *ptr.offset(u.bytesExtensions).toBytes() + b = append(b, s...) + } + for _, f := range u.fields { + if f.required { + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // Required field is not set. + // We record the error but keep going, to give a complete marshaling. + if errLater == nil { + errLater = &RequiredNotSetError{f.name} + } + continue + } + } + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) + if err != nil { + if err1, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errLater == nil { + errLater = &RequiredNotSetError{f.name + "." + err1.field} + } + continue + } + if err == errRepeatedHasNil { + err = errors.New("proto: repeated field " + f.name + " has nil element") + } + if err == errInvalidUTF8 { + if errLater == nil { + fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name + errLater = &invalidUTF8Error{fullName} + } + continue + } + return b, err + } + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + b = append(b, s...) + } + return b, errLater +} + +// computeMarshalInfo initializes the marshal info. +func (u *marshalInfo) computeMarshalInfo() { + u.Lock() + defer u.Unlock() + if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock + return + } + + t := u.typ + u.unrecognized = invalidField + u.extensions = invalidField + u.v1extensions = invalidField + u.bytesExtensions = invalidField + u.sizecache = invalidField + isOneofMessage := false + + if reflect.PtrTo(t).Implements(sizerType) { + u.hassizer = true + } + if reflect.PtrTo(t).Implements(protosizerType) { + u.hasprotosizer = true + } + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. 
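+	// A type with a custom Marshaler bypasses the table-driven encoder
+	// entirely: no per-field info is computed, and marshal/size delegate
+	// to the message's own methods.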
+ if reflect.PtrTo(t).Implements(marshalerType) { + u.hasmarshaler = true + atomic.StoreInt32(&u.initialized, 1) + return + } + + n := t.NumField() + + // deal with XXX fields first + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Tag.Get("protobuf_oneof") != "" { + isOneofMessage = true + } + if !strings.HasPrefix(f.Name, "XXX_") { + continue + } + switch f.Name { + case "XXX_sizecache": + u.sizecache = toField(&f) + case "XXX_unrecognized": + u.unrecognized = toField(&f) + case "XXX_InternalExtensions": + u.extensions = toField(&f) + u.messageset = f.Tag.Get("protobuf_messageset") == "1" + case "XXX_extensions": + if f.Type.Kind() == reflect.Map { + u.v1extensions = toField(&f) + } else { + u.bytesExtensions = toField(&f) + } + case "XXX_NoUnkeyedLiteral": + // nothing to do + default: + panic("unknown XXX field: " + f.Name) + } + n-- + } + + // get oneof implementers + var oneofImplementers []interface{} + // gogo: isOneofMessage is needed for embedded oneof messages, without a marshaler and unmarshaler + if isOneofMessage { + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + case oneofWrappersIface: + oneofImplementers = m.XXX_OneofWrappers() + } + } + + // normal fields + fields := make([]marshalFieldInfo, n) // batch allocation + u.fields = make([]*marshalFieldInfo, 0, n) + for i, j := 0, 0; i < t.NumField(); i++ { + f := t.Field(i) + + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + field := &fields[j] + j++ + field.name = f.Name + u.fields = append(u.fields, field) + if f.Tag.Get("protobuf_oneof") != "" { + field.computeOneofFieldInfo(&f, oneofImplementers) + continue + } + if f.Tag.Get("protobuf") == "" { + // field has no tag (not in generated message), ignore it + u.fields = u.fields[:len(u.fields)-1] + j-- + continue + } + field.computeMarshalFieldInfo(&f) + } + + // fields are marshaled in tag order on the wire. + sort.Sort(byTag(u.fields)) + + atomic.StoreInt32(&u.initialized, 1) +} + +// helper for sorting fields by tag +type byTag []*marshalFieldInfo + +func (a byTag) Len() int { return len(a) } +func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } + +// getExtElemInfo returns the information to marshal an extension element. +// The info it returns is initialized. +func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { + // get from cache first + u.RLock() + e, ok := u.extElems[desc.Field] + u.RUnlock() + if ok { + return e + } + + t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct + tags := strings.Split(desc.Tag, ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizr, marshalr := typeMarshaler(t, tags, false, false) + e = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizr, + marshaler: marshalr, + isptr: t.Kind() == reflect.Ptr, + } + + // update cache + u.Lock() + if u.extElems == nil { + u.extElems = make(map[int32]*marshalElemInfo) + } + u.extElems[desc.Field] = e + u.Unlock() + return e +} + +// computeMarshalFieldInfo fills up the information to marshal a field. +func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { + // parse protobuf tag of the field. + // tag has format of "bytes,49,opt,name=foo,def=hello!" 
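+	// tags[0] is the wire encoding and tags[1] the field number; the
+	// remaining entries carry flags such as "req", "opt" and "packed".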
+ tags := strings.Split(f.Tag.Get("protobuf"), ",") + if tags[0] == "" { + return + } + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + if tags[2] == "req" { + fi.required = true + } + fi.setTag(f, tag, wt) + fi.setMarshaler(f, tags) +} + +func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { + fi.field = toField(f) + fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. + fi.isPointer = true + fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) + fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) + + ityp := f.Type // interface type + for _, o := range oneofImplementers { + t := reflect.TypeOf(o) + if !t.Implements(ityp) { + continue + } + sf := t.Elem().Field(0) // oneof implementer is a struct with a single field + tags := strings.Split(sf.Tag.Get("protobuf"), ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizr, marshalr := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value + fi.oneofElems[t.Elem()] = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizr, + marshaler: marshalr, + } + } +} + +// wiretype returns the wire encoding of the type. +func wiretype(encoding string) uint64 { + switch encoding { + case "fixed32": + return WireFixed32 + case "fixed64": + return WireFixed64 + case "varint", "zigzag32", "zigzag64": + return WireVarint + case "bytes": + return WireBytes + case "group": + return WireStartGroup + } + panic("unknown wire type " + encoding) +} + +// setTag fills up the tag (in wire format) and its size in the info of a field. +func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { + fi.field = toField(f) + fi.wiretag = uint64(tag)<<3 | wt + fi.tagsize = SizeVarint(uint64(tag) << 3) +} + +// setMarshaler fills up the sizer and marshaler in the info of a field. +func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { + switch f.Type.Kind() { + case reflect.Map: + // map field + fi.isPointer = true + fi.sizer, fi.marshaler = makeMapMarshaler(f) + return + case reflect.Ptr, reflect.Slice: + fi.isPointer = true + } + fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) +} + +// typeMarshaler returns the sizer and marshaler of a given field. +// t is the type of the field. +// tags is the generated "protobuf" tag of the field. +// If nozero is true, zero value is not marshaled to the wire. +// If oneof is true, it is a oneof field. 
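+// The returned pair is stored in the field's marshalFieldInfo and is invoked
+// through function pointers at encoding time.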
+func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { + encoding := tags[0] + + pointer := false + slice := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + packed := false + proto3 := false + ctype := false + isTime := false + isDuration := false + isWktPointer := false + validateUTF8 := true + for i := 2; i < len(tags); i++ { + if tags[i] == "packed" { + packed = true + } + if tags[i] == "proto3" { + proto3 = true + } + if strings.HasPrefix(tags[i], "customtype=") { + ctype = true + } + if tags[i] == "stdtime" { + isTime = true + } + if tags[i] == "stdduration" { + isDuration = true + } + if tags[i] == "wktptr" { + isWktPointer = true + } + } + validateUTF8 = validateUTF8 && proto3 + if !proto3 && !pointer && !slice { + nozero = false + } + + if ctype { + if reflect.PtrTo(t).Implements(customType) { + if slice { + return makeMessageRefSliceMarshaler(getMarshalInfo(t)) + } + if pointer { + return makeCustomPtrMarshaler(getMarshalInfo(t)) + } + return makeCustomMarshaler(getMarshalInfo(t)) + } else { + panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) + } + } + + if isTime { + if pointer { + if slice { + return makeTimePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeTimePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeTimeSliceMarshaler(getMarshalInfo(t)) + } + return makeTimeMarshaler(getMarshalInfo(t)) + } + + if isDuration { + if pointer { + if slice { + return makeDurationPtrSliceMarshaler(getMarshalInfo(t)) + } + return makeDurationPtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeDurationSliceMarshaler(getMarshalInfo(t)) + } + return makeDurationMarshaler(getMarshalInfo(t)) + } + + if isWktPointer { + switch t.Kind() { + case reflect.Float64: + if pointer { + if slice { + return makeStdDoubleValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdDoubleValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdDoubleValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdDoubleValueMarshaler(getMarshalInfo(t)) + case reflect.Float32: + if pointer { + if slice { + return makeStdFloatValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdFloatValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdFloatValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdFloatValueMarshaler(getMarshalInfo(t)) + case reflect.Int64: + if pointer { + if slice { + return makeStdInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt64ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdInt64ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt64ValueMarshaler(getMarshalInfo(t)) + case reflect.Uint64: + if pointer { + if slice { + return makeStdUInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt64ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdUInt64ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt64ValueMarshaler(getMarshalInfo(t)) + case reflect.Int32: + if pointer { + if slice { + return makeStdInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt32ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdInt32ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt32ValueMarshaler(getMarshalInfo(t)) + case reflect.Uint32: + if pointer { + if slice { + return 
makeStdUInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt32ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdUInt32ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt32ValueMarshaler(getMarshalInfo(t)) + case reflect.Bool: + if pointer { + if slice { + return makeStdBoolValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBoolValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdBoolValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBoolValueMarshaler(getMarshalInfo(t)) + case reflect.String: + if pointer { + if slice { + return makeStdStringValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdStringValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdStringValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdStringValueMarshaler(getMarshalInfo(t)) + case uint8SliceType: + if pointer { + if slice { + return makeStdBytesValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBytesValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdBytesValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBytesValueMarshaler(getMarshalInfo(t)) + default: + panic(fmt.Sprintf("unknown wktpointer type %#v", t)) + } + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return sizeBoolPtr, appendBoolPtr + } + if slice { + if packed { + return sizeBoolPackedSlice, appendBoolPackedSlice + } + return sizeBoolSlice, appendBoolSlice + } + if nozero { + return sizeBoolValueNoZero, appendBoolValueNoZero + } + return sizeBoolValue, appendBoolValue + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixed32Ptr, appendFixed32Ptr + } + if slice { + if packed { + return sizeFixed32PackedSlice, appendFixed32PackedSlice + } + return sizeFixed32Slice, appendFixed32Slice + } + if nozero { + return sizeFixed32ValueNoZero, appendFixed32ValueNoZero + } + return sizeFixed32Value, appendFixed32Value + case "varint": + if pointer { + return sizeVarint32Ptr, appendVarint32Ptr + } + if slice { + if packed { + return sizeVarint32PackedSlice, appendVarint32PackedSlice + } + return sizeVarint32Slice, appendVarint32Slice + } + if nozero { + return sizeVarint32ValueNoZero, appendVarint32ValueNoZero + } + return sizeVarint32Value, appendVarint32Value + } + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixedS32Ptr, appendFixedS32Ptr + } + if slice { + if packed { + return sizeFixedS32PackedSlice, appendFixedS32PackedSlice + } + return sizeFixedS32Slice, appendFixedS32Slice + } + if nozero { + return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero + } + return sizeFixedS32Value, appendFixedS32Value + case "varint": + if pointer { + return sizeVarintS32Ptr, appendVarintS32Ptr + } + if slice { + if packed { + return sizeVarintS32PackedSlice, appendVarintS32PackedSlice + } + return sizeVarintS32Slice, appendVarintS32Slice + } + if nozero { + return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero + } + return sizeVarintS32Value, appendVarintS32Value + case "zigzag32": + if pointer { + return sizeZigzag32Ptr, appendZigzag32Ptr + } + if slice { + if packed { + return sizeZigzag32PackedSlice, appendZigzag32PackedSlice + } + return sizeZigzag32Slice, appendZigzag32Slice + } + if nozero { + return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero + } + return sizeZigzag32Value, appendZigzag32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixed64Ptr, 
appendFixed64Ptr + } + if slice { + if packed { + return sizeFixed64PackedSlice, appendFixed64PackedSlice + } + return sizeFixed64Slice, appendFixed64Slice + } + if nozero { + return sizeFixed64ValueNoZero, appendFixed64ValueNoZero + } + return sizeFixed64Value, appendFixed64Value + case "varint": + if pointer { + return sizeVarint64Ptr, appendVarint64Ptr + } + if slice { + if packed { + return sizeVarint64PackedSlice, appendVarint64PackedSlice + } + return sizeVarint64Slice, appendVarint64Slice + } + if nozero { + return sizeVarint64ValueNoZero, appendVarint64ValueNoZero + } + return sizeVarint64Value, appendVarint64Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixedS64Ptr, appendFixedS64Ptr + } + if slice { + if packed { + return sizeFixedS64PackedSlice, appendFixedS64PackedSlice + } + return sizeFixedS64Slice, appendFixedS64Slice + } + if nozero { + return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero + } + return sizeFixedS64Value, appendFixedS64Value + case "varint": + if pointer { + return sizeVarintS64Ptr, appendVarintS64Ptr + } + if slice { + if packed { + return sizeVarintS64PackedSlice, appendVarintS64PackedSlice + } + return sizeVarintS64Slice, appendVarintS64Slice + } + if nozero { + return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero + } + return sizeVarintS64Value, appendVarintS64Value + case "zigzag64": + if pointer { + return sizeZigzag64Ptr, appendZigzag64Ptr + } + if slice { + if packed { + return sizeZigzag64PackedSlice, appendZigzag64PackedSlice + } + return sizeZigzag64Slice, appendZigzag64Slice + } + if nozero { + return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero + } + return sizeZigzag64Value, appendZigzag64Value + } + case reflect.Float32: + if pointer { + return sizeFloat32Ptr, appendFloat32Ptr + } + if slice { + if packed { + return sizeFloat32PackedSlice, appendFloat32PackedSlice + } + return sizeFloat32Slice, appendFloat32Slice + } + if nozero { + return sizeFloat32ValueNoZero, appendFloat32ValueNoZero + } + return sizeFloat32Value, appendFloat32Value + case reflect.Float64: + if pointer { + return sizeFloat64Ptr, appendFloat64Ptr + } + if slice { + if packed { + return sizeFloat64PackedSlice, appendFloat64PackedSlice + } + return sizeFloat64Slice, appendFloat64Slice + } + if nozero { + return sizeFloat64ValueNoZero, appendFloat64ValueNoZero + } + return sizeFloat64Value, appendFloat64Value + case reflect.String: + if validateUTF8 { + if pointer { + return sizeStringPtr, appendUTF8StringPtr + } + if slice { + return sizeStringSlice, appendUTF8StringSlice + } + if nozero { + return sizeStringValueNoZero, appendUTF8StringValueNoZero + } + return sizeStringValue, appendUTF8StringValue + } + if pointer { + return sizeStringPtr, appendStringPtr + } + if slice { + return sizeStringSlice, appendStringSlice + } + if nozero { + return sizeStringValueNoZero, appendStringValueNoZero + } + return sizeStringValue, appendStringValue + case reflect.Slice: + if slice { + return sizeBytesSlice, appendBytesSlice + } + if oneof { + // Oneof bytes field may also have "proto3" tag. + // We want to marshal it as a oneof field. Do this + // check before the proto3 check. 
+ return sizeBytesOneof, appendBytesOneof + } + if proto3 { + return sizeBytes3, appendBytes3 + } + return sizeBytes, appendBytes + case reflect.Struct: + switch encoding { + case "group": + if slice { + return makeGroupSliceMarshaler(getMarshalInfo(t)) + } + return makeGroupMarshaler(getMarshalInfo(t)) + case "bytes": + if pointer { + if slice { + return makeMessageSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageMarshaler(getMarshalInfo(t)) + } else { + if slice { + return makeMessageRefSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageRefMarshaler(getMarshalInfo(t)) + } + } + } + panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) +} + +// Below are functions to size/marshal a specific type of a field. +// They are stored in the field's info, and called by function pointers. +// They have type sizer or marshaler. + +func sizeFixed32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixedS32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFloat32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + return (4 + tagsize) * len(s) +} +func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixed64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFixedS64Value(_ pointer, tagsize 
int) int { + return 8 + tagsize +} +func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFloat64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + return (8 + tagsize) * len(s) +} +func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeVarint32Value(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarint32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarint64Value(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + return SizeVarint(v) + tagsize +} +func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return SizeVarint(v) + tagsize +} +func sizeVarint64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return SizeVarint(*p) + tagsize +} +func sizeVarint64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(v) + tagsize + } + return 
n +} +func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize + } + return n +} +func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize + } + return n +} +func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeBoolValue(_ pointer, tagsize int) int { + return 1 + tagsize +} +func sizeBoolValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toBool() + if !v { + return 0 + } + return 1 + tagsize +} +func sizeBoolPtr(ptr pointer, tagsize int) int { + p := *ptr.toBoolPtr() + if p == nil { + return 0 + } + return 1 + tagsize +} +func sizeBoolSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + return (1 + tagsize) * len(s) +} 
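+// A packed repeated field is encoded as a single tag, a varint byte length
+// and the concatenated payload, so the packed sizers charge the tag size once
+// rather than per element.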
+func sizeBoolPackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return 0 + } + return len(s) + SizeVarint(uint64(len(s))) + tagsize +} +func sizeStringValue(ptr pointer, tagsize int) int { + v := *ptr.toString() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toString() + if v == "" { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringPtr(ptr pointer, tagsize int) int { + p := *ptr.toStringPtr() + if p == nil { + return 0 + } + v := *p + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringSlice(ptr pointer, tagsize int) int { + s := *ptr.toStringSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} +func sizeBytes(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if v == nil { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytes3(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if len(v) == 0 { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesOneof(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesSlice(ptr pointer, tagsize int) int { + s := *ptr.toBytesSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} + +// appendFixed32 appends an encoded fixed32 to b. +func appendFixed32(b []byte, v uint32) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24)) + return b +} + +// appendFixed64 appends an encoded fixed64 to b. +func appendFixed64(b []byte, v uint64) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24), + byte(v>>32), + byte(v>>40), + byte(v>>48), + byte(v>>56)) + return b +} + +// appendVarint appends an encoded varint to b. +func appendVarint(b []byte, v uint64) []byte { + // TODO: make 1-byte (maybe 2-byte) case inline-able, once we + // have non-leaf inliner. 
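+ // Editor's note: each case below emits a base-128 varint, low 7 bits first,
+ // with the high bit set on every byte except the last. For example,
+ // v = 300 (0b1_0010_1100) encodes as two bytes:
+ //	byte(300&0x7f|0x80) == 0xAC
+ //	byte(300>>7) == 0x02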
+ switch { + case v < 1<<7: + b = append(b, byte(v)) + case v < 1<<14: + b = append(b, + byte(v&0x7f|0x80), + byte(v>>7)) + case v < 1<<21: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte(v>>14)) + case v < 1<<28: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte(v>>21)) + case v < 1<<35: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte(v>>28)) + case v < 1<<42: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte(v>>35)) + case v < 1<<49: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte(v>>42)) + case v < 1<<56: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte(v>>49)) + case v < 1<<63: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte(v>>56)) + default: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte((v>>56)&0x7f|0x80), + 1) + } + return b +} + +func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, *p) + return b, nil +} +func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(*p)) + return b, nil +} +func appendFixedS32Slice(b []byte, ptr pointer, 
wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(*p)) + return b, nil +} +func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, *p) + return b, nil +} +func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, 
nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(*p)) + return b, nil +} +func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(*p)) + return b, nil +} +func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() 
+ if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, *p) + return b, nil +} +func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + } + return b, nil +} +func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, v) + } + return b, nil +} +func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func 
appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + if !v { + return b, nil + } + b = appendVarint(b, wiretag) + b = append(b, 1) + return b, nil +} + +func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toBoolPtr() + if p == nil { + 
return b, nil + } + b = appendVarint(b, wiretag) + if *p { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(len(s))) + for _, v := range s { + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if v == "" { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toStringSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} +func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if v == "" { + return b, nil + } + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + s := *ptr.toStringSlice() + for _, v := range s { + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if v == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) 
+ return b, nil +} +func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if len(v) == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBytesSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} + +// makeGroupMarshaler returns the sizer and marshaler for a group. +// u is the marshal info of the underlying message. +func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + return u.size(p) + 2*tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + var err error + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, p, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + return b, err + } +} + +// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. +// u is the marshal info of the underlying message. +func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + n += u.size(v) + 2*tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err error + var nerr nonFatal + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, v, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + if !nerr.Merge(err) { + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, nerr.E + } +} + +// makeMessageMarshaler returns the sizer and marshaler for a message field. +// u is the marshal info of the message. +func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.size(p) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(p) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, p, deterministic) + } +} + +// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. +// u is the marshal info of the message. 
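+// Each element is emitted as tag, varint length, then body; the marshaler
+// reads the length from u.cachedsize (filled in by a prior sizing pass)
+// rather than calling u.size again, which keeps marshaling of nested
+// messages from going quadratic (editor's note).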
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err error + var nerr nonFatal + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if !nerr.Merge(err) { + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, nerr.E + } +} + +// makeMapMarshaler returns the sizer and marshaler for a map field. +// f is the pointer to the reflect data structure of the field. +func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { + // figure out key and value type + t := f.Type + keyType := t.Key() + valType := t.Elem() + tags := strings.Split(f.Tag.Get("protobuf"), ",") + keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + stdOptions := false + for _, t := range tags { + if strings.HasPrefix(t, "customtype=") { + valTags = append(valTags, t) + } + if t == "stdtime" { + valTags = append(valTags, t) + stdOptions = true + } + if t == "stdduration" { + valTags = append(valTags, t) + stdOptions = true + } + if t == "wktptr" { + valTags = append(valTags, t) + } + } + keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map + valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map + keyWireTag := 1<<3 | wiretype(keyTags[0]) + valWireTag := 2<<3 | wiretype(valTags[0]) + + // We create an interface to get the addresses of the map key and value. + // If value is pointer-typed, the interface is a direct interface, the + // idata itself is the value. Otherwise, the idata is the pointer to the + // value. + // Key cannot be pointer-typed. + valIsPtr := valType.Kind() == reflect.Ptr + + // If value is a message with nested maps, calling + // valSizer in marshal may be quadratic. We should use + // cached version in marshal (but not in size). + // If value is not message type, we don't have size cache, + // but it cannot be nested either. Just use valSizer. + valCachedSizer := valSizer + if valIsPtr && !stdOptions && valType.Elem().Kind() == reflect.Struct { + u := getMarshalInfo(valType.Elem()) + valCachedSizer = func(ptr pointer, tagsize int) int { + // Same as message sizer, but use cache. 
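+ // Editor's note: u.cachedsize returns the size memoized by an earlier
+ // u.size pass, so sizing this map value is O(1) instead of walking the
+ // submessage again.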
+ p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.cachedsize(p) + return siz + SizeVarint(uint64(siz)) + tagsize + } + } + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(t).Elem() // the map + n := 0 + for _, k := range m.MapKeys() { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(t).Elem() // the map + var err error + keys := m.MapKeys() + if len(keys) > 1 && deterministic { + sort.Sort(mapKeys(keys)) + } + + var nerr nonFatal + for _, k := range keys { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + b = appendVarint(b, tag) + siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + b = appendVarint(b, uint64(siz)) + b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) + if !nerr.Merge(err) { + return b, err + } + b, err = valMarshaler(b, vaddr, valWireTag, deterministic) + if err != ErrNil && !nerr.Merge(err) { // allow nil value in map + return b, err + } + } + return b, nerr.E + } +} + +// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. +// fi is the marshal info of the field. +// f is the pointer to the reflect data structure of the field. +func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { + // Oneof field is an interface. We need to get the actual data type on the fly. + t := f.Type + return func(ptr pointer, _ int) int { + p := ptr.getInterfacePointer() + if p.isNil() { + return 0 + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + e := fi.oneofElems[telem] + return e.sizer(p, e.tagsize) + }, + func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { + p := ptr.getInterfacePointer() + if p.isNil() { + return b, nil + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { + return b, errOneofHasNil + } + e := fi.oneofElems[telem] + return e.marshaler(b, p, e.wiretag, deterministic) + } +} + +// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. +func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + mu.Unlock() + return n +} + +// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. 
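+// Editor's note: each extension is stored either decoded (value and desc set)
+// or still in its encoded form (enc set); encoded extensions are copied
+// through verbatim, decoded ones are re-marshaled below.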
+func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + var nerr nonFatal + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E + } + + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// message set format is: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } + +// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field +// in message set format (above). +func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for id, e := range m { + n += 2 // start group, end group. tag = 1 (size=1) + n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + siz := len(msgWithLen) + n += siz + 1 // message, tag = 3 (size=1) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, 1) // message, tag = 3 (size=1) + } + mu.Unlock() + return n +} + +// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) +// to the end of byte slice b. +func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + var nerr nonFatal + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for id, e := range m { + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. 
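+ // Editor's note: e.enc still starts with the extension's original field
+ // tag; skipVarint drops that tag but keeps the length varint, so the
+ // payload can be re-emitted under the message-set "message" field (3).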
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + if !nerr.Merge(err) { + return b, err + } + b = append(b, 1<<3|WireEndGroup) + } + return b, nerr.E + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, id := range keys { + e := m[int32(id)] + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + b = append(b, 1<<3|WireEndGroup) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// sizeV1Extensions computes the size of encoded data for a V1-API extension field. +func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { + if m == nil { + return 0 + } + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + return n +} + +// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. +func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { + if m == nil { + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + var err error + var nerr nonFatal + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// newMarshaler is the interface representing objects that can marshal themselves. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. 
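+//
+// Editor's note: XXX_Size and XXX_Marshal are the fast paths emitted by
+// protoc-gen-go; XXX_Marshal appends to b, and passing deterministic=true
+// (as Buffer.Marshal below can) makes map fields marshal in sorted key order.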
+type newMarshaler interface { + XXX_Size() int + XXX_Marshal(b []byte, deterministic bool) ([]byte, error) +} + +// Size returns the encoded size of a protocol buffer message. +// This is the main entry point. +func Size(pb Message) int { + if m, ok := pb.(newMarshaler); ok { + return m.XXX_Size() + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, _ := m.Marshal() + return len(b) + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return 0 + } + var info InternalMessageInfo + return info.Size(pb) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, returning the data. +// This is the main entry point. +func Marshal(pb Message) ([]byte, error) { + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + b := make([]byte, 0, siz) + return m.XXX_Marshal(b, false) + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + return m.Marshal() + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return nil, ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + b := make([]byte, 0, siz) + return info.Marshal(b, pb, false) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, writing the result to the +// Buffer. +// This is an alternative entry point. It is not necessary to use +// a Buffer for most applications. +func (p *Buffer) Marshal(pb Message) error { + var err error + if p.deterministic { + if _, ok := pb.(Marshaler); ok { + return fmt.Errorf("proto: deterministic not supported by the Marshal method of %T", pb) + } + } + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + p.grow(siz) // make sure buf has enough capacity + pp := p.buf[len(p.buf) : len(p.buf) : len(p.buf)+siz] + pp, err = m.XXX_Marshal(pp, p.deterministic) + p.buf = append(p.buf, pp...) + return err + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + var b []byte + b, err = m.Marshal() + p.buf = append(p.buf, b...) + return err + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + p.grow(siz) // make sure buf has enough capacity + p.buf, err = info.Marshal(p.buf, pb, p.deterministic) + return err +} + +// grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. After grow(n), at least n bytes can be written to the +// buffer without another allocation. +func (p *Buffer) grow(n int) { + need := len(p.buf) + n + if need <= cap(p.buf) { + return + } + newCap := len(p.buf) * 2 + if newCap < need { + newCap = need + } + p.buf = append(make([]byte, 0, newCap), p.buf...) +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go new file mode 100644 index 0000000000..997f57c1e1 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go @@ -0,0 +1,388 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" + "time" +) + +// makeMessageRefMarshaler differs a bit from makeMessageMarshaler +// It marshals a message T instead of a *T +func makeMessageRefMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + siz := u.size(ptr) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + b = appendVarint(b, wiretag) + siz := u.cachedsize(ptr) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, ptr, deterministic) + } +} + +// makeMessageRefSliceMarshaler differs quite a lot from makeMessageSliceMarshaler +// It marshals a slice of messages []T instead of []*T +func makeMessageRefSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + e := elem.Interface() + v := toAddrPointer(&e, false) + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + var err, errreq error + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + e := elem.Interface() + v := toAddrPointer(&e, false) + b = appendVarint(b, wiretag) + siz := u.size(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if err != nil { + if _, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling.
+ if errreq == nil { + errreq = err + } + continue + } + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + + return b, errreq + } +} + +func makeCustomPtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) + siz := m.Size() + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) + siz := m.Size() + buf, err := m.Marshal() + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + return b, nil + } +} + +func makeCustomMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(u.typ).Interface().(custom) + siz := m.Size() + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(u.typ).Interface().(custom) + siz := m.Size() + buf, err := m.Marshal() + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + return b, nil + } +} + +func makeTimeMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeTimePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeTimeSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(time.Time) + ts, err := timestampProto(t) + if err != nil { + return 0 + } + siz := Size(ts) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(time.Time) + ts, err := timestampProto(t) + if err != nil { + return nil, err + } + siz := Size(ts) + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeTimePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + siz := Size(ts) + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeDurationMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) + dur := durationProto(*d) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeDurationPtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) + dur := durationProto(*d) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeDurationSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(time.Duration) + dur := durationProto(d) + siz := Size(dur) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(time.Duration) + dur := durationProto(d) + siz := Size(dur) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeDurationPtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_merge.go b/vendor/github.com/gogo/protobuf/proto/table_merge.go new file mode 100644 index 0000000000..60dcf70d1e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_merge.go @@ -0,0 +1,676 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +// Merge merges the src message into dst. +// This assumes that dst and src are of the same type and are non-nil. +func (a *InternalMessageInfo) Merge(dst, src Message) { + mi := atomicLoadMergeInfo(&a.merge) + if mi == nil { + mi = getMergeInfo(reflect.TypeOf(dst).Elem()) + atomicStoreMergeInfo(&a.merge, mi) + } + mi.merge(toPointer(&dst), toPointer(&src)) +} + +type mergeInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []mergeFieldInfo + unrecognized field // Offset of XXX_unrecognized +} + +type mergeFieldInfo struct { + field field // Offset of field, guaranteed to be valid + + // isPointer reports whether the value in the field is a pointer. + // This is true for the following situations: + // * Pointer to struct + // * Pointer to basic type (proto2 only) + // * Slice (first value in slice header is a pointer) + // * String (first value in string header is a pointer) + isPointer bool + + // basicWidth reports the width of the field assuming that it is directly + // embedded in the struct (as is the case for basic types in proto3). + // The possible values are: + // 0: invalid + // 1: bool + // 4: int32, uint32, float32 + // 8: int64, uint64, float64 + basicWidth int + + // Where dst and src are pointers to the types being merged. + merge func(dst, src pointer) +} + +var ( + mergeInfoMap = map[reflect.Type]*mergeInfo{} + mergeInfoLock sync.Mutex +) + +func getMergeInfo(t reflect.Type) *mergeInfo { + mergeInfoLock.Lock() + defer mergeInfoLock.Unlock() + mi := mergeInfoMap[t] + if mi == nil { + mi = &mergeInfo{typ: t} + mergeInfoMap[t] = mi + } + return mi +} + +// merge merges src into dst assuming they are both of type *mi.typ. +func (mi *mergeInfo) merge(dst, src pointer) { + if dst.isNil() { + panic("proto: nil destination") + } + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&mi.initialized) == 0 { + mi.computeMergeInfo() + } + + for _, fi := range mi.fields { + sfp := src.offset(fi.field) + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string + continue + } + if fi.basicWidth > 0 { + switch { + case fi.basicWidth == 1 && !*sfp.toBool(): + continue + case fi.basicWidth == 4 && *sfp.toUint32() == 0: + continue + case fi.basicWidth == 8 && *sfp.toUint64() == 0: + continue + } + } + } + + dfp := dst.offset(fi.field) + fi.merge(dfp, sfp) + } + + // TODO: Make this faster?
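+ // Editor's note: the rest of this function is the reflection-based slow
+ // path the TODO refers to: it merges the extension maps of extendable
+ // messages, then deep-copies any XXX_unrecognized bytes from src to dst.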
+ out := dst.asPointerTo(mi.typ).Elem() + in := src.asPointerTo(mi.typ).Elem() + if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + if mi.unrecognized.IsValid() { + if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { + *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) + } + } +} + +func (mi *mergeInfo) computeMergeInfo() { + mi.lock.Lock() + defer mi.lock.Unlock() + if mi.initialized != 0 { + return + } + t := mi.typ + n := t.NumField() + + props := GetProperties(t) + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + mfi := mergeFieldInfo{field: toField(&f)} + tf := f.Type + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + switch tf.Kind() { + case reflect.Ptr, reflect.Slice, reflect.String: + // As a special case, we assume slices and strings are pointers + // since we know that the first field in the SliceSlice or + // StringHeader is a data pointer. + mfi.isPointer = true + case reflect.Bool: + mfi.basicWidth = 1 + case reflect.Int32, reflect.Uint32, reflect.Float32: + mfi.basicWidth = 4 + case reflect.Int64, reflect.Uint64, reflect.Float64: + mfi.basicWidth = 8 + } + } + + // Unwrap tf to get at its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + tf.Name()) + } + + switch tf.Kind() { + case reflect.Int32: + switch { + case isSlice: // E.g., []int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Slice is not defined (see pointer_reflect.go). + /* + sfsp := src.toInt32Slice() + if *sfsp != nil { + dfsp := dst.toInt32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + */ + sfs := src.getInt32Slice() + if sfs != nil { + dfs := dst.getInt32Slice() + dfs = append(dfs, sfs...) + if dfs == nil { + dfs = []int32{} + } + dst.setInt32Slice(dfs) + } + } + case isPointer: // E.g., *int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). + /* + sfpp := src.toInt32Ptr() + if *sfpp != nil { + dfpp := dst.toInt32Ptr() + if *dfpp == nil { + *dfpp = Int32(**sfpp) + } else { + **dfpp = **sfpp + } + } + */ + sfp := src.getInt32Ptr() + if sfp != nil { + dfp := dst.getInt32Ptr() + if dfp == nil { + dst.setInt32Ptr(*sfp) + } else { + *dfp = *sfp + } + } + } + default: // E.g., int32 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt32(); v != 0 { + *dst.toInt32() = v + } + } + } + case reflect.Int64: + switch { + case isSlice: // E.g., []int64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toInt64Slice() + if *sfsp != nil { + dfsp := dst.toInt64Slice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []int64{} + } + } + } + case isPointer: // E.g., *int64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toInt64Ptr() + if *sfpp != nil { + dfpp := dst.toInt64Ptr() + if *dfpp == nil { + *dfpp = Int64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., int64 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt64(); v != 0 { + *dst.toInt64() = v + } + } + } + case reflect.Uint32: + switch { + case isSlice: // E.g., []uint32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint32Slice() + if *sfsp != nil { + dfsp := dst.toUint32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint32{} + } + } + } + case isPointer: // E.g., *uint32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint32Ptr() + if *sfpp != nil { + dfpp := dst.toUint32Ptr() + if *dfpp == nil { + *dfpp = Uint32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint32 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint32(); v != 0 { + *dst.toUint32() = v + } + } + } + case reflect.Uint64: + switch { + case isSlice: // E.g., []uint64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint64Slice() + if *sfsp != nil { + dfsp := dst.toUint64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint64{} + } + } + } + case isPointer: // E.g., *uint64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint64Ptr() + if *sfpp != nil { + dfpp := dst.toUint64Ptr() + if *dfpp == nil { + *dfpp = Uint64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint64 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint64(); v != 0 { + *dst.toUint64() = v + } + } + } + case reflect.Float32: + switch { + case isSlice: // E.g., []float32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat32Slice() + if *sfsp != nil { + dfsp := dst.toFloat32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float32{} + } + } + } + case isPointer: // E.g., *float32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat32Ptr() + if *sfpp != nil { + dfpp := dst.toFloat32Ptr() + if *dfpp == nil { + *dfpp = Float32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float32 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat32(); v != 0 { + *dst.toFloat32() = v + } + } + } + case reflect.Float64: + switch { + case isSlice: // E.g., []float64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat64Slice() + if *sfsp != nil { + dfsp := dst.toFloat64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float64{} + } + } + } + case isPointer: // E.g., *float64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat64Ptr() + if *sfpp != nil { + dfpp := dst.toFloat64Ptr() + if *dfpp == nil { + *dfpp = Float64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float64 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat64(); v != 0 { + *dst.toFloat64() = v + } + } + } + case reflect.Bool: + switch { + case isSlice: // E.g., []bool + mfi.merge = func(dst, src pointer) { + sfsp := src.toBoolSlice() + if *sfsp != nil { + dfsp := dst.toBoolSlice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []bool{} + } + } + } + case isPointer: // E.g., *bool + mfi.merge = func(dst, src pointer) { + sfpp := src.toBoolPtr() + if *sfpp != nil { + dfpp := dst.toBoolPtr() + if *dfpp == nil { + *dfpp = Bool(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., bool + mfi.merge = func(dst, src pointer) { + if v := *src.toBool(); v { + *dst.toBool() = v + } + } + } + case reflect.String: + switch { + case isSlice: // E.g., []string + mfi.merge = func(dst, src pointer) { + sfsp := src.toStringSlice() + if *sfsp != nil { + dfsp := dst.toStringSlice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []string{} + } + } + } + case isPointer: // E.g., *string + mfi.merge = func(dst, src pointer) { + sfpp := src.toStringPtr() + if *sfpp != nil { + dfpp := dst.toStringPtr() + if *dfpp == nil { + *dfpp = String(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., string + mfi.merge = func(dst, src pointer) { + if v := *src.toString(); v != "" { + *dst.toString() = v + } + } + } + case reflect.Slice: + isProto3 := props.Prop[i].proto3 + switch { + case isPointer: + panic("bad pointer in byte slice case in " + tf.Name()) + case tf.Elem().Kind() != reflect.Uint8: + panic("bad element kind in byte slice case in " + tf.Name()) + case isSlice: // E.g., [][]byte + mfi.merge = func(dst, src pointer) { + sbsp := src.toBytesSlice() + if *sbsp != nil { + dbsp := dst.toBytesSlice() + for _, sb := range *sbsp { + if sb == nil { + *dbsp = append(*dbsp, nil) + } else { + *dbsp = append(*dbsp, append([]byte{}, sb...)) + } + } + if *dbsp == nil { + *dbsp = [][]byte{} + } + } + } + default: // E.g., []byte + mfi.merge = func(dst, src pointer) { + sbp := src.toBytes() + if *sbp != nil { + dbp := dst.toBytes() + if !isProto3 || len(*sbp) > 0 { + *dbp = append([]byte{}, *sbp...) + } + } + } + } + case reflect.Struct: + switch { + case isSlice && !isPointer: // E.g. []pb.T + mergeInfo := getMergeInfo(tf) + zero := reflect.Zero(tf) + mfi.merge = func(dst, src pointer) { + // TODO: Make this faster? 
+ dstsp := dst.asPointerTo(f.Type) + dsts := dstsp.Elem() + srcs := src.asPointerTo(f.Type).Elem() + for i := 0; i < srcs.Len(); i++ { + dsts = reflect.Append(dsts, zero) + srcElement := srcs.Index(i).Addr() + dstElement := dsts.Index(dsts.Len() - 1).Addr() + mergeInfo.merge(valToPointer(dstElement), valToPointer(srcElement)) + } + if dsts.IsNil() { + dsts = reflect.MakeSlice(f.Type, 0, 0) + } + dstsp.Elem().Set(dsts) + } + case !isPointer: + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + mergeInfo.merge(dst, src) + } + case isSlice: // E.g., []*pb.T + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sps := src.getPointerSlice() + if sps != nil { + dps := dst.getPointerSlice() + for _, sp := range sps { + var dp pointer + if !sp.isNil() { + dp = valToPointer(reflect.New(tf)) + mergeInfo.merge(dp, sp) + } + dps = append(dps, dp) + } + if dps == nil { + dps = []pointer{} + } + dst.setPointerSlice(dps) + } + } + default: // E.g., *pb.T + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sp := src.getPointer() + if !sp.isNil() { + dp := dst.getPointer() + if dp.isNil() { + dp = valToPointer(reflect.New(tf)) + dst.setPointer(dp) + } + mergeInfo.merge(dp, sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic("bad pointer or slice in map case in " + tf.Name()) + default: // E.g., map[K]V + mfi.merge = func(dst, src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + dm := dst.asPointerTo(tf).Elem() + if dm.IsNil() { + dm.Set(reflect.MakeMap(tf)) + } + + switch tf.Elem().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(Clone(val.Interface().(Message))) + dm.SetMapIndex(key, val) + } + case reflect.Slice: // E.g. Bytes type (e.g., []byte) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + dm.SetMapIndex(key, val) + } + default: // Basic type (e.g., string) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + dm.SetMapIndex(key, val) + } + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic("bad pointer or slice in interface case in " + tf.Name()) + default: // E.g., interface{} + // TODO: Make this faster? + mfi.merge = func(dst, src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + du := dst.asPointerTo(tf).Elem() + typ := su.Elem().Type() + if du.IsNil() || du.Elem().Type() != typ { + du.Set(reflect.New(typ.Elem())) // Initialize interface if empty + } + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + dv := du.Elem().Elem().Field(0) + if dv.Kind() == reflect.Ptr && dv.IsNil() { + dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + Merge(dv.Interface().(Message), sv.Interface().(Message)) + case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) + dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) + default: // Basic type (e.g., string) + dv.Set(sv) + } + } + } + } + default: + panic(fmt.Sprintf("merger not found for type:%s", tf)) + } + mi.fields = append(mi.fields, mfi) + } + + mi.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + mi.unrecognized = toField(&f) + } + + atomic.StoreInt32(&mi.initialized, 1) +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go new file mode 100644 index 0000000000..937229386a --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go @@ -0,0 +1,2249 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "errors" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// Unmarshal is the entry point from the generated .pb.go files. +// This function is not intended to be used by non-generated code. +// This function is not subject to any compatibility guarantee. +// msg contains a pointer to a protocol buffer struct. +// b is the data to be unmarshaled into the protocol buffer. +// a is a pointer to a place to store cached unmarshal information. +func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { + // Load the unmarshal information for this message type. + // The atomic load ensures memory consistency. + u := atomicLoadUnmarshalInfo(&a.unmarshal) + if u == nil { + // Slow path: find unmarshal info for msg, update a with it. + u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) + atomicStoreUnmarshalInfo(&a.unmarshal, u) + } + // Then do the unmarshaling. 
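The atomic load/store above means only the first Unmarshal call for a message type pays for the mutex-guarded global map lookup; every later call gets the table from a lock-free read. Roughly the same double-checked caching pattern in self-contained form (all names here are illustrative stand-ins, and atomic.Pointer needs Go 1.19+):

package main

import (
	"fmt"
	"reflect"
	"sync"
	"sync/atomic"
)

type typeInfo struct{ name string } // stand-in for unmarshalInfo

type messageInfo struct {
	cached atomic.Pointer[typeInfo] // per-message cache, like InternalMessageInfo
}

var (
	infoMap  = map[reflect.Type]*typeInfo{} // like unmarshalInfoMap
	infoLock sync.Mutex                     // like unmarshalInfoLock
)

func (a *messageInfo) load(t reflect.Type) *typeInfo {
	// Fast path: lock-free read of the cached pointer.
	if ti := a.cached.Load(); ti != nil {
		return ti
	}
	// Slow path: consult (or populate) the global map under a mutex,
	// then publish the result atomically for later calls.
	infoLock.Lock()
	ti := infoMap[t]
	if ti == nil {
		ti = &typeInfo{name: t.String()}
		infoMap[t] = ti
	}
	infoLock.Unlock()
	a.cached.Store(ti)
	return ti
}

func main() {
	var mi messageInfo
	fmt.Println(mi.load(reflect.TypeOf(0)).name) // computed once
	fmt.Println(mi.load(reflect.TypeOf(0)).name) // served by the atomic fast path
}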
+	err := u.unmarshal(toPointer(&msg), b)
+	return err
+}
+
+type unmarshalInfo struct {
+	typ reflect.Type // type of the protobuf struct
+
+	// 0 = only typ field is initialized
+	// 1 = completely initialized
+	initialized     int32
+	lock            sync.Mutex                    // prevents double initialization
+	dense           []unmarshalFieldInfo          // fields indexed by tag #
+	sparse          map[uint64]unmarshalFieldInfo // fields indexed by tag #
+	reqFields       []string                      // names of required fields
+	reqMask         uint64                        // 1<<len(reqFields)-1 when all required fields are present
+	unrecognized    field                         // offset of []byte to store unrecognized data, or invalidField if it does not exist
+	extensions      field                         // offset of extensions field (of type proto.XXX_InternalExtensions), or invalidField if it does not exist
+	oldExtensions   field                         // offset of old-form extensions field (of type map[int32]Extension), or invalidField if it does not exist
+	bytesExtensions field                         // offset of old-form extensions field (of type []byte), or invalidField if it does not exist
+	extensionRanges []ExtensionRange              // extension ranges (or nil if message doesn't have extensions)
+	isMessageSet    bool                          // true if the message is a message set
+}
+
+// An unmarshaler takes a stream of bytes and a pointer to a field of a message.
+// It decodes the field, stores it at f, and returns the unused bytes.
+// w is the wire encoding.
+// b is the data after the tag and wire encoding have been read.
+type unmarshaler func(b []byte, f pointer, w int) ([]byte, error)
+
+type unmarshalFieldInfo struct {
+	// location of the field in the proto message structure.
+	field field
+
+	// function to unmarshal the data for the field.
+	unmarshal unmarshaler
+
+	// if a required field, contains a single set bit at this field's position in the required field list.
+	reqMask uint64
+
+	name string // name of the field, for error reporting
+}
+
+var (
+	unmarshalInfoMap  = map[reflect.Type]*unmarshalInfo{}
+	unmarshalInfoLock sync.Mutex
+)
+
+// getUnmarshalInfo returns the data structure which can be
+// subsequently used to unmarshal a message of the given type.
+// t is the type of the message (note: not pointer to message).
+func getUnmarshalInfo(t reflect.Type) *unmarshalInfo {
+	// It would be correct to return a new unmarshalInfo
+	// unconditionally. We would end up allocating one
+	// per occurrence of that type as a message or submessage.
+	// We use a cache here just to reduce memory usage.
+	unmarshalInfoLock.Lock()
+	defer unmarshalInfoLock.Unlock()
+	u := unmarshalInfoMap[t]
+	if u == nil {
+		u = &unmarshalInfo{typ: t}
+		// Note: we just set the type here. The rest of the fields
+		// will be initialized on first use.
+		unmarshalInfoMap[t] = u
+	}
+	return u
+}
+
+// unmarshal does the main work of unmarshaling a message.
+// u provides type information used to unmarshal the message.
+// m is a pointer to a protocol buffer message.
+// b is a byte stream to unmarshal into m.
+// This is top routine used when recursively unmarshaling submessages.
+func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeUnmarshalInfo()
+	}
+	if u.isMessageSet {
+		return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
+	}
+	var reqMask uint64 // bitmask of required fields we've seen.
+	var errLater error
+	for len(b) > 0 {
+		// Read tag and wire type.
+		// Special case 1 and 2 byte varints.
+		var x uint64
+		if b[0] < 128 {
+			x = uint64(b[0])
+			b = b[1:]
+		} else if len(b) >= 2 && b[1] < 128 {
+			x = uint64(b[0]&0x7f) + uint64(b[1])<<7
+			b = b[2:]
+		} else {
+			var n int
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+		}
+		tag := x >> 3
+		wire := int(x) & 7
+
+		// Dispatch on the tag to one of the unmarshal* functions below.
+		var f unmarshalFieldInfo
+		if tag < uint64(len(u.dense)) {
+			f = u.dense[tag]
+		} else {
+			f = u.sparse[tag]
+		}
+		if fn := f.unmarshal; fn != nil {
+			var err error
+			b, err = fn(b, m.offset(f.field), wire)
+			if err == nil {
+				reqMask |= f.reqMask
+				continue
+			}
+			if r, ok := err.(*RequiredNotSetError); ok {
+				// Remember this error, but keep parsing. We need to produce
+				// a full parse even if a required field is missing.
+				if errLater == nil {
+					errLater = r
+				}
+				reqMask |= f.reqMask
+				continue
+			}
+			if err != errInternalBadWireType {
+				if err == errInvalidUTF8 {
+					if errLater == nil {
+						fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+						errLater = &invalidUTF8Error{fullName}
+					}
+					continue
+				}
+				return err
+			}
+			// Fragments with bad wire type are treated as unknown fields.
+		}
+
+		// Unknown tag.
+		if !u.unrecognized.IsValid() {
+			// Don't keep unrecognized data; just skip it.
+			var err error
+			b, err = skipField(b, wire)
+			if err != nil {
+				return err
+			}
+			continue
+		}
+		// Keep unrecognized data around.
+		// maybe in extensions, maybe in the unrecognized field.
+		z := m.offset(u.unrecognized).toBytes()
+		var emap map[int32]Extension
+		var e Extension
+		for _, r := range u.extensionRanges {
+			if uint64(r.Start) <= tag && tag <= uint64(r.End) {
+				if u.extensions.IsValid() {
+					mp := m.offset(u.extensions).toExtensions()
+					emap = mp.extensionsWrite()
+					e = emap[int32(tag)]
+					z = &e.enc
+					break
+				}
+				if u.oldExtensions.IsValid() {
+					p := m.offset(u.oldExtensions).toOldExtensions()
+					emap = *p
+					if emap == nil {
+						emap = map[int32]Extension{}
+						*p = emap
+					}
+					e = emap[int32(tag)]
+					z = &e.enc
+					break
+				}
+				if u.bytesExtensions.IsValid() {
+					z = m.offset(u.bytesExtensions).toBytes()
+					break
+				}
+				panic("no extensions field available")
+			}
+		}
+		// Use wire type to skip data.
+		var err error
+		b0 := b
+		b, err = skipField(b, wire)
+		if err != nil {
+			return err
+		}
+		*z = encodeVarint(*z, tag<<3|uint64(wire))
+		*z = append(*z, b0[:len(b0)-len(b)]...)
+
+		if emap != nil {
+			emap[int32(tag)] = e
+		}
+	}
+	if reqMask != u.reqMask && errLater == nil {
+		// A required field of this message is missing.
+		for _, n := range u.reqFields {
+			if reqMask&1 == 0 {
+				errLater = &RequiredNotSetError{n}
+			}
+			reqMask >>= 1
+		}
+	}
+	return errLater
+}
+
+// computeUnmarshalInfo fills in u with information for use
+// in unmarshaling protocol buffers of type u.typ.
+func (u *unmarshalInfo) computeUnmarshalInfo() { + u.lock.Lock() + defer u.lock.Unlock() + if u.initialized != 0 { + return + } + t := u.typ + n := t.NumField() + + // Set up the "not found" value for the unrecognized byte buffer. + // This is the default for proto3. + u.unrecognized = invalidField + u.extensions = invalidField + u.oldExtensions = invalidField + u.bytesExtensions = invalidField + + // List of the generated type and offset for each oneof field. + type oneofField struct { + ityp reflect.Type // interface type of oneof field + field field // offset in containing message + } + var oneofFields []oneofField + + for i := 0; i < n; i++ { + f := t.Field(i) + if f.Name == "XXX_unrecognized" { + // The byte slice used to hold unrecognized input is special. + if f.Type != reflect.TypeOf(([]byte)(nil)) { + panic("bad type for XXX_unrecognized field: " + f.Type.Name()) + } + u.unrecognized = toField(&f) + continue + } + if f.Name == "XXX_InternalExtensions" { + // Ditto here. + if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { + panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) + } + u.extensions = toField(&f) + if f.Tag.Get("protobuf_messageset") == "1" { + u.isMessageSet = true + } + continue + } + if f.Name == "XXX_extensions" { + // An older form of the extensions field. + if f.Type == reflect.TypeOf((map[int32]Extension)(nil)) { + u.oldExtensions = toField(&f) + continue + } else if f.Type == reflect.TypeOf(([]byte)(nil)) { + u.bytesExtensions = toField(&f) + continue + } + panic("bad type for XXX_extensions field: " + f.Type.Name()) + } + if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { + continue + } + + oneof := f.Tag.Get("protobuf_oneof") + if oneof != "" { + oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) + // The rest of oneof processing happens below. + continue + } + + tags := f.Tag.Get("protobuf") + tagArray := strings.Split(tags, ",") + if len(tagArray) < 2 { + panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) + } + tag, err := strconv.Atoi(tagArray[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tagArray[1]) + } + + name := "" + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + } + + // Extract unmarshaling function from the field (its type and tags). + unmarshal := fieldUnmarshaler(&f) + + // Required field? + var reqMask uint64 + if tagArray[2] == "req" { + bit := len(u.reqFields) + u.reqFields = append(u.reqFields, name) + reqMask = uint64(1) << uint(bit) + // TODO: if we have more than 64 required fields, we end up + // not verifying that all required fields are present. + // Fix this, perhaps using a count of required fields? + } + + // Store the info in the correct slot in the message. + u.setTag(tag, toField(&f), unmarshal, reqMask, name) + } + + // Find any types associated with oneof fields. 
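Concretely, the oneof shape this loop is looking for is the one the protobuf code generator emits: an interface-typed field on the message plus one single-field wrapper struct per case. A hand-written approximation (not actual generator output) for something like message Msg { oneof f { int64 x = 1; string s = 2; } }:

package main

import "fmt"

type isMsg_F interface{ isMsg_F() }

type Msg struct {
	// F holds exactly one of the wrapper types below.
	F isMsg_F `protobuf_oneof:"f"`
}

// One wrapper struct per oneof case; each has a single tagged field,
// which is why the code below can read typ.Field(0).
type Msg_X struct {
	X int64 `protobuf:"varint,1,opt,name=x,proto3,oneof"`
}
type Msg_S struct {
	S string `protobuf:"bytes,2,opt,name=s,proto3,oneof"`
}

func (*Msg_X) isMsg_F() {}
func (*Msg_S) isMsg_F() {}

// XXX_OneofWrappers lists the wrapper types; this is what the
// oneofWrappersIface case below consumes.
func (*Msg) XXX_OneofWrappers() []interface{} {
	return []interface{}{(*Msg_X)(nil), (*Msg_S)(nil)}
}

func main() {
	m := &Msg{F: &Msg_X{X: 7}}
	if x, ok := m.F.(*Msg_X); ok {
		fmt.Println(x.X) // 7
	}
}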
+	// gogo: len(oneofFields) > 0 is needed for embedded oneof messages, without a marshaler and unmarshaler
+	if len(oneofFields) > 0 {
+		var oneofImplementers []interface{}
+		switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+		case oneofFuncsIface:
+			_, _, _, oneofImplementers = m.XXX_OneofFuncs()
+		case oneofWrappersIface:
+			oneofImplementers = m.XXX_OneofWrappers()
+		}
+		for _, v := range oneofImplementers {
+			tptr := reflect.TypeOf(v) // *Msg_X
+			typ := tptr.Elem()        // Msg_X
+
+			f := typ.Field(0) // oneof implementers have one field
+			baseUnmarshal := fieldUnmarshaler(&f)
+			tags := strings.Split(f.Tag.Get("protobuf"), ",")
+			fieldNum, err := strconv.Atoi(tags[1])
+			if err != nil {
+				panic("protobuf tag field not an integer: " + tags[1])
+			}
+			var name string
+			for _, tag := range tags {
+				if strings.HasPrefix(tag, "name=") {
+					name = strings.TrimPrefix(tag, "name=")
+					break
+				}
+			}
+
+			// Find the oneof field that this struct implements.
+			// Might take O(n^2) to process all of the oneofs, but who cares.
+			for _, of := range oneofFields {
+				if tptr.Implements(of.ityp) {
+					// We have found the corresponding interface for this struct.
+					// That lets us know where this struct should be stored
+					// when we encounter it during unmarshaling.
+					unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
+					u.setTag(fieldNum, of.field, unmarshal, 0, name)
+				}
+			}
+
+		}
+	}
+
+	// Get extension ranges, if any.
+	fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+	if fn.IsValid() {
+		if !u.extensions.IsValid() && !u.oldExtensions.IsValid() && !u.bytesExtensions.IsValid() {
+			panic("a message with extensions, but no extensions field in " + t.Name())
+		}
+		u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
+	}
+
+	// Explicitly disallow tag 0. This will ensure we flag an error
+	// when decoding a buffer of all zeros. Without this code, we
+	// would decode and skip an all-zero buffer of even length.
+	// [0 0] is [tag=0/wiretype=varint varint-encoded-0].
+	u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
+		return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
+	}, 0, "")
+
+	// Set mask for required field check.
+	u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
+
+	atomic.StoreInt32(&u.initialized, 1)
+}
+
+// setTag stores the unmarshal information for the given tag.
+// tag = tag # for field
+// field/unmarshal = unmarshal info for that field.
+// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
+// name = short name of the field.
+func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) {
+	i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name}
+	n := u.typ.NumField()
+	if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
+		for len(u.dense) <= tag {
+			u.dense = append(u.dense, unmarshalFieldInfo{})
+		}
+		u.dense[tag] = i
+		return
+	}
+	if u.sparse == nil {
+		u.sparse = map[uint64]unmarshalFieldInfo{}
+	}
+	u.sparse[uint64(tag)] = i
+}
+
+// fieldUnmarshaler returns an unmarshaler for the given field.
+func fieldUnmarshaler(f *reflect.StructField) unmarshaler {
+	if f.Type.Kind() == reflect.Map {
+		return makeUnmarshalMap(f)
+	}
+	return typeUnmarshaler(f.Type, f.Tag.Get("protobuf"))
+}
+
+// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
+func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { + tagArray := strings.Split(tags, ",") + encoding := tagArray[0] + name := "unknown" + ctype := false + isTime := false + isDuration := false + isWktPointer := false + proto3 := false + validateUTF8 := true + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + if tag == "proto3" { + proto3 = true + } + if strings.HasPrefix(tag, "customtype=") { + ctype = true + } + if tag == "stdtime" { + isTime = true + } + if tag == "stdduration" { + isDuration = true + } + if tag == "wktptr" { + isWktPointer = true + } + } + validateUTF8 = validateUTF8 && proto3 + + // Figure out packaging (pointer, slice, or both) + slice := false + pointer := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + if ctype { + if reflect.PtrTo(t).Implements(customType) { + if slice { + return makeUnmarshalCustomSlice(getUnmarshalInfo(t), name) + } + if pointer { + return makeUnmarshalCustomPtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalCustom(getUnmarshalInfo(t), name) + } else { + panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) + } + } + + if isTime { + if pointer { + if slice { + return makeUnmarshalTimePtrSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalTimePtr(getUnmarshalInfo(t), name) + } + if slice { + return makeUnmarshalTimeSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalTime(getUnmarshalInfo(t), name) + } + + if isDuration { + if pointer { + if slice { + return makeUnmarshalDurationPtrSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalDurationPtr(getUnmarshalInfo(t), name) + } + if slice { + return makeUnmarshalDurationSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalDuration(getUnmarshalInfo(t), name) + } + + if isWktPointer { + switch t.Kind() { + case reflect.Float64: + if pointer { + if slice { + return makeStdDoubleValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdDoubleValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdDoubleValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdDoubleValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Float32: + if pointer { + if slice { + return makeStdFloatValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdFloatValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdFloatValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdFloatValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Int64: + if pointer { + if slice { + return makeStdInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt64ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Uint64: + if pointer { + if slice { + return makeStdUInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdUInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt64ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Int32: + if pointer { + if slice { + return makeStdInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return 
makeStdInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt32ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Uint32: + if pointer { + if slice { + return makeStdUInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdUInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt32ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Bool: + if pointer { + if slice { + return makeStdBoolValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBoolValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdBoolValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBoolValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.String: + if pointer { + if slice { + return makeStdStringValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdStringValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdStringValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdStringValueUnmarshaler(getUnmarshalInfo(t), name) + case uint8SliceType: + if pointer { + if slice { + return makeStdBytesValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBytesValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdBytesValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBytesValueUnmarshaler(getUnmarshalInfo(t), name) + default: + panic(fmt.Sprintf("unknown wktpointer type %#v", t)) + } + } + + // We'll never have both pointer and slice for basic types. + if pointer && slice && t.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + t.Name()) + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return unmarshalBoolPtr + } + if slice { + return unmarshalBoolSlice + } + return unmarshalBoolValue + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixedS32Ptr + } + if slice { + return unmarshalFixedS32Slice + } + return unmarshalFixedS32Value + case "varint": + // this could be int32 or enum + if pointer { + return unmarshalInt32Ptr + } + if slice { + return unmarshalInt32Slice + } + return unmarshalInt32Value + case "zigzag32": + if pointer { + return unmarshalSint32Ptr + } + if slice { + return unmarshalSint32Slice + } + return unmarshalSint32Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixedS64Ptr + } + if slice { + return unmarshalFixedS64Slice + } + return unmarshalFixedS64Value + case "varint": + if pointer { + return unmarshalInt64Ptr + } + if slice { + return unmarshalInt64Slice + } + return unmarshalInt64Value + case "zigzag64": + if pointer { + return unmarshalSint64Ptr + } + if slice { + return unmarshalSint64Slice + } + return unmarshalSint64Value + } + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixed32Ptr + } + if slice { + return unmarshalFixed32Slice + } + return unmarshalFixed32Value + case "varint": + if pointer { + return unmarshalUint32Ptr + } + if slice { + return unmarshalUint32Slice + } + return unmarshalUint32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixed64Ptr + } + if slice { + return unmarshalFixed64Slice + } + return unmarshalFixed64Value + 
case "varint": + if pointer { + return unmarshalUint64Ptr + } + if slice { + return unmarshalUint64Slice + } + return unmarshalUint64Value + } + case reflect.Float32: + if pointer { + return unmarshalFloat32Ptr + } + if slice { + return unmarshalFloat32Slice + } + return unmarshalFloat32Value + case reflect.Float64: + if pointer { + return unmarshalFloat64Ptr + } + if slice { + return unmarshalFloat64Slice + } + return unmarshalFloat64Value + case reflect.Map: + panic("map type in typeUnmarshaler in " + t.Name()) + case reflect.Slice: + if pointer { + panic("bad pointer in slice case in " + t.Name()) + } + if slice { + return unmarshalBytesSlice + } + return unmarshalBytesValue + case reflect.String: + if validateUTF8 { + if pointer { + return unmarshalUTF8StringPtr + } + if slice { + return unmarshalUTF8StringSlice + } + return unmarshalUTF8StringValue + } + if pointer { + return unmarshalStringPtr + } + if slice { + return unmarshalStringSlice + } + return unmarshalStringValue + case reflect.Struct: + // message or group field + if !pointer { + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessage(getUnmarshalInfo(t), name) + } + } + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) + case "group": + if slice { + return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) + } + } + panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) +} + +// Below are all the unmarshalers for individual fields of various types. + +func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64() = v + return b, nil +} + +func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64() = v + return b, nil +} + +func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := 
int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64() = v + return b, nil +} + +func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64Ptr() = &v + return b, nil +} + +func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + *f.toInt32() = v + return b, nil +} + +func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, 
io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + *f.toInt32() = v + return b, nil +} + +func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32() = v + return b, nil +} + +func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32Ptr() = &v + return b, nil +} + +func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64() = v + return b[8:], nil +} + +func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, 
io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64() = v + return b[8:], nil +} + +func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32() = v + return b[4:], nil +} + +func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32Ptr() = &v + return b[4:], nil +} + +func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, 
io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + *f.toInt32() = v + return b[4:], nil +} + +func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.setInt32Ptr(v) + return b[4:], nil +} + +func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + return b[4:], nil +} + +func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + // Note: any length varint is allowed, even though any sane + // encoder will use one byte. + // See https://github.com/golang/protobuf/issues/76 + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + // TODO: check if x>1? Tests seem to indicate no. 
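All of the varint-based unmarshalers in this file reduce to one primitive: base-128 groups, least significant first, with the high bit of each byte as a continuation flag, plus the x>>1 ^ x<<63>>63 zigzag transform for sint fields. A self-contained round trip of both, mirroring the encodeVarint/decodeVarint helpers at the bottom of this file (the main function values are arbitrary examples):

package main

import "fmt"

// encodeVarint appends x as base-128 groups, least significant first,
// setting the high bit of every byte except the last.
func encodeVarint(b []byte, x uint64) []byte {
	for x >= 1<<7 {
		b = append(b, byte(x&0x7f|0x80))
		x >>= 7
	}
	return append(b, byte(x))
}

// decodeVarint reads a varint and reports how many bytes it consumed,
// returning (0, 0) on truncated or over-long input.
func decodeVarint(b []byte) (uint64, int) {
	var x uint64
	for i := 0; i < len(b) && i < 10; i++ {
		x |= uint64(b[i]&0x7f) << uint(7*i)
		if b[i] < 0x80 {
			return x, i + 1
		}
	}
	return 0, 0
}

func main() {
	// Zigzag maps small negative ints to small unsigned ints: -1 -> 1, 1 -> 2, ...
	v := int64(-3)
	z := uint64(v<<1) ^ uint64(v>>63) // sint64 encode
	buf := encodeVarint(nil, z)

	x, n := decodeVarint(buf)
	d := int64(x>>1) ^ int64(x)<<63>>63 // decode, as in unmarshalSint64Value
	fmt.Println(buf, n, d)              // [5] 1 -3
}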
+ v := x != 0 + *f.toBool() = v + return b[n:], nil +} + +func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + *f.toBoolPtr() = &v + return b[n:], nil +} + +func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + b = b[n:] + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + return b[n:], nil +} + +func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64() = v + return b[8:], nil +} + +func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64Ptr() = &v + return b[8:], nil +} + +func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32() = v + return b[4:], nil +} + +func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32Ptr() = &v + return b[4:], nil +} + +func unmarshalFloat32Slice(b []byte, 
f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + return b[x:], nil +} + +func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + return b[x:], nil +} + +func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + return b[x:], nil +} + +func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +var emptyBuf [0]byte + +func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // The use of append here is a trick which avoids the zeroing + 
// that would be required if we used a make/copy pair. + // We append to emptyBuf instead of nil because we want + // a non-nil result even when the length is 0. + v := append(emptyBuf[:], b[:x]...) + *f.toBytes() = v + return b[x:], nil +} + +func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := append(emptyBuf[:], b[:x]...) + s := f.toBytesSlice() + *s = append(*s, v) + return b[x:], nil +} + +func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[y:], err + } +} + +func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." 
+ r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[y:], err + } +} + +func makeUnmarshalMap(f *reflect.StructField) unmarshaler { + t := f.Type + kt := t.Key() + vt := t.Elem() + tagArray := strings.Split(f.Tag.Get("protobuf"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + for _, t := range tagArray { + if strings.HasPrefix(t, "customtype=") { + valTags = append(valTags, t) + } + if t == "stdtime" { + valTags = append(valTags, t) + } + if t == "stdduration" { + valTags = append(valTags, t) + } + if t == "wktptr" { + valTags = append(valTags, t) + } + } + unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) + unmarshalVal := typeUnmarshaler(vt, strings.Join(valTags, ",")) + return func(b []byte, f pointer, w int) ([]byte, error) { + // The map entry is a submessage. Figure out how big it is. + if w != WireBytes { + return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + r := b[x:] // unused data to return + b = b[:x] // data for map entry + + // Note: we could use #keys * #values ~= 200 functions + // to do map decoding without reflection. Probably not worth it. + // Maps will be somewhat slow. Oh well. + + // Read key and value from data. + var nerr nonFatal + k := reflect.New(kt) + v := reflect.New(vt) + for len(b) > 0 { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + wire := int(x) & 7 + b = b[n:] + + var err error + switch x >> 3 { + case 1: + b, err = unmarshalKey(b, valToPointer(k), wire) + case 2: + b, err = unmarshalVal(b, valToPointer(v), wire) + default: + err = errInternalBadWireType // skip unknown tag + } + + if nerr.Merge(err) { + continue + } + if err != errInternalBadWireType { + return nil, err + } + + // Skip past unknown fields. + b, err = skipField(b, wire) + if err != nil { + return nil, err + } + } + + // Get map, allocate if needed. + m := f.asPointerTo(t).Elem() // an addressable map[K]T + if m.IsNil() { + m.Set(reflect.MakeMap(t)) + } + + // Insert into map. + m.SetMapIndex(k.Elem(), v.Elem()) + + return r, nerr.E + } +} + +// makeUnmarshalOneof makes an unmarshaler for oneof fields. +// for: +// message Msg { +// oneof F { +// int64 X = 1; +// float64 Y = 2; +// } +// } +// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). +// ityp is the interface type of the oneof field (e.g. isMsg_F). +// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). +// Note that this function will be called once for each case in the oneof. +func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { + sf := typ.Field(0) + field0 := toField(&sf) + return func(b []byte, f pointer, w int) ([]byte, error) { + // Allocate holder for value. + v := reflect.New(typ) + + // Unmarshal data into holder. + // We unmarshal into the first field of the holder object. + var err error + var nerr nonFatal + b, err = unmarshal(b, valToPointer(v).offset(field0), w) + if !nerr.Merge(err) { + return nil, err + } + + // Write pointer to holder into target field. + f.asPointerTo(ityp).Elem().Set(v) + + return b, nerr.E + } +} + +// Error used by decode internally. +var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") + +// skipField skips past a field of type wire and returns the remaining bytes. 
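Both skipField below and findEndGroup after it rely on one wire-format invariant: every field begins with a varint key holding (field_number << 3) | wire_type, which is exactly what the x>>3 and x&7 splits in this file undo. A standalone sketch of that packing (illustrative, not part of the vendored file):

package main

import "fmt"

func main() {
	// Key for field 2 with wire type 2 (WireBytes, length-delimited).
	const fieldNum, wireBytes = 2, 2
	key := uint64(fieldNum)<<3 | wireBytes
	fmt.Printf("key = %d (0x%x)\n", key, key) // key = 18 (0x12)
	// Decoding reverses the packing.
	fmt.Println("field:", key>>3, "wire:", key&7) // field: 2 wire: 2
}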
+func skipField(b []byte, wire int) ([]byte, error) { + switch wire { + case WireVarint: + _, k := decodeVarint(b) + if k == 0 { + return b, io.ErrUnexpectedEOF + } + b = b[k:] + case WireFixed32: + if len(b) < 4 { + return b, io.ErrUnexpectedEOF + } + b = b[4:] + case WireFixed64: + if len(b) < 8 { + return b, io.ErrUnexpectedEOF + } + b = b[8:] + case WireBytes: + m, k := decodeVarint(b) + if k == 0 || uint64(len(b)-k) < m { + return b, io.ErrUnexpectedEOF + } + b = b[uint64(k)+m:] + case WireStartGroup: + _, i := findEndGroup(b) + if i == -1 { + return b, io.ErrUnexpectedEOF + } + b = b[i:] + default: + return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) + } + return b, nil +} + +// findEndGroup finds the index of the next EndGroup tag. +// Groups may be nested, so the "next" EndGroup tag is the first +// unpaired EndGroup. +// findEndGroup returns the indexes of the start and end of the EndGroup tag. +// Returns (-1,-1) if it can't find one. +func findEndGroup(b []byte) (int, int) { + depth := 1 + i := 0 + for { + x, n := decodeVarint(b[i:]) + if n == 0 { + return -1, -1 + } + j := i + i += n + switch x & 7 { + case WireVarint: + _, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + case WireFixed32: + if len(b)-4 < i { + return -1, -1 + } + i += 4 + case WireFixed64: + if len(b)-8 < i { + return -1, -1 + } + i += 8 + case WireBytes: + m, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + if uint64(len(b)-i) < m { + return -1, -1 + } + i += int(m) + case WireStartGroup: + depth++ + case WireEndGroup: + depth-- + if depth == 0 { + return j, i + } + default: + return -1, -1 + } + } +} + +// encodeVarint appends a varint-encoded integer to b and returns the result. +func encodeVarint(b []byte, x uint64) []byte { + for x >= 1<<7 { + b = append(b, byte(x&0x7f|0x80)) + x >>= 7 + } + return append(b, byte(x)) +} + +// decodeVarint reads a varint-encoded integer from b. +// Returns the decoded integer and the number of bytes read. +// If there is an error, it returns 0,0. 
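Before the decoder itself, a worked example of the encoding it reverses; the helper mirrors encodeVarint above but is self-contained so it runs on its own (illustrative sketch, not vendored code):

package main

import "fmt"

// Seven payload bits per byte, least-significant group first; the 0x80
// continuation bit is set on every byte except the last.
func encode(x uint64) []byte {
	var b []byte
	for x >= 1<<7 {
		b = append(b, byte(x&0x7f|0x80))
		x >>= 7
	}
	return append(b, byte(x))
}

func main() {
	fmt.Printf("% x\n", encode(1))   // 01
	fmt.Printf("% x\n", encode(300)) // ac 02 (300 = 0x12c: low bits 0101100, then 0000010)
}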
+func decodeVarint(b []byte) (uint64, int) { + var x, y uint64 + if len(b) == 0 { + goto bad + } + x = uint64(b[0]) + if x < 0x80 { + return x, 1 + } + x -= 0x80 + + if len(b) <= 1 { + goto bad + } + y = uint64(b[1]) + x += y << 7 + if y < 0x80 { + return x, 2 + } + x -= 0x80 << 7 + + if len(b) <= 2 { + goto bad + } + y = uint64(b[2]) + x += y << 14 + if y < 0x80 { + return x, 3 + } + x -= 0x80 << 14 + + if len(b) <= 3 { + goto bad + } + y = uint64(b[3]) + x += y << 21 + if y < 0x80 { + return x, 4 + } + x -= 0x80 << 21 + + if len(b) <= 4 { + goto bad + } + y = uint64(b[4]) + x += y << 28 + if y < 0x80 { + return x, 5 + } + x -= 0x80 << 28 + + if len(b) <= 5 { + goto bad + } + y = uint64(b[5]) + x += y << 35 + if y < 0x80 { + return x, 6 + } + x -= 0x80 << 35 + + if len(b) <= 6 { + goto bad + } + y = uint64(b[6]) + x += y << 42 + if y < 0x80 { + return x, 7 + } + x -= 0x80 << 42 + + if len(b) <= 7 { + goto bad + } + y = uint64(b[7]) + x += y << 49 + if y < 0x80 { + return x, 8 + } + x -= 0x80 << 49 + + if len(b) <= 8 { + goto bad + } + y = uint64(b[8]) + x += y << 56 + if y < 0x80 { + return x, 9 + } + x -= 0x80 << 56 + + if len(b) <= 9 { + goto bad + } + y = uint64(b[9]) + x += y << 63 + if y < 2 { + return x, 10 + } + +bad: + return 0, 0 +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go new file mode 100644 index 0000000000..00d6c7ad93 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go @@ -0,0 +1,385 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "io" + "reflect" +) + +func makeUnmarshalMessage(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. 
+		// The semantics of multiple submessages are weird. Instead of
+		// the last one winning (as it is for all other fields), multiple
+		// submessages are merged.
+		v := f // gogo: changed from v := f.getPointer()
+		if v.isNil() {
+			v = valToPointer(reflect.New(sub.typ))
+			f.setPointer(v)
+		}
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		return b[x:], err
+	}
+}
+
+func makeUnmarshalMessageSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		v := valToPointer(reflect.New(sub.typ))
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		f.appendRef(v, sub.typ) // gogo: changed from f.appendPointer(v)
+		return b[x:], err
+	}
+}
+
+func makeUnmarshalCustomPtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.New(sub.typ))
+		m := s.Interface().(custom)
+		if err := m.Unmarshal(b[:x]); err != nil {
+			return nil, err
+		}
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalCustomSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := reflect.New(sub.typ)
+		c := m.Interface().(custom)
+		if err := c.Unmarshal(b[:x]); err != nil {
+			return nil, err
+		}
+		v := valToPointer(m)
+		f.appendRef(v, sub.typ)
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalCustom(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+
+		m := f.asPointerTo(sub.typ).Interface().(custom)
+		if err := m.Unmarshal(b[:x]); err != nil {
+			return nil, err
+		}
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalTime(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &timestamp{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		t, err := timestampFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(t))
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalTimePtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &timestamp{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		t, err := timestampFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&t))
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalTimePtrSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &timestamp{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		t, err := timestampFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&t))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalTimeSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &timestamp{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		t, err := timestampFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(t))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalDurationPtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &duration{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		d, err := durationFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&d))
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalDuration(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &duration{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		d, err := durationFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(d))
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalDurationPtrSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &duration{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		d, err := durationFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		slice :=
f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&d)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeUnmarshalDurationSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(d)) + slice.Set(newSlice) + return b[x:], nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go new file mode 100644 index 0000000000..87416afe95 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text.go @@ -0,0 +1,930 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for writing the text protocol buffer format. 
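This file backs the package-level MarshalText/CompactText helpers defined at its end. A usage sketch, where pb is a hypothetical generated package (message Person { string name = 1; int32 id = 2; }):

package main

import (
	"fmt"

	proto "github.com/gogo/protobuf/proto"
	pb "example.com/pb" // hypothetical generated code
)

func main() {
	p := &pb.Person{Name: "Ada", Id: 1}
	fmt.Print(proto.MarshalTextString(p))
	// name: "Ada"
	// id: 1
	fmt.Println(proto.CompactTextString(p)) // roughly: name:"Ada" id:1
}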
+ +import ( + "bufio" + "bytes" + "encoding" + "errors" + "fmt" + "io" + "log" + "math" + "reflect" + "sort" + "strings" + "sync" + "time" +) + +var ( + newline = []byte("\n") + spaces = []byte(" ") + endBraceNewline = []byte("}\n") + backslashN = []byte{'\\', 'n'} + backslashR = []byte{'\\', 'r'} + backslashT = []byte{'\\', 't'} + backslashDQ = []byte{'\\', '"'} + backslashBS = []byte{'\\', '\\'} + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +// textWriter is an io.Writer that tracks its indentation level. +type textWriter struct { + ind int + complete bool // if the current position is a complete line + compact bool // whether to write out as a one-liner + w writer +} + +func (w *textWriter) WriteString(s string) (n int, err error) { + if !strings.Contains(s, "\n") { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + return io.WriteString(w.w, s) + } + // WriteString is typically called without newlines, so this + // codepath and its copy are rare. We copy to avoid + // duplicating all of Write's logic here. + return w.Write([]byte(s)) +} + +func (w *textWriter) Write(p []byte) (n int, err error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + n, err = w.w.Write(p) + w.complete = false + return n, err + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + if err := w.w.WriteByte(' '); err != nil { + return n, err + } + n++ + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + if i+1 < len(frags) { + if err := w.w.WriteByte('\n'); err != nil { + return n, err + } + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + err := w.w.WriteByte(c) + w.complete = c == '\n' + return err +} + +func (w *textWriter) indent() { w.ind++ } + +func (w *textWriter) unindent() { + if w.ind == 0 { + log.Print("proto: textWriter unindented too far") + return + } + w.ind-- +} + +func writeName(w *textWriter, props *Properties) error { + if _, err := w.WriteString(props.OrigName); err != nil { + return err + } + if props.Wire != "group" { + return w.WriteByte(':') + } + return nil +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' || ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// isAny reports whether sv is a google.protobuf.Any message +func isAny(sv reflect.Value) bool { + type wkt interface { + XXX_WellKnownType() string + } + t, ok := sv.Addr().Interface().(wkt) + return ok && t.XXX_WellKnownType() == "Any" +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). 
+//
+// It returns (true, error) when sv was written in expanded format or an error
+// was encountered.
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
+	turl := sv.FieldByName("TypeUrl")
+	val := sv.FieldByName("Value")
+	if !turl.IsValid() || !val.IsValid() {
+		return true, errors.New("proto: invalid google.protobuf.Any message")
+	}
+
+	b, ok := val.Interface().([]byte)
+	if !ok {
+		return true, errors.New("proto: invalid google.protobuf.Any message")
+	}
+
+	parts := strings.Split(turl.String(), "/")
+	mt := MessageType(parts[len(parts)-1])
+	if mt == nil {
+		return false, nil
+	}
+	m := reflect.New(mt.Elem())
+	if err := Unmarshal(b, m.Interface().(Message)); err != nil {
+		return false, nil
+	}
+	w.Write([]byte("["))
+	u := turl.String()
+	if requiresQuotes(u) {
+		writeString(w, u)
+	} else {
+		w.Write([]byte(u))
+	}
+	if w.compact {
+		w.Write([]byte("]:<"))
+	} else {
+		w.Write([]byte("]: <\n"))
+		w.ind++
+	}
+	if err := tm.writeStruct(w, m.Elem()); err != nil {
+		return true, err
+	}
+	if w.compact {
+		w.Write([]byte("> "))
+	} else {
+		w.ind--
+		w.Write([]byte(">\n"))
+	}
+	return true, nil
+}
+
+func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
+	if tm.ExpandAny && isAny(sv) {
+		if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
+			return err
+		}
+	}
+	st := sv.Type()
+	sprops := GetProperties(st)
+	for i := 0; i < sv.NumField(); i++ {
+		fv := sv.Field(i)
+		props := sprops.Prop[i]
+		name := st.Field(i).Name
+
+		if name == "XXX_NoUnkeyedLiteral" {
+			continue
+		}
+
+		if strings.HasPrefix(name, "XXX_") {
+			// There are two XXX_ fields:
+			//   XXX_unrecognized []byte
+			//   XXX_extensions   map[int32]proto.Extension
+			// The first is handled here;
+			// the second is handled at the bottom of this function.
+			if name == "XXX_unrecognized" && !fv.IsNil() {
+				if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
+					return err
+				}
+			}
+			continue
+		}
+		if fv.Kind() == reflect.Ptr && fv.IsNil() {
+			// Field not filled in. This could be an optional field or
+			// a required field that wasn't filled in. Either way, there
+			// isn't anything we can show for it.
+			continue
+		}
+		if fv.Kind() == reflect.Slice && fv.IsNil() {
+			// Repeated field that is empty, or a bytes field that is unused.
+			continue
+		}
+
+		if props.Repeated && fv.Kind() == reflect.Slice {
+			// Repeated field.
+			for j := 0; j < fv.Len(); j++ {
+				if err := writeName(w, props); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte(' '); err != nil {
+						return err
+					}
+				}
+				v := fv.Index(j)
+				if v.Kind() == reflect.Ptr && v.IsNil() {
+					// A nil message in a repeated field is not valid,
+					// but we can handle that more gracefully than panicking.
+					if _, err := w.Write([]byte("<nil>\n")); err != nil {
+						return err
+					}
+					continue
+				}
+				if len(props.Enum) > 0 {
+					if err := tm.writeEnum(w, v, props); err != nil {
+						return err
+					}
+				} else if err := tm.writeAny(w, v, props); err != nil {
+					return err
+				}
+				if err := w.WriteByte('\n'); err != nil {
+					return err
+				}
+			}
+			continue
+		}
+		if fv.Kind() == reflect.Map {
+			// Map fields are rendered as a repeated struct with key/value fields.
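			// For instance (illustrative aside, not part of the upstream comment):
			// a map<string, int32> field named "m" holding {"a": 1} is written
			// as one entry per key:
			//   m: <
			//     key: "a"
			//     value: 1
			//   >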
+ keys := fv.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := fv.MapIndex(key) + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + // open struct + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + // key + if _, err := w.WriteString("key:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + // nil values aren't legal, but we can avoid panicking because of them. + if val.Kind() != reflect.Ptr || !val.IsNil() { + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, val, props.MapValProp); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + // close struct + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { + // empty bytes field + continue + } + if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { + // proto3 non-repeated scalar field; skip if zero value + if isProto3Zero(fv) { + continue + } + } + + if fv.Kind() == reflect.Interface { + // Check if it is a oneof. + if st.Field(i).Tag.Get("protobuf_oneof") != "" { + // fv is nil, or holds a pointer to generated struct. + // That generated struct has exactly one field, + // which has a protobuf struct tag. + if fv.IsNil() { + continue + } + inner := fv.Elem().Elem() // interface -> *T -> T + tag := inner.Type().Field(0).Tag.Get("protobuf") + props = new(Properties) // Overwrite the outer props var, but not its pointee. + props.Parse(tag) + // Write the value in the oneof, not the oneof itself. + fv = inner.Field(0) + + // Special case to cope with malformed messages gracefully: + // If the value in the oneof is a nil pointer, don't panic + // in writeAny. + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Use errors.New so writeAny won't render quotes. + msg := errors.New("/* nil */") + fv = reflect.ValueOf(&msg).Elem() + } + } + } + + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + + if len(props.Enum) > 0 { + if err := tm.writeEnum(w, fv, props); err != nil { + return err + } + } else if err := tm.writeAny(w, fv, props); err != nil { + return err + } + + if err := w.WriteByte('\n'); err != nil { + return err + } + } + + // Extensions (the XXX_extensions field). + pv := sv + if pv.CanAddr() { + pv = sv.Addr() + } else { + pv = reflect.New(sv.Type()) + pv.Elem().Set(sv) + } + if _, err := extendable(pv.Interface()); err == nil { + if err := tm.writeExtensions(w, pv); err != nil { + return err + } + } + + return nil +} + +var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + +// writeAny writes an arbitrary field. 
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { + v = reflect.Indirect(v) + + if props != nil { + if len(props.CustomType) > 0 { + custom, ok := v.Interface().(Marshaler) + if ok { + data, err := custom.Marshal() + if err != nil { + return err + } + if err := writeString(w, string(data)); err != nil { + return err + } + return nil + } + } else if len(props.CastType) > 0 { + if _, ok := v.Interface().(interface { + String() string + }); ok { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + _, err := fmt.Fprintf(w, "%d", v.Interface()) + return err + } + } + } else if props.StdTime { + t, ok := v.Interface().(time.Time) + if !ok { + return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface()) + } + tproto, err := timestampProto(t) + if err != nil { + return err + } + propsCopy := *props // Make a copy so that this is goroutine-safe + propsCopy.StdTime = false + err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy) + return err + } else if props.StdDuration { + d, ok := v.Interface().(time.Duration) + if !ok { + return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface()) + } + dproto := durationProto(d) + propsCopy := *props // Make a copy so that this is goroutine-safe + propsCopy.StdDuration = false + err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy) + return err + } + } + + // Floats have special cases. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + x := v.Float() + var b []byte + switch { + case math.IsInf(x, 1): + b = posInf + case math.IsInf(x, -1): + b = negInf + case math.IsNaN(x): + b = nan + } + if b != nil { + _, err := w.Write(b) + return err + } + // Other values are handled below. + } + + // We don't attempt to serialise every possible value type; only those + // that can occur in protocol buffers. + switch v.Kind() { + case reflect.Slice: + // Should only be a []byte; repeated fields are handled in writeStruct. + if err := writeString(w, string(v.Bytes())); err != nil { + return err + } + case reflect.String: + if err := writeString(w, v.String()); err != nil { + return err + } + case reflect.Struct: + // Required/optional group/message. + var bra, ket byte = '<', '>' + if props != nil && props.Wire == "group" { + bra, ket = '{', '}' + } + if err := w.WriteByte(bra); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if v.CanAddr() { + // Calling v.Interface on a struct causes the reflect package to + // copy the entire struct. This is racy with the new Marshaler + // since we atomically update the XXX_sizecache. + // + // Thus, we retrieve a pointer to the struct if possible to avoid + // a race since v.Interface on the pointer doesn't copy the struct. + // + // If v is not addressable, then we are not worried about a race + // since it implies that the binary Marshaler cannot possibly be + // mutating this value. 
+ v = v.Addr() + } + if v.Type().Implements(textMarshalerType) { + text, err := v.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return err + } + if _, err = w.Write(text); err != nil { + return err + } + } else { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if err := tm.writeStruct(w, v); err != nil { + return err + } + } + w.unindent() + if err := w.WriteByte(ket); err != nil { + return err + } + default: + _, err := fmt.Fprint(w, v.Interface()) + return err + } + return nil +} + +// equivalent to C's isprint. +func isprint(c byte) bool { + return c >= 0x20 && c < 0x7f +} + +// writeString writes a string in the protocol buffer text format. +// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. +// These differences are to maintain interoperability with the other +// languages' implementations of the text format. +func writeString(w *textWriter, s string) error { + // use WriteByte here to get any needed indent + if err := w.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. + switch c := s[i]; c { + case '\n': + _, err = w.w.Write(backslashN) + case '\r': + _, err = w.w.Write(backslashR) + case '\t': + _, err = w.w.Write(backslashT) + case '"': + _, err = w.w.Write(backslashDQ) + case '\\': + _, err = w.w.Write(backslashBS) + default: + if isprint(c) { + err = w.w.WriteByte(c) + } else { + _, err = fmt.Fprintf(w.w, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return w.WriteByte('"') +} + +func writeUnknownStruct(w *textWriter, data []byte) (err error) { + if !w.compact { + if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { + return err + } + } + b := NewBuffer(data) + for b.index < len(b.buf) { + x, err := b.DecodeVarint() + if err != nil { + _, ferr := fmt.Fprintf(w, "/* %v */\n", err) + return ferr + } + wire, tag := x&7, x>>3 + if wire == WireEndGroup { + w.unindent() + if _, werr := w.Write(endBraceNewline); werr != nil { + return werr + } + continue + } + if _, ferr := fmt.Fprint(w, tag); ferr != nil { + return ferr + } + if wire != WireStartGroup { + if err = w.WriteByte(':'); err != nil { + return err + } + } + if !w.compact || wire == WireStartGroup { + if err = w.WriteByte(' '); err != nil { + return err + } + } + switch wire { + case WireBytes: + buf, e := b.DecodeRawBytes(false) + if e == nil { + _, err = fmt.Fprintf(w, "%q", buf) + } else { + _, err = fmt.Fprintf(w, "/* %v */", e) + } + case WireFixed32: + x, err = b.DecodeFixed32() + err = writeUnknownInt(w, x, err) + case WireFixed64: + x, err = b.DecodeFixed64() + err = writeUnknownInt(w, x, err) + case WireStartGroup: + err = w.WriteByte('{') + w.indent() + case WireVarint: + x, err = b.DecodeVarint() + err = writeUnknownInt(w, x, err) + default: + _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) + } + if err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + return nil +} + +func writeUnknownInt(w *textWriter, x uint64, err error) error { + if err == nil { + _, err = fmt.Fprint(w, x) + } else { + _, err = fmt.Fprintf(w, "/* %v */", err) + } + return err +} + +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { 
return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+// writeExtensions writes all the extensions in pv.
+// pv is assumed to be a pointer to a protocol message struct that is extendable.
+func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
+	emap := extensionMaps[pv.Type().Elem()]
+	e := pv.Interface().(Message)
+
+	var m map[int32]Extension
+	var mu sync.Locker
+	if em, ok := e.(extensionsBytes); ok {
+		eb := em.GetExtensions()
+		var err error
+		m, err = BytesToExtensionsMap(*eb)
+		if err != nil {
+			return err
+		}
+		mu = notLocker{}
+	} else if _, ok := e.(extendableProto); ok {
+		ep, _ := extendable(e)
+		m, mu = ep.extensionsRead()
+		if m == nil {
+			return nil
+		}
+	}
+
+	// Order the extensions by ID.
+	// This isn't strictly necessary, but it will give us
+	// canonical output, which will also make testing easier.
+
+	mu.Lock()
+	ids := make([]int32, 0, len(m))
+	for id := range m {
+		ids = append(ids, id)
+	}
+	sort.Sort(int32Slice(ids))
+	mu.Unlock()
+
+	for _, extNum := range ids {
+		ext := m[extNum]
+		var desc *ExtensionDesc
+		if emap != nil {
+			desc = emap[extNum]
+		}
+		if desc == nil {
+			// Unknown extension.
+			if err := writeUnknownStruct(w, ext.enc); err != nil {
+				return err
+			}
+			continue
+		}
+
+		pb, err := GetExtension(e, desc)
+		if err != nil {
+			return fmt.Errorf("failed getting extension: %v", err)
+		}
+
+		// Repeated extensions will appear as a slice.
+		if !desc.repeated() {
+			if err := tm.writeExtension(w, desc.Name, pb); err != nil {
+				return err
+			}
+		} else {
+			v := reflect.ValueOf(pb)
+			for i := 0; i < v.Len(); i++ {
+				if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
+	if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
+		return err
+	}
+	if !w.compact {
+		if err := w.WriteByte(' '); err != nil {
+			return err
+		}
+	}
+	if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+		return err
+	}
+	if err := w.WriteByte('\n'); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (w *textWriter) writeIndent() {
+	if !w.complete {
+		return
+	}
+	remain := w.ind * 2
+	for remain > 0 {
+		n := remain
+		if n > len(spaces) {
+			n = len(spaces)
+		}
+		w.w.Write(spaces[:n])
+		remain -= n
+	}
+	w.complete = false
+}
+
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+	Compact   bool // use compact text format (one line).
+	ExpandAny bool // expand google.protobuf.Any messages of known types
+}
+
+// Marshal writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
+	val := reflect.ValueOf(pb)
+	if pb == nil || val.IsNil() {
+		w.Write([]byte("<nil>"))
+		return nil
+	}
+	var bw *bufio.Writer
+	ww, ok := w.(writer)
+	if !ok {
+		bw = bufio.NewWriter(w)
+		ww = bw
+	}
+	aw := &textWriter{
+		w:        ww,
+		complete: true,
+		compact:  tm.Compact,
+	}
+
+	if etm, ok := pb.(encoding.TextMarshaler); ok {
+		text, err := etm.MarshalText()
+		if err != nil {
+			return err
+		}
+		if _, err = aw.Write(text); err != nil {
+			return err
+		}
+		if bw != nil {
+			return bw.Flush()
+		}
+		return nil
+	}
+	// Dereference the received pointer so we don't have outer < and >.
+ v := reflect.Indirect(val) + if err := tm.writeStruct(aw, v); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil +} + +// Text is the same as Marshal, but returns the string directly. +func (tm *TextMarshaler) Text(pb Message) string { + var buf bytes.Buffer + tm.Marshal(&buf, pb) + return buf.String() +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// TODO: consider removing some of the Marshal functions below. + +// MarshalText writes a given protocol buffer in text format. +// The only errors returned are from w. +func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } + +// MarshalTextString is the same as MarshalText, but returns the string directly. +func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } + +// CompactText writes a given protocol buffer in compact text format (one line). +func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } + +// CompactTextString is the same as CompactText, but returns the string directly. +func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/vendor/github.com/gogo/protobuf/proto/text_gogo.go new file mode 100644 index 0000000000..1d6c6aa0e4 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text_gogo.go @@ -0,0 +1,57 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
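What this gogo-specific file adds is symbolic enum rendering: when the generated code has registered the enum's name map, writeEnum prints the name rather than the number, and otherwise falls back to the default rendering. A sketch with a hypothetical generated package pb (enum Corpus { UNIVERSAL = 0; WEB = 1; } on message Doc):

package main

import (
	"fmt"

	proto "github.com/gogo/protobuf/proto"
	pb "example.com/pb" // hypothetical generated code
)

func main() {
	d := &pb.Doc{Corpus: pb.Corpus_WEB}
	fmt.Print(proto.MarshalTextString(d)) // corpus: WEB (rather than corpus: 1)
}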
+ +package proto + +import ( + "fmt" + "reflect" +) + +func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error { + m, ok := enumStringMaps[props.Enum] + if !ok { + if err := tm.writeAny(w, v, props); err != nil { + return err + } + } + key := int32(0) + if v.Kind() == reflect.Ptr { + key = int32(v.Elem().Int()) + } else { + key = int32(v.Int()) + } + s, ok := m[key] + if !ok { + if err := tm.writeAny(w, v, props); err != nil { + return err + } + } + _, err := fmt.Fprint(w, s) + return err +} diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go new file mode 100644 index 0000000000..f85c0cc81a --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text_parser.go @@ -0,0 +1,1018 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for parsing the Text protocol buffer format. +// TODO: message sets. 
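The parser below is what proto.UnmarshalText drives; a round-trip sketch reusing the hypothetical pb.Person from earlier:

package main

import (
	"fmt"

	proto "github.com/gogo/protobuf/proto"
	pb "example.com/pb" // hypothetical generated code
)

func main() {
	var p pb.Person
	if err := proto.UnmarshalText(`name: "Ada" id: 1`, &p); err != nil {
		panic(err)
	}
	fmt.Println(p.GetName(), p.GetId()) // Ada 1
}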
+ +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// Error string emitted when deserializing Any and fields are already set +const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" + +type ParseError struct { + Message string + Line int // 1-based line number + Offset int // 0-based byte offset from start of input +} + +func (p *ParseError) Error() string { + if p.Line == 1 { + // show offset only for first line + return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) + } + return fmt.Sprintf("line %d: %v", p.Line, p.Message) +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func (t *token) String() string { + if t.err == nil { + return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) + } + return fmt.Sprintf("parse error: %v", t.err) +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +// Numbers and identifiers are matched by [-+._A-Za-z0-9] +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += 
len(p.cur.value) +} + +var ( + errBadUTF8 = errors.New("proto: bad UTF-8") +) + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) + s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + ss := string(r) + s[:2] + s = s[2:] + i, err := strconv.ParseUint(ss, 8, 8) + if err != nil { + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) + } + return string([]byte{byte(i)}), s, nil + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) + } + ss := s[:n] + s = s[n:] + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(rune(i)), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. 
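		// Illustrative aside (not upstream): adjacent quoted strings
		// concatenate the way C string literals do, so
		//   name: "Hello, " "world"
		// parses with name set to "Hello, world".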
+		cat := p.cur
+		for {
+			p.skipWhitespace()
+			if p.done || !isQuote(p.s[0]) {
+				break
+			}
+			p.advance()
+			if p.cur.err != nil {
+				return &p.cur
+			}
+			cat.value += " " + p.cur.value
+			cat.unquoted += p.cur.unquoted
+		}
+		p.done = false // parser may have seen EOF, but we want to return cat
+		p.cur = cat
+	}
+	return &p.cur
+}
+
+func (p *textParser) consumeToken(s string) error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != s {
+		p.back()
+		return p.errorf("expected %q, found %q", s, tok.value)
+	}
+	return nil
+}
+
+// Return a RequiredNotSetError indicating which required field was not set.
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
+	st := sv.Type()
+	sprops := GetProperties(st)
+	for i := 0; i < st.NumField(); i++ {
+		if !isNil(sv.Field(i)) {
+			continue
+		}
+
+		props := sprops.Prop[i]
+		if props.Required {
+			return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
+		}
+	}
+	return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
+}
+
+// Returns the index in the struct for the named field, as well as the parsed tag properties.
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
+	i, ok := sprops.decoderOrigNames[name]
+	if ok {
+		return i, sprops.Prop[i], true
+	}
+	return -1, nil, false
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != ":" {
+		// Colon is optional when the field is a group or message.
+		needColon := true
+		switch props.Wire {
+		case "group":
+			needColon = false
+		case "bytes":
+			// A "bytes" field is either a message, a string, or a repeated field;
+			// those three become *T, *string and []T respectively, so we can check for
+			// this field being a pointer to a non-string.
+			if typ.Kind() == reflect.Ptr {
+				// *T or *string
+				if typ.Elem().Kind() == reflect.String {
+					break
+				}
+			} else if typ.Kind() == reflect.Slice {
+				// []T or []*T
+				if typ.Elem().Kind() != reflect.Ptr {
+					break
+				}
+			} else if typ.Kind() == reflect.String {
+				// The proto3 exception is for a string field,
+				// which requires a colon.
+				break
+			}
+			needColon = false
+		}
+		if needColon {
+			return p.errorf("expected ':', found %q", tok.value)
+		}
+		p.back()
+	}
+	return nil
+}
+
+func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
+	st := sv.Type()
+	sprops := GetProperties(st)
+	reqCount := sprops.reqCount
+	var reqFieldErr error
+	fieldSet := make(map[string]bool)
+	// A struct is a sequence of "name: value", terminated by one of
+	// '>' or '}', or the end of the input. A name may also be
+	// "[extension]" or "[type/url]".
+	//
+	// The whole struct can also be an expanded Any message, like:
+	// [type/url] < ... struct contents ... >
+	for {
+		tok := p.next()
+		if tok.err != nil {
+			return tok.err
+		}
+		if tok.value == terminator {
+			break
+		}
+		if tok.value == "[" {
+			// Looks like an extension or an Any.
+			//
+			// TODO: Check whether we need to handle
+			// namespace rooted names (e.g. ".something.Foo").
+			extName, err := p.consumeExtName()
+			if err != nil {
+				return err
+			}
+
+			if s := strings.LastIndex(extName, "/"); s >= 0 {
+				// If it contains a slash, it's an Any type URL.
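				// Illustrative aside (not upstream): an expanded Any looks like
				//   [type.googleapis.com/google.protobuf.Duration] < seconds: 1 >
				// and the bare message name after the last slash is what gets
				// looked up via MessageType below.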
+ messageName := extName[s+1:] + mt := MessageType(messageName) + if mt == nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) + } + tok = p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + v := reflect.New(mt.Elem()) + if pe := p.readStruct(v.Elem(), terminator); pe != nil { + return pe + } + b, err := Marshal(v.Interface().(Message)) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", messageName, err) + } + if fieldSet["type_url"] { + return p.errorf(anyRepeatedlyUnpacked, "type_url") + } + if fieldSet["value"] { + return p.errorf(anyRepeatedlyUnpacked, "value") + } + sv.FieldByName("TypeUrl").SetString(extName) + sv.FieldByName("Value").SetBytes(b) + fieldSet["type_url"] = true + fieldSet["value"] = true + continue + } + + var desc *ExtensionDesc + // This could be faster, but it's functional. + // TODO: Do something smarter than a linear scan. + for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { + if d.Name == extName { + desc = d + break + } + } + if desc == nil { + return p.errorf("unrecognized extension %q", extName) + } + + props := &Properties{} + props.Parse(desc.Tag) + + typ := reflect.TypeOf(desc.ExtensionType) + if err := p.checkForColon(props, typ); err != nil { + return err + } + + rep := desc.repeated() + + // Read the extension structure, and set it in + // the value we're constructing. + var ext reflect.Value + if !rep { + ext = reflect.New(typ).Elem() + } else { + ext = reflect.New(typ.Elem()).Elem() + } + if err := p.readAny(ext, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + ep := sv.Addr().Interface().(Message) + if !rep { + SetExtension(ep, desc, ext.Interface()) + } else { + old, err := GetExtension(ep, desc) + var sl reflect.Value + if err == nil { + sl = reflect.ValueOf(old) // existing slice + } else { + sl = reflect.MakeSlice(typ, 0, 1) + } + sl = reflect.Append(sl, ext) + SetExtension(ep, desc, sl.Interface()) + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. + name := tok.value + var dst reflect.Value + fi, props, ok := structFieldByName(sprops, name) + if ok { + dst = sv.Field(fi) + } else if oop, ok := sprops.OneofTypes[name]; ok { + // It is a oneof. + props = oop.Prop + nv := reflect.New(oop.Type.Elem()) + dst = nv.Elem().Field(0) + field := sv.Field(oop.Field) + if !field.IsNil() { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) + } + field.Set(nv) + } + if !dst.IsValid() { + return p.errorf("unknown field name %q in %v", name, st) + } + + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Construct the map if it doesn't already exist. 
+ if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. See b/28924776 for a time + // this went wrong. + + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.MapKeyProp); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + case "value": + if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.MapValProp); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + default: + p.back() + return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + + dst.SetMapIndex(key, val) + continue + } + + // Check that it's not already set if it's not a repeated field. + if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) + } + + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Parse into the field. + fieldSet[name] = true + if err := p.readAny(dst, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + if props.Required { + reqCount-- + } + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + + } + + if reqCount > 0 { + return p.missingRequiredFieldError(sv) + } + return reqFieldErr +} + +// consumeExtName consumes extension name or expanded Any type URL and the +// following ']'. It returns the name or URL consumed. +func (p *textParser) consumeExtName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in readStruct to provide backward compatibility. 
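+// For example, `foo: 1; bar: 2`, `foo: 1, bar: 2`, and `foo: 1 bar: 2`
+// all parse identically (an illustrative note; the field names are
+// hypothetical).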
+func (p *textParser) consumeOptionalSeparator() error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ";" && tok.value != "," {
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "" {
+ return p.errorf("unexpected EOF")
+ }
+ if len(props.CustomType) > 0 {
+ if props.Repeated {
+ t := reflect.TypeOf(v.Interface())
+ if t.Kind() == reflect.Slice {
+ tc := reflect.TypeOf(new(Marshaler))
+ ok := t.Elem().Implements(tc.Elem())
+ if ok {
+ fv := v
+ flen := fv.Len()
+ if flen == fv.Cap() {
+ nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1)
+ reflect.Copy(nav, fv)
+ fv.Set(nav)
+ }
+ fv.SetLen(flen + 1)
+
+ // Read one.
+ p.back()
+ return p.readAny(fv.Index(flen), props)
+ }
+ }
+ }
+ if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+ custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler)
+ err := custom.Unmarshal([]byte(tok.unquoted))
+ if err != nil {
+ return p.errorf("%v %v: %v", err, v.Type(), tok.value)
+ }
+ v.Set(reflect.ValueOf(custom))
+ } else {
+ custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler)
+ err := custom.Unmarshal([]byte(tok.unquoted))
+ if err != nil {
+ return p.errorf("%v %v: %v", err, v.Type(), tok.value)
+ }
+ v.Set(reflect.Indirect(reflect.ValueOf(custom)))
+ }
+ return nil
+ }
+ if props.StdTime {
+ fv := v
+ p.back()
+ props.StdTime = false
+ tproto := &timestamp{}
+ err := p.readAny(reflect.ValueOf(tproto).Elem(), props)
+ props.StdTime = true
+ if err != nil {
+ return err
+ }
+ tim, err := timestampFromProto(tproto)
+ if err != nil {
+ return err
+ }
+ if props.Repeated {
+ t := reflect.TypeOf(v.Interface())
+ if t.Kind() == reflect.Slice {
+ if t.Elem().Kind() == reflect.Ptr {
+ ts := fv.Interface().([]*time.Time)
+ ts = append(ts, &tim)
+ fv.Set(reflect.ValueOf(ts))
+ return nil
+ } else {
+ ts := fv.Interface().([]time.Time)
+ ts = append(ts, tim)
+ fv.Set(reflect.ValueOf(ts))
+ return nil
+ }
+ }
+ }
+ if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+ v.Set(reflect.ValueOf(&tim))
+ } else {
+ v.Set(reflect.Indirect(reflect.ValueOf(&tim)))
+ }
+ return nil
+ }
+ if props.StdDuration {
+ fv := v
+ p.back()
+ props.StdDuration = false
+ dproto := &duration{}
+ err := p.readAny(reflect.ValueOf(dproto).Elem(), props)
+ props.StdDuration = true
+ if err != nil {
+ return err
+ }
+ dur, err := durationFromProto(dproto)
+ if err != nil {
+ return err
+ }
+ if props.Repeated {
+ t := reflect.TypeOf(v.Interface())
+ if t.Kind() == reflect.Slice {
+ if t.Elem().Kind() == reflect.Ptr {
+ ds := fv.Interface().([]*time.Duration)
+ ds = append(ds, &dur)
+ fv.Set(reflect.ValueOf(ds))
+ return nil
+ } else {
+ ds := fv.Interface().([]time.Duration)
+ ds = append(ds, dur)
+ fv.Set(reflect.ValueOf(ds))
+ return nil
+ }
+ }
+ }
+ if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+ v.Set(reflect.ValueOf(&dur))
+ } else {
+ v.Set(reflect.Indirect(reflect.ValueOf(&dur)))
+ }
+ return nil
+ }
+ switch fv := v; fv.Kind() {
+ case reflect.Slice:
+ at := v.Type()
+ if at.Elem().Kind() == reflect.Uint8 {
+ // Special case for []byte
+ if tok.value[0] != '"' && tok.value[0] != '\'' {
+ // Deliberately written out here, as the error after
+ // this switch statement would write "invalid []byte: ...",
+ // which is not as user-friendly.
+ return p.errorf("invalid string: %v", tok.value) + } + bytes := []byte(tok.unquoted) + fv.Set(reflect.ValueOf(bytes)) + return nil + } + // Repeated field. + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + err := p.readAny(fv.Index(fv.Len()-1), props) + if err != nil { + return err + } + ntok := p.next() + if ntok.err != nil { + return ntok.err + } + if ntok.value == "]" { + break + } + if ntok.value != "," { + return p.errorf("Expected ']' or ',' found %q", ntok.value) + } + } + return nil + } + // One value of the repeated field. + p.back() + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + return p.readAny(fv.Index(fv.Len()-1), props) + case reflect.Bool: + // true/1/t/True or false/f/0/False. + switch tok.value { + case "true", "1", "t", "True": + fv.SetBool(true) + return nil + case "false", "0", "f", "False": + fv.SetBool(false) + return nil + } + case reflect.Float32, reflect.Float64: + v := tok.value + // Ignore 'f' for compatibility with output generated by C++, but don't + // remove 'f' when the value is "-inf" or "inf". + if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { + v = v[:len(v)-1] + } + if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { + fv.SetFloat(f) + return nil + } + case reflect.Int8: + if x, err := strconv.ParseInt(tok.value, 0, 8); err == nil { + fv.SetInt(x) + return nil + } + case reflect.Int16: + if x, err := strconv.ParseInt(tok.value, 0, 16); err == nil { + fv.SetInt(x) + return nil + } + case reflect.Int32: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + fv.SetInt(x) + return nil + } + + if len(props.Enum) == 0 { + break + } + m, ok := enumValueMaps[props.Enum] + if !ok { + break + } + x, ok := m[tok.value] + if !ok { + break + } + fv.SetInt(int64(x)) + return nil + case reflect.Int64: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + fv.SetInt(x) + return nil + } + + case reflect.Ptr: + // A basic field (indirected through pointer), or a repeated message/group + p.back() + fv.Set(reflect.New(fv.Type().Elem())) + return p.readAny(fv.Elem(), props) + case reflect.String: + if tok.value[0] == '"' || tok.value[0] == '\'' { + fv.SetString(tok.unquoted) + return nil + } + case reflect.Struct: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + // TODO: Handle nested messages which implement encoding.TextUnmarshaler. + return p.readStruct(fv, terminator) + case reflect.Uint8: + if x, err := strconv.ParseUint(tok.value, 0, 8); err == nil { + fv.SetUint(x) + return nil + } + case reflect.Uint16: + if x, err := strconv.ParseUint(tok.value, 0, 16); err == nil { + fv.SetUint(x) + return nil + } + case reflect.Uint32: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + fv.SetUint(uint64(x)) + return nil + } + case reflect.Uint64: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + fv.SetUint(x) + return nil + } + } + return p.errorf("invalid %v: %v", v.Type(), tok.value) +} + +// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb +// before starting to unmarshal, so any existing data in pb is always removed. +// If a required field is not set and no other error occurs, +// UnmarshalText returns *RequiredNotSetError. 
+func UnmarshalText(s string, pb Message) error { + if um, ok := pb.(encoding.TextUnmarshaler); ok { + return um.UnmarshalText([]byte(s)) + } + pb.Reset() + v := reflect.ValueOf(pb) + return newTextParser(s).readStruct(v.Elem(), "") +} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp.go b/vendor/github.com/gogo/protobuf/proto/timestamp.go new file mode 100644 index 0000000000..9324f6542b --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/timestamp.go @@ -0,0 +1,113 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. 
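+//
+// For example (an illustrative note, derived from the checks below):
+//
+//	validateTimestamp(&timestamp{Seconds: minValidSeconds}) // nil
+//	validateTimestamp(&timestamp{Seconds: 0, Nanos: 1e9})   // error: nanos out of range
+//	validateTimestamp(&timestamp{Seconds: maxValidSeconds}) // error: after 10000-01-01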
+func validateTimestamp(ts *timestamp) error {
+ if ts == nil {
+ return errors.New("timestamp: nil Timestamp")
+ }
+ if ts.Seconds < minValidSeconds {
+ return fmt.Errorf("timestamp: %#v before 0001-01-01", ts)
+ }
+ if ts.Seconds >= maxValidSeconds {
+ return fmt.Errorf("timestamp: %#v after 10000-01-01", ts)
+ }
+ if ts.Nanos < 0 || ts.Nanos >= 1e9 {
+ return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts)
+ }
+ return nil
+}
+
+// timestampFromProto converts a google.protobuf.Timestamp proto to a time.Time.
+// It returns an error if the argument is invalid.
+//
+// Unlike most Go functions, if timestampFromProto returns an error, the first
+// return value is not the zero time.Time. Instead, it is the value obtained from the
+// time.Unix function when passed the contents of the Timestamp, in the UTC
+// locale. This may or may not be a meaningful time; many invalid Timestamps
+// do map to valid time.Times.
+//
+// A nil Timestamp returns an error. The first return value in that case is
+// undefined.
+func timestampFromProto(ts *timestamp) (time.Time, error) {
+ // Don't return the zero value on error, because it corresponds to a valid
+ // timestamp. Instead return whatever time.Unix gives us.
+ var t time.Time
+ if ts == nil {
+ t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+ } else {
+ t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+ }
+ return t, validateTimestamp(ts)
+}
+
+// timestampProto converts the time.Time to a google.protobuf.Timestamp proto.
+// It returns an error if the resulting Timestamp is invalid.
+func timestampProto(t time.Time) (*timestamp, error) {
+ seconds := t.Unix()
+ nanos := int32(t.Sub(time.Unix(seconds, 0)))
+ ts := &timestamp{
+ Seconds: seconds,
+ Nanos: nanos,
+ }
+ if err := validateTimestamp(ts); err != nil {
+ return nil, err
+ }
+ return ts, nil
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
new file mode 100644
index 0000000000..38439fa990
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
@@ -0,0 +1,49 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2016, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" + "time" +) + +var timeType = reflect.TypeOf((*time.Time)(nil)).Elem() + +type timestamp struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *timestamp) Reset() { *m = timestamp{} } +func (*timestamp) ProtoMessage() {} +func (*timestamp) String() string { return "timestamp" } + +func init() { + RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp") +} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers.go b/vendor/github.com/gogo/protobuf/proto/wrappers.go new file mode 100644 index 0000000000..b175d1b642 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/wrappers.go @@ -0,0 +1,1888 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "io" + "reflect" +) + +func makeStdDoubleValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*float64) + v := &float64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
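+ // At this point b holds the field's tag, a varint length prefix, and
+ // the encoded float64Value payload.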
+ return b, nil + } +} + +func makeStdDoubleValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) + v := &float64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdDoubleValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float64) + v := &float64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float64) + v := &float64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdDoubleValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdDoubleValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdDoubleValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdDoubleValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdDoubleValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdFloatValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*float32) + v := &float32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdFloatValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) + v := &float32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdFloatValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float32) + v := &float32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float32) + v := &float32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdFloatValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdFloatValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdFloatValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdFloatValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdFloatValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*int64) + v := &int64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) + v := &int64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int64) + v := &int64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int64) + v := &int64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*uint64) + v := &uint64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdUInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) + v := &uint64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdUInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint64) + v := &uint64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint64) + v := &uint64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdUInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdUInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdUInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdUInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*int32) + v := &int32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) + v := &int32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int32) + v := &int32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int32) + v := &int32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*uint32) + v := &uint32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdUInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) + v := &uint32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdUInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint32) + v := &uint32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint32) + v := &uint32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdUInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdUInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdUInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdUInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBoolValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*bool) + v := &boolValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdBoolValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) + v := &boolValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdBoolValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(bool) + v := &boolValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(bool) + v := &boolValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdBoolValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdBoolValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdBoolValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdBoolValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBoolValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdStringValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*string) + v := &stringValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdStringValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) + v := &stringValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdStringValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(string) + v := &stringValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(string) + v := &stringValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdStringValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdStringValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdStringValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdStringValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdStringValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBytesValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*[]byte) + v := &bytesValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
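+		// Same WireBytes framing as the string wrapper: tag, varint length,
+		// then the marshaled bytesValue message.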
+ return b, nil + } +} + +func makeStdBytesValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) + v := &bytesValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdBytesValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().([]byte) + v := &bytesValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().([]byte) + v := &bytesValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdBytesValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
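+			// siz comes from the same Size call pattern the sizer above uses,
+			// so the length prefix always matches the marshaled payload.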
+ } + + return b, nil + } +} + +func makeStdBytesValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdBytesValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdBytesValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBytesValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go new file mode 100644 index 0000000000..c1cf7bf85e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go @@ -0,0 +1,113 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +type float64Value struct { + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *float64Value) Reset() { *m = float64Value{} } +func (*float64Value) ProtoMessage() {} +func (*float64Value) String() string { return "float64" } + +type float32Value struct { + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *float32Value) Reset() { *m = float32Value{} } +func (*float32Value) ProtoMessage() {} +func (*float32Value) String() string { return "float32" } + +type int64Value struct { + Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *int64Value) Reset() { *m = int64Value{} } +func (*int64Value) ProtoMessage() {} +func (*int64Value) String() string { return "int64" } + +type uint64Value struct { + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *uint64Value) Reset() { *m = uint64Value{} } +func (*uint64Value) ProtoMessage() {} +func (*uint64Value) String() string { return "uint64" } + +type int32Value struct { + Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *int32Value) Reset() { *m = int32Value{} } +func (*int32Value) ProtoMessage() {} +func (*int32Value) String() string { return "int32" } + +type uint32Value struct { + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *uint32Value) Reset() { *m = uint32Value{} } +func (*uint32Value) ProtoMessage() {} +func (*uint32Value) String() string { return "uint32" } + +type boolValue struct { + Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *boolValue) Reset() { *m = boolValue{} } +func (*boolValue) ProtoMessage() {} +func (*boolValue) String() string { return "bool" } + +type stringValue struct { + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *stringValue) Reset() { *m = stringValue{} } +func (*stringValue) ProtoMessage() {} +func (*stringValue) String() string { return "string" } + +type bytesValue struct { + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *bytesValue) Reset() { *m = bytesValue{} } +func (*bytesValue) ProtoMessage() {} +func (*bytesValue) String() string { return "[]byte" } + +func init() { + RegisterType((*float64Value)(nil), "gogo.protobuf.proto.DoubleValue") + RegisterType((*float32Value)(nil), "gogo.protobuf.proto.FloatValue") + RegisterType((*int64Value)(nil), "gogo.protobuf.proto.Int64Value") + RegisterType((*uint64Value)(nil), "gogo.protobuf.proto.UInt64Value") + RegisterType((*int32Value)(nil), 
"gogo.protobuf.proto.Int32Value") + RegisterType((*uint32Value)(nil), "gogo.protobuf.proto.UInt32Value") + RegisterType((*boolValue)(nil), "gogo.protobuf.proto.BoolValue") + RegisterType((*stringValue)(nil), "gogo.protobuf.proto.StringValue") + RegisterType((*bytesValue)(nil), "gogo.protobuf.proto.BytesValue") +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/config.go b/vendor/github.com/google/go-containerregistry/pkg/v1/config.go new file mode 100644 index 0000000000..960c93b5f4 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/config.go @@ -0,0 +1,151 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "encoding/json" + "io" + "time" +) + +// ConfigFile is the configuration file that holds the metadata describing +// how to launch a container. See: +// https://github.com/opencontainers/image-spec/blob/master/config.md +// +// docker_version and os.version are not part of the spec but included +// for backwards compatibility. +type ConfigFile struct { + Architecture string `json:"architecture"` + Author string `json:"author,omitempty"` + Container string `json:"container,omitempty"` + Created Time `json:"created,omitempty"` + DockerVersion string `json:"docker_version,omitempty"` + History []History `json:"history,omitempty"` + OS string `json:"os"` + RootFS RootFS `json:"rootfs"` + Config Config `json:"config"` + OSVersion string `json:"os.version,omitempty"` + Variant string `json:"variant,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` +} + +// Platform attempts to generates a Platform from the ConfigFile fields. +func (cf *ConfigFile) Platform() *Platform { + if cf.OS == "" && cf.Architecture == "" && cf.OSVersion == "" && cf.Variant == "" && len(cf.OSFeatures) == 0 { + return nil + } + return &Platform{ + OS: cf.OS, + Architecture: cf.Architecture, + OSVersion: cf.OSVersion, + Variant: cf.Variant, + OSFeatures: cf.OSFeatures, + } +} + +// History is one entry of a list recording how this container image was built. +type History struct { + Author string `json:"author,omitempty"` + Created Time `json:"created,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + Comment string `json:"comment,omitempty"` + EmptyLayer bool `json:"empty_layer,omitempty"` +} + +// Time is a wrapper around time.Time to help with deep copying +type Time struct { + time.Time +} + +// DeepCopyInto creates a deep-copy of the Time value. The underlying time.Time +// type is effectively immutable in the time API, so it is safe to +// copy-by-assign, despite the presence of (unexported) Pointer fields. +func (t *Time) DeepCopyInto(out *Time) { + *out = *t +} + +// RootFS holds the ordered list of file system deltas that comprise the +// container image's root filesystem. 
+type RootFS struct {
+	Type    string `json:"type"`
+	DiffIDs []Hash `json:"diff_ids"`
+}
+
+// HealthConfig holds configuration settings for the HEALTHCHECK feature.
+type HealthConfig struct {
+	// Test is the test to perform to check that the container is healthy.
+	// An empty slice means to inherit the default.
+	// The options are:
+	// {} : inherit healthcheck
+	// {"NONE"} : disable healthcheck
+	// {"CMD", args...} : exec arguments directly
+	// {"CMD-SHELL", command} : run command with system's default shell
+	Test []string `json:",omitempty"`
+
+	// Zero means to inherit. Durations are expressed as integer nanoseconds.
+	Interval    time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
+	Timeout     time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
+	StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries start to count down.
+
+	// Retries is the number of consecutive failures needed to consider a container as unhealthy.
+	// Zero means inherit.
+	Retries int `json:",omitempty"`
+}
+
+// Config is a submessage of the config file described as:
+//
+//	The execution parameters which SHOULD be used as a base when running
+//	a container using the image.
+//
+// The names of the fields in this message are chosen to reflect the JSON
+// payload of the Config as defined here:
+// https://git.io/vrAET
+// and
+// https://github.com/opencontainers/image-spec/blob/master/config.md
+type Config struct {
+	AttachStderr    bool                `json:"AttachStderr,omitempty"`
+	AttachStdin     bool                `json:"AttachStdin,omitempty"`
+	AttachStdout    bool                `json:"AttachStdout,omitempty"`
+	Cmd             []string            `json:"Cmd,omitempty"`
+	Healthcheck     *HealthConfig       `json:"Healthcheck,omitempty"`
+	Domainname      string              `json:"Domainname,omitempty"`
+	Entrypoint      []string            `json:"Entrypoint,omitempty"`
+	Env             []string            `json:"Env,omitempty"`
+	Hostname        string              `json:"Hostname,omitempty"`
+	Image           string              `json:"Image,omitempty"`
+	Labels          map[string]string   `json:"Labels,omitempty"`
+	OnBuild         []string            `json:"OnBuild,omitempty"`
+	OpenStdin       bool                `json:"OpenStdin,omitempty"`
+	StdinOnce       bool                `json:"StdinOnce,omitempty"`
+	Tty             bool                `json:"Tty,omitempty"`
+	User            string              `json:"User,omitempty"`
+	Volumes         map[string]struct{} `json:"Volumes,omitempty"`
+	WorkingDir      string              `json:"WorkingDir,omitempty"`
+	ExposedPorts    map[string]struct{} `json:"ExposedPorts,omitempty"`
+	ArgsEscaped     bool                `json:"ArgsEscaped,omitempty"`
+	NetworkDisabled bool                `json:"NetworkDisabled,omitempty"`
+	MacAddress      string              `json:"MacAddress,omitempty"`
+	StopSignal      string              `json:"StopSignal,omitempty"`
+	Shell           []string            `json:"Shell,omitempty"`
+}
+
+// ParseConfigFile parses the io.Reader's contents into a ConfigFile.
+func ParseConfigFile(r io.Reader) (*ConfigFile, error) {
+	cf := ConfigFile{}
+	if err := json.NewDecoder(r).Decode(&cf); err != nil {
+		return nil, err
+	}
+	return &cf, nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go
new file mode 100644
index 0000000000..7a84736be2
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go
@@ -0,0 +1,18 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +k8s:deepcopy-gen=package
+
+// Package v1 defines structured types for OCI v1 images.
+package v1
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go b/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go
new file mode 100644
index 0000000000..f78a5fa89e
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go
@@ -0,0 +1,123 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+	"crypto"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"hash"
+	"io"
+	"strconv"
+	"strings"
+)
+
+// Hash is an unqualified digest of some content, e.g. sha256:deadbeef
+type Hash struct {
+	// Algorithm holds the algorithm used to compute the hash.
+	Algorithm string
+
+	// Hex holds the hex portion of the content hash.
+	Hex string
+}
+
+// String reverses NewHash, returning the string form of the hash.
+func (h Hash) String() string {
+	return fmt.Sprintf("%s:%s", h.Algorithm, h.Hex)
+}
+
+// NewHash validates that the input string is a hash and returns a strongly typed Hash object.
+func NewHash(s string) (Hash, error) {
+	h := Hash{}
+	if err := h.parse(s); err != nil {
+		return Hash{}, err
+	}
+	return h, nil
+}
+
+// MarshalJSON implements json.Marshaler
+func (h Hash) MarshalJSON() ([]byte, error) {
+	return json.Marshal(h.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler
+func (h *Hash) UnmarshalJSON(data []byte) error {
+	s, err := strconv.Unquote(string(data))
+	if err != nil {
+		return err
+	}
+	return h.parse(s)
+}
+
+// MarshalText implements encoding.TextMarshaler. This is required to use
+// v1.Hash as a key in a map when marshalling JSON.
+func (h Hash) MarshalText() (text []byte, err error) {
+	return []byte(h.String()), nil
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler. This is required to use
+// v1.Hash as a key in a map when unmarshalling JSON.
+func (h *Hash) UnmarshalText(text []byte) error {
+	return h.parse(string(text))
+}
+
+// Hasher returns a hash.Hash for the named algorithm (e.g.
"sha256") +func Hasher(name string) (hash.Hash, error) { + switch name { + case "sha256": + return crypto.SHA256.New(), nil + default: + return nil, fmt.Errorf("unsupported hash: %q", name) + } +} + +func (h *Hash) parse(unquoted string) error { + parts := strings.Split(unquoted, ":") + if len(parts) != 2 { + return fmt.Errorf("cannot parse hash: %q", unquoted) + } + + rest := strings.TrimLeft(parts[1], "0123456789abcdef") + if len(rest) != 0 { + return fmt.Errorf("found non-hex character in hash: %c", rest[0]) + } + + hasher, err := Hasher(parts[0]) + if err != nil { + return err + } + // Compare the hex to the expected size (2 hex characters per byte) + if len(parts[1]) != hasher.Size()*2 { + return fmt.Errorf("wrong number of hex digits for %s: %s", parts[0], parts[1]) + } + + h.Algorithm = parts[0] + h.Hex = parts[1] + return nil +} + +// SHA256 computes the Hash of the provided io.Reader's content. +func SHA256(r io.Reader) (Hash, int64, error) { + hasher := crypto.SHA256.New() + n, err := io.Copy(hasher, r) + if err != nil { + return Hash{}, 0, err + } + return Hash{ + Algorithm: "sha256", + Hex: hex.EncodeToString(hasher.Sum(make([]byte, 0, hasher.Size()))), + }, n, nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/image.go new file mode 100644 index 0000000000..8de9e47645 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/image.go @@ -0,0 +1,59 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// Image defines the interface for interacting with an OCI v1 image. +type Image interface { + // Layers returns the ordered collection of filesystem layers that comprise this image. + // The order of the list is oldest/base layer first, and most-recent/top layer last. + Layers() ([]Layer, error) + + // MediaType of this image's manifest. + MediaType() (types.MediaType, error) + + // Size returns the size of the manifest. + Size() (int64, error) + + // ConfigName returns the hash of the image's config file, also known as + // the Image ID. + ConfigName() (Hash, error) + + // ConfigFile returns this image's config file. + ConfigFile() (*ConfigFile, error) + + // RawConfigFile returns the serialized bytes of ConfigFile(). + RawConfigFile() ([]byte, error) + + // Digest returns the sha256 of this image's manifest. + Digest() (Hash, error) + + // Manifest returns this image's Manifest object. + Manifest() (*Manifest, error) + + // RawManifest returns the serialized bytes of Manifest() + RawManifest() ([]byte, error) + + // LayerByDigest returns a Layer for interacting with a particular layer of + // the image, looking it up by "digest" (the compressed hash). + LayerByDigest(Hash) (Layer, error) + + // LayerByDiffID is an analog to LayerByDigest, looking up by "diff id" + // (the uncompressed hash). 
+ LayerByDiffID(Hash) (Layer, error) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/index.go b/vendor/github.com/google/go-containerregistry/pkg/v1/index.go new file mode 100644 index 0000000000..8e7bc8ebb3 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/index.go @@ -0,0 +1,43 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// ImageIndex defines the interface for interacting with an OCI image index. +type ImageIndex interface { + // MediaType of this image's manifest. + MediaType() (types.MediaType, error) + + // Digest returns the sha256 of this index's manifest. + Digest() (Hash, error) + + // Size returns the size of the manifest. + Size() (int64, error) + + // IndexManifest returns this image index's manifest object. + IndexManifest() (*IndexManifest, error) + + // RawManifest returns the serialized bytes of IndexManifest(). + RawManifest() ([]byte, error) + + // Image returns a v1.Image that this ImageIndex references. + Image(Hash) (Image, error) + + // ImageIndex returns a v1.ImageIndex that this ImageIndex references. + ImageIndex(Hash) (ImageIndex, error) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/layer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/layer.go new file mode 100644 index 0000000000..57447d263d --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/layer.go @@ -0,0 +1,42 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "io" + + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// Layer is an interface for accessing the properties of a particular layer of a v1.Image +type Layer interface { + // Digest returns the Hash of the compressed layer. + Digest() (Hash, error) + + // DiffID returns the Hash of the uncompressed layer. + DiffID() (Hash, error) + + // Compressed returns an io.ReadCloser for the compressed layer contents. + Compressed() (io.ReadCloser, error) + + // Uncompressed returns an io.ReadCloser for the uncompressed layer contents. + Uncompressed() (io.ReadCloser, error) + + // Size returns the compressed size of the Layer. + Size() (int64, error) + + // MediaType returns the media type of the Layer. 
+ MediaType() (types.MediaType, error) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go b/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go new file mode 100644 index 0000000000..22d483f3bd --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go @@ -0,0 +1,71 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "encoding/json" + "io" + + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// Manifest represents the OCI image manifest in a structured way. +type Manifest struct { + SchemaVersion int64 `json:"schemaVersion"` + MediaType types.MediaType `json:"mediaType,omitempty"` + Config Descriptor `json:"config"` + Layers []Descriptor `json:"layers"` + Annotations map[string]string `json:"annotations,omitempty"` + Subject *Descriptor `json:"subject,omitempty"` +} + +// IndexManifest represents an OCI image index in a structured way. +type IndexManifest struct { + SchemaVersion int64 `json:"schemaVersion"` + MediaType types.MediaType `json:"mediaType,omitempty"` + Manifests []Descriptor `json:"manifests"` + Annotations map[string]string `json:"annotations,omitempty"` + Subject *Descriptor `json:"subject,omitempty"` +} + +// Descriptor holds a reference from the manifest to one of its constituent elements. +type Descriptor struct { + MediaType types.MediaType `json:"mediaType"` + Size int64 `json:"size"` + Digest Hash `json:"digest"` + Data []byte `json:"data,omitempty"` + URLs []string `json:"urls,omitempty"` + Annotations map[string]string `json:"annotations,omitempty"` + Platform *Platform `json:"platform,omitempty"` + ArtifactType string `json:"artifactType,omitempty"` +} + +// ParseManifest parses the io.Reader's contents into a Manifest. +func ParseManifest(r io.Reader) (*Manifest, error) { + m := Manifest{} + if err := json.NewDecoder(r).Decode(&m); err != nil { + return nil, err + } + return &m, nil +} + +// ParseIndexManifest parses the io.Reader's contents into an IndexManifest. +func ParseIndexManifest(r io.Reader) (*IndexManifest, error) { + im := IndexManifest{} + if err := json.NewDecoder(r).Decode(&im); err != nil { + return nil, err + } + return &im, nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/platform.go b/vendor/github.com/google/go-containerregistry/pkg/v1/platform.go new file mode 100644 index 0000000000..59ca402698 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/platform.go @@ -0,0 +1,149 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "fmt" + "sort" + "strings" +) + +// Platform represents the target os/arch for an image. +type Platform struct { + Architecture string `json:"architecture"` + OS string `json:"os"` + OSVersion string `json:"os.version,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` + Variant string `json:"variant,omitempty"` + Features []string `json:"features,omitempty"` +} + +func (p Platform) String() string { + if p.OS == "" { + return "" + } + var b strings.Builder + b.WriteString(p.OS) + if p.Architecture != "" { + b.WriteString("/") + b.WriteString(p.Architecture) + } + if p.Variant != "" { + b.WriteString("/") + b.WriteString(p.Variant) + } + if p.OSVersion != "" { + b.WriteString(":") + b.WriteString(p.OSVersion) + } + return b.String() +} + +// ParsePlatform parses a string representing a Platform, if possible. +func ParsePlatform(s string) (*Platform, error) { + var p Platform + parts := strings.Split(strings.TrimSpace(s), ":") + if len(parts) == 2 { + p.OSVersion = parts[1] + } + parts = strings.Split(parts[0], "/") + if len(parts) > 0 { + p.OS = parts[0] + } + if len(parts) > 1 { + p.Architecture = parts[1] + } + if len(parts) > 2 { + p.Variant = parts[2] + } + if len(parts) > 3 { + return nil, fmt.Errorf("too many slashes in platform spec: %s", s) + } + return &p, nil +} + +// Equals returns true if the given platform is semantically equivalent to this one. +// The order of Features and OSFeatures is not important. +func (p Platform) Equals(o Platform) bool { + return p.OS == o.OS && + p.Architecture == o.Architecture && + p.Variant == o.Variant && + p.OSVersion == o.OSVersion && + stringSliceEqualIgnoreOrder(p.OSFeatures, o.OSFeatures) && + stringSliceEqualIgnoreOrder(p.Features, o.Features) +} + +// Satisfies returns true if this Platform "satisfies" the given spec Platform. +// +// Note that this is different from Equals and that Satisfies is not reflexive. +// +// The given spec represents "requirements" such that any missing values in the +// spec are not compared. +// +// For OSFeatures and Features, Satisfies will return true if this Platform's +// fields contain a superset of the values in the spec's fields (order ignored). +func (p Platform) Satisfies(spec Platform) bool { + return satisfies(spec.OS, p.OS) && + satisfies(spec.Architecture, p.Architecture) && + satisfies(spec.Variant, p.Variant) && + satisfies(spec.OSVersion, p.OSVersion) && + satisfiesList(spec.OSFeatures, p.OSFeatures) && + satisfiesList(spec.Features, p.Features) +} + +func satisfies(want, have string) bool { + return want == "" || want == have +} + +func satisfiesList(want, have []string) bool { + if len(want) == 0 { + return true + } + + set := map[string]struct{}{} + for _, h := range have { + set[h] = struct{}{} + } + + for _, w := range want { + if _, ok := set[w]; !ok { + return false + } + } + + return true +} + +// stringSliceEqual compares 2 string slices and returns if their contents are identical. 
+func stringSliceEqual(a, b []string) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	for i, elm := range a {
+		if elm != b[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// stringSliceEqualIgnoreOrder compares 2 string slices and reports whether their contents are identical, ignoring order
+func stringSliceEqualIgnoreOrder(a, b []string) bool {
+	if a != nil && b != nil {
+		sort.Strings(a)
+		sort.Strings(b)
+	}
+	return stringSliceEqual(a, b)
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/progress.go b/vendor/github.com/google/go-containerregistry/pkg/v1/progress.go
new file mode 100644
index 0000000000..844f04d937
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/progress.go
@@ -0,0 +1,25 @@
+// Copyright 2020 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+// Update represents an update of transfer progress. Some functions
+// in this module can take a channel to which updates will be sent while a
+// transfer is in progress.
+// +k8s:deepcopy-gen=false
+type Update struct {
+	Total    int64
+	Complete int64
+	Error    error
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go b/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go
new file mode 100644
index 0000000000..c86657d7b8
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go
@@ -0,0 +1,98 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package types holds common OCI media types.
+package types
+
+// MediaType is an enumeration of the supported MIME types that an element of an image might have.
+type MediaType string
+
+// The collection of known MediaType values.
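+// These mirror the media types defined by the OCI image spec and by the
+// Docker manifest and image formats.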
+const ( + OCIContentDescriptor MediaType = "application/vnd.oci.descriptor.v1+json" + OCIImageIndex MediaType = "application/vnd.oci.image.index.v1+json" + OCIManifestSchema1 MediaType = "application/vnd.oci.image.manifest.v1+json" + OCIConfigJSON MediaType = "application/vnd.oci.image.config.v1+json" + OCILayer MediaType = "application/vnd.oci.image.layer.v1.tar+gzip" + OCILayerZStd MediaType = "application/vnd.oci.image.layer.v1.tar+zstd" + OCIRestrictedLayer MediaType = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip" + OCIUncompressedLayer MediaType = "application/vnd.oci.image.layer.v1.tar" + OCIUncompressedRestrictedLayer MediaType = "application/vnd.oci.image.layer.nondistributable.v1.tar" + + DockerManifestSchema1 MediaType = "application/vnd.docker.distribution.manifest.v1+json" + DockerManifestSchema1Signed MediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws" + DockerManifestSchema2 MediaType = "application/vnd.docker.distribution.manifest.v2+json" + DockerManifestList MediaType = "application/vnd.docker.distribution.manifest.list.v2+json" + DockerLayer MediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip" + DockerConfigJSON MediaType = "application/vnd.docker.container.image.v1+json" + DockerPluginConfig MediaType = "application/vnd.docker.plugin.v1+json" + DockerForeignLayer MediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" + DockerUncompressedLayer MediaType = "application/vnd.docker.image.rootfs.diff.tar" + + OCIVendorPrefix = "vnd.oci" + DockerVendorPrefix = "vnd.docker" +) + +// IsDistributable returns true if a layer is distributable, see: +// https://github.com/opencontainers/image-spec/blob/master/layer.md#non-distributable-layers +func (m MediaType) IsDistributable() bool { + switch m { + case DockerForeignLayer, OCIRestrictedLayer, OCIUncompressedRestrictedLayer: + return false + } + return true +} + +// IsImage returns true if the mediaType represents an image manifest, as opposed to something else, like an index. +func (m MediaType) IsImage() bool { + switch m { + case OCIManifestSchema1, DockerManifestSchema2: + return true + } + return false +} + +// IsIndex returns true if the mediaType represents an index, as opposed to something else, like an image. +func (m MediaType) IsIndex() bool { + switch m { + case OCIImageIndex, DockerManifestList: + return true + } + return false +} + +// IsConfig returns true if the mediaType represents a config, as opposed to something else, like an image. +func (m MediaType) IsConfig() bool { + switch m { + case OCIConfigJSON, DockerConfigJSON: + return true + } + return false +} + +func (m MediaType) IsSchema1() bool { + switch m { + case DockerManifestSchema1, DockerManifestSchema1Signed: + return true + } + return false +} + +func (m MediaType) IsLayer() bool { + switch m { + case DockerLayer, DockerUncompressedLayer, OCILayer, OCILayerZStd, OCIUncompressedLayer, DockerForeignLayer, OCIRestrictedLayer, OCIUncompressedRestrictedLayer: + return true + } + return false +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go b/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go new file mode 100644 index 0000000000..a47b7475ed --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go @@ -0,0 +1,339 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Copyright 2018 Google LLC All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Config) DeepCopyInto(out *Config) { + *out = *in + if in.Cmd != nil { + in, out := &in.Cmd, &out.Cmd + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Healthcheck != nil { + in, out := &in.Healthcheck, &out.Healthcheck + *out = new(HealthConfig) + (*in).DeepCopyInto(*out) + } + if in.Entrypoint != nil { + in, out := &in.Entrypoint, &out.Entrypoint + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.OnBuild != nil { + in, out := &in.OnBuild, &out.OnBuild + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make(map[string]struct{}, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ExposedPorts != nil { + in, out := &in.ExposedPorts, &out.ExposedPorts + *out = make(map[string]struct{}, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Shell != nil { + in, out := &in.Shell, &out.Shell + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. +func (in *Config) DeepCopy() *Config { + if in == nil { + return nil + } + out := new(Config) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigFile) DeepCopyInto(out *ConfigFile) { + *out = *in + in.Created.DeepCopyInto(&out.Created) + if in.History != nil { + in, out := &in.History, &out.History + *out = make([]History, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.RootFS.DeepCopyInto(&out.RootFS) + in.Config.DeepCopyInto(&out.Config) + if in.OSFeatures != nil { + in, out := &in.OSFeatures, &out.OSFeatures + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigFile. +func (in *ConfigFile) DeepCopy() *ConfigFile { + if in == nil { + return nil + } + out := new(ConfigFile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Descriptor) DeepCopyInto(out *Descriptor) { + *out = *in + out.Digest = in.Digest + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.URLs != nil { + in, out := &in.URLs, &out.URLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Platform != nil { + in, out := &in.Platform, &out.Platform + *out = new(Platform) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Descriptor. +func (in *Descriptor) DeepCopy() *Descriptor { + if in == nil { + return nil + } + out := new(Descriptor) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Hash) DeepCopyInto(out *Hash) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Hash. +func (in *Hash) DeepCopy() *Hash { + if in == nil { + return nil + } + out := new(Hash) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthConfig) DeepCopyInto(out *HealthConfig) { + *out = *in + if in.Test != nil { + in, out := &in.Test, &out.Test + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthConfig. +func (in *HealthConfig) DeepCopy() *HealthConfig { + if in == nil { + return nil + } + out := new(HealthConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *History) DeepCopyInto(out *History) { + *out = *in + in.Created.DeepCopyInto(&out.Created) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new History. +func (in *History) DeepCopy() *History { + if in == nil { + return nil + } + out := new(History) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexManifest) DeepCopyInto(out *IndexManifest) { + *out = *in + if in.Manifests != nil { + in, out := &in.Manifests, &out.Manifests + *out = make([]Descriptor, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(Descriptor) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexManifest. +func (in *IndexManifest) DeepCopy() *IndexManifest { + if in == nil { + return nil + } + out := new(IndexManifest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Manifest) DeepCopyInto(out *Manifest) { + *out = *in + in.Config.DeepCopyInto(&out.Config) + if in.Layers != nil { + in, out := &in.Layers, &out.Layers + *out = make([]Descriptor, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(Descriptor) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Manifest. +func (in *Manifest) DeepCopy() *Manifest { + if in == nil { + return nil + } + out := new(Manifest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Platform) DeepCopyInto(out *Platform) { + *out = *in + if in.OSFeatures != nil { + in, out := &in.OSFeatures, &out.OSFeatures + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Features != nil { + in, out := &in.Features, &out.Features + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform. +func (in *Platform) DeepCopy() *Platform { + if in == nil { + return nil + } + out := new(Platform) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RootFS) DeepCopyInto(out *RootFS) { + *out = *in + if in.DiffIDs != nil { + in, out := &in.DiffIDs, &out.DiffIDs + *out = make([]Hash, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootFS. +func (in *RootFS) DeepCopy() *RootFS { + if in == nil { + return nil + } + out := new(RootFS) + in.DeepCopyInto(out) + return out +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Time. +func (in *Time) DeepCopy() *Time { + if in == nil { + return nil + } + out := new(Time) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/google/go-intervals/LICENSE b/vendor/github.com/google/go-intervals/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/google/go-intervals/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/google/go-intervals/intervalset/intervalset.go b/vendor/github.com/google/go-intervals/intervalset/intervalset.go new file mode 100644 index 0000000000..6a33db63cd --- /dev/null +++ b/vendor/github.com/google/go-intervals/intervalset/intervalset.go @@ -0,0 +1,545 @@ +// Copyright 2017 Google Inc. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package intervalset provides an abstraction for dealing with sets of
+// 1-dimensional spans, such as sets of time ranges. The Set type provides set
+// arithmetic and enumeration methods based on an Interval interface.
+//
+// DISCLAIMER: This library is not yet stable, so expect breaking changes.
+package intervalset
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// Interval is the interface for a continuous or discrete span. The interval is
+// assumed to be inclusive of the starting point and exclusive of the ending
+// point.
+//
+// All methods in the interface are non-destructive: Calls to the methods should
+// not modify the interval. Furthermore, the implementation assumes an interval
+// will not be mutated by user code, either.
+type Interval interface {
+	// Intersect returns the intersection of an interval with another
+	// interval. The function may panic if the other interval is incompatible.
+	Intersect(Interval) Interval
+
+	// Before returns true if the interval is completely before another interval.
+	Before(Interval) bool
+
+	// IsZero returns true for the zero value of an interval.
+	IsZero() bool
+
+	// Bisect returns two intervals, one on the lower side of x and one on the
+	// upper side of x, corresponding to the subtraction of x from the original
+	// interval. The returned intervals are always within the range of the
+	// original interval.
+	Bisect(x Interval) (Interval, Interval)
+
+	// Adjoin returns the union of two intervals, if the intervals are exactly
+	// adjacent, or the zero interval if they are not.
+	Adjoin(Interval) Interval
+
+	// Encompass returns an interval that covers the exact extents of two
+	// intervals.
+	Encompass(Interval) Interval
+}
+
+// Set is a set of non-overlapping interval objects.
+type Set struct {
+	// non-overlapping intervals
+	intervals []Interval
+	// factory is needed when the extents of the empty set are needed.
+	factory intervalFactory
+}
+
+// SetInput is an interface implemented by Set and ImmutableSet. It is used when
+// one of these types must take a set as an argument.
+type SetInput interface {
+	// Extent returns the Interval defined by the minimum and maximum values of
+	// the set.
+	Extent() Interval
+
+	// IntervalsBetween iterates over the intervals within extents set and calls f
+	// with each. If f returns false, iteration ceases.
+	//
+	// Any interval within the set that overlaps partially with extents is truncated
+	// before being passed to f.
+	IntervalsBetween(extents Interval, f IntervalReceiver)
+}
+
+// NewSet returns a new set given a sorted slice of intervals. This function
+// panics if the intervals are not sorted.
+func NewSet(intervals []Interval) *Set {
+	return NewSetV1(intervals, oldBehaviorFactory.makeZero)
+}
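(Editorial aside, not part of the vendored file: the Interval contract above is abstract, so a toy implementation helps make it concrete. The sketch below assumes the upstream import path github.com/google/go-intervals/intervalset (this patch vendors the same package under the github.com mirror prefix) and models half-open integer spans [lo, hi), matching the inclusive-start, exclusive-end convention documented above.)

```go
// Editorial sketch: a half-open integer span [lo, hi) implementing Interval.
package main

import (
	"fmt"

	"github.com/google/go-intervals/intervalset"
)

type span struct{ lo, hi int }

func (s span) String() string { return fmt.Sprintf("[%d,%d)", s.lo, s.hi) }

// IsZero reports whether the span is empty; empty spans act as the zero value.
func (s span) IsZero() bool { return s.lo >= s.hi }

func asSpan(i intervalset.Interval) span {
	s, ok := i.(span)
	if !ok {
		panic("incompatible interval type")
	}
	return s
}

func imin(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func imax(a, b int) int {
	if a > b {
		return a
	}
	return b
}

// Intersect returns the overlap of two spans (possibly empty).
func (s span) Intersect(o intervalset.Interval) intervalset.Interval {
	t := asSpan(o)
	return span{imax(s.lo, t.lo), imin(s.hi, t.hi)}
}

// Before is true when s ends at or before o's start.
func (s span) Before(o intervalset.Interval) bool { return s.hi <= asSpan(o).lo }

// Bisect subtracts x from s, returning the pieces below and above x.
func (s span) Bisect(x intervalset.Interval) (intervalset.Interval, intervalset.Interval) {
	t := asSpan(x)
	return span{s.lo, imin(s.hi, t.lo)}, span{imax(s.lo, t.hi), s.hi}
}

// Adjoin merges exactly adjacent spans and returns the zero span otherwise.
func (s span) Adjoin(o intervalset.Interval) intervalset.Interval {
	t := asSpan(o)
	switch {
	case s.hi == t.lo:
		return span{s.lo, t.hi}
	case t.hi == s.lo:
		return span{t.lo, s.hi}
	}
	return span{}
}

// Encompass returns the smallest span covering both inputs.
func (s span) Encompass(o intervalset.Interval) intervalset.Interval {
	t := asSpan(o)
	return span{imin(s.lo, t.lo), imax(s.hi, t.hi)}
}

func main() {
	set := intervalset.NewSet([]intervalset.Interval{span{0, 5}, span{10, 15}})
	set.Add(intervalset.NewSet([]intervalset.Interval{span{5, 12}}))
	fmt.Println(set) // {[0,15)}
}
```

Adjoin only merges exactly adjacent spans, and Bisect returns empty spans for missing sides; Set.insert and Set.Sub below rely on exactly those IsZero checks.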
+
+// NewSetV1 returns a new set given a sorted slice of intervals. This function
+// panics if the intervals are not sorted.
+//
+// NewSetV1 will be renamed and will replace NewSet in the v1 release.
+func NewSetV1(intervals []Interval, makeZero func() Interval) *Set {
+	if err := CheckSorted(intervals); err != nil {
+		panic(err)
+	}
+	return &Set{intervals, makeIntervalFactor(makeZero)}
+}
+
+// CheckSorted checks that intervals[i].Before(intervals[i+1]) holds for all
+// relevant elements of the input slice, i.e. that each interval is entirely
+// before the next. Nil is returned when len(intervals) is 0 or 1.
+func CheckSorted(intervals []Interval) error {
+	for i := 0; i < len(intervals)-1; i++ {
+		if !intervals[i].Before(intervals[i+1]) {
+			return fmt.Errorf("!intervals[%d].Before(intervals[%d]) for %s, %s", i, i+1, intervals[i], intervals[i+1])
+		}
+	}
+	return nil
+}
+
+// Empty returns a new, empty set of intervals.
+func Empty() *Set {
+	return EmptyV1(oldBehaviorFactory.makeZero)
+}
+
+// EmptyV1 returns a new, empty set of intervals using the semantics of the V1
+// API, which will require a factory method for construction of an empty interval.
+func EmptyV1(makeZero func() Interval) *Set {
+	return &Set{nil, makeIntervalFactor(makeZero)}
+}
+
+// Copy returns a copy of a set that may be mutated without affecting the original.
+func (s *Set) Copy() *Set {
+	return &Set{append([]Interval(nil), s.intervals...), s.factory}
+}
+
+// String returns a human-friendly representation of the set.
+func (s *Set) String() string {
+	var strs []string
+	for _, x := range s.intervals {
+		strs = append(strs, fmt.Sprintf("%s", x))
+	}
+	return fmt.Sprintf("{%s}", strings.Join(strs, ", "))
+}
+
+// Extent returns the Interval defined by the minimum and maximum values of the
+// set.
+func (s *Set) Extent() Interval {
+	if len(s.intervals) == 0 {
+		return s.factory.makeZero()
+	}
+	return s.intervals[0].Encompass(s.intervals[len(s.intervals)-1])
+}
+
+// Add adds all the elements of another set to this set.
+func (s *Set) Add(b SetInput) {
+	// Deal with nil extent. See https://github.com/google/go-intervals/issues/6.
+	bExtent := b.Extent()
+	if bExtent == nil {
+		return // no changes needed
+	}
+
+	// Loop through the intervals of b and insert each one.
+	b.IntervalsBetween(bExtent, func(x Interval) bool {
+		s.insert(x)
+		return true
+	})
+}
+
+// Contains reports whether an interval is entirely contained by the set.
+func (s *Set) Contains(ival Interval) bool {
+	// Loop through the intervals of the set that could contain ival.
+	next := s.iterator(ival, true)
+	for setInterval := next(); setInterval != nil; setInterval = next() {
+		left, right := ival.Bisect(setInterval)
+		if !left.IsZero() {
+			return false
+		}
+		ival = right
+	}
+	return ival.IsZero()
+}
+
+// adjoinOrAppend adds an interval to the end of intervals unless that value
+// directly adjoins the last element of intervals, in which case the last
+// element will be replaced by the adjoined interval.
+func adjoinOrAppend(intervals []Interval, x Interval) []Interval {
+	lastIndex := len(intervals) - 1
+	if lastIndex == -1 {
+		return append(intervals, x)
+	}
+	adjoined := intervals[lastIndex].Adjoin(x)
+	if adjoined.IsZero() {
+		return append(intervals, x)
+	}
+	intervals[lastIndex] = adjoined
+	return intervals
+}
+
+func (s *Set) insert(insertion Interval) {
+	if s.Contains(insertion) {
+		return
+	}
+	// TODO(reddaly): Something like Java's ArrayList would allow both O(log(n))
+	// insertion and O(log(n)) lookup. For now, we have O(log(n)) lookup and O(n)
+	// insertion.
+	var newIntervals []Interval
+	push := func(x Interval) {
+		newIntervals = adjoinOrAppend(newIntervals, x)
+	}
+	inserted := false
+	for _, x := range s.intervals {
+		if inserted {
+			push(x)
+			continue
+		}
+		if insertion.Before(x) {
+			push(insertion)
+			push(x)
+			inserted = true
+			continue
+		}
+		// [===left===)[==x===)[===right===)
+		left, right := insertion.Bisect(x)
+		if !left.IsZero() {
+			push(left)
+		}
+		push(x)
+		// Replace the interval being inserted with the remaining portion of the
+		// interval to be inserted.
+		if right.IsZero() {
+			inserted = true
+		} else {
+			insertion = right
+		}
+	}
+	if !inserted {
+		push(insertion)
+	}
+	s.intervals = newIntervals
+}
+
+// Sub destructively modifies the set by subtracting b.
+func (s *Set) Sub(b SetInput) {
+	extent := s.Extent()
+	// Deal with nil extent. See https://github.com/google/go-intervals/issues/6.
+	if extent == nil {
+		// Set is already empty, no changes necessary.
+		return
+	}
+	var newIntervals []Interval
+	push := func(x Interval) {
+		newIntervals = adjoinOrAppend(newIntervals, x)
+	}
+	nextX := s.iterator(extent, true)
+	nextY, cancel := setIntervalIterator(b, extent)
+	defer cancel()
+
+	x := nextX()
+	y := nextY()
+	for x != nil {
+		// If y == nil, all of the remaining intervals in A are to the right of B,
+		// so just yield them.
+		if y == nil {
+			push(x)
+			x = nextX()
+			continue
+		}
+		// Split x into parts left and right of y.
+		// The diagrams below show the bisection results for various situations.
+		// if left.IsZero() && !right.IsZero()
+		//               xxx
+		//   y1y1 y2y2 y3  y4y4
+		//               xxx
+		// or
+		//   xxxxxxxxxxxx
+		//   y1y1 y2y2 y3 y4y4
+		//
+		// if !left.IsZero() && !right.IsZero()
+		//   x1x1x1x1x1
+		//       y1 y2
+		//
+		// if left.IsZero() && right.IsZero()
+		//   x1x1x1x1  x2x2x2
+		//   y1y1y1y1y1y1y1
+		//
+		// if !left.IsZero() && right.IsZero()
+		//   x1x1        x2
+		//     y1y1y1y1
+		left, right := x.Bisect(y)
+
+		// If the left side of x is non-zero, it can definitely be pushed to the
+		// resulting interval set since no subsequent y value will intersect it.
+		// The sequences look something like
+		//   x1x1x1x1x1      OR   x1x1x1  x2
+		//     y1  y2               y1y1y1
+		//   left = x1x1            x1x1x1
+		//   right = x1x1           {zero}
+		if !left.IsZero() {
+			push(left)
+		}
+
+		if !right.IsZero() {
+			// If the right side of x is non-zero:
+			// 1) Right is the remaining portion of x that needs to be pushed.
+			x = right
+			// 2) It's not possible for current y to intersect it, so advance y. It's
+			//    possible nextY() will intersect it, so don't push yet.
+			y = nextY()
+		} else {
+			// There's nothing left of x to push, so advance x.
+			x = nextX()
+		}
+	}
+
+	// Setting s.intervals is the only side effect in this function.
+	s.intervals = newIntervals
+}
+
+// intersectionIterator returns a function that yields intervals that are
+// members of the intersection of s and b, in increasing order.
+func (s *Set) intersectionIterator(b SetInput) (iter func() Interval, cancel func()) {
+	return intervalMapperToIterator(func(f IntervalReceiver) {
+		sExtent, bExtent := s.Extent(), b.Extent()
+		// Deal with nil extent. See https://github.com/google/go-intervals/issues/6.
+		if sExtent == nil || bExtent == nil {
+			// If either set is already empty, the intersection is empty. This
+			// avoids a panic below where a valid Interval is needed for each
+			// extent.
+			return
+		}
+		nextX := s.iterator(bExtent, true)
+		nextY, cancel := setIntervalIterator(b, sExtent)
+		defer cancel()
+
+		x := nextX()
+		y := nextY()
+		// Loop through corresponding intervals of S and B.
+		// If y == nil, all of the remaining intervals in S are to the right of B.
+		// If x == nil, all of the remaining intervals in B are to the right of S.
+		for x != nil && y != nil {
+			if x.Before(y) {
+				x = nextX()
+				continue
+			}
+			if y.Before(x) {
+				y = nextY()
+				continue
+			}
+			xyIntersect := x.Intersect(y)
+			if !xyIntersect.IsZero() {
+				if !f(xyIntersect) {
+					return
+				}
+				_, right := x.Bisect(y)
+				if !right.IsZero() {
+					x = right
+				} else {
+					x = nextX()
+				}
+			}
+		}
+	})
+}
+
+// Intersect destructively modifies the set by intersecting it with b.
+func (s *Set) Intersect(b SetInput) {
+	iter, cancel := s.intersectionIterator(b)
+	defer cancel()
+	var newIntervals []Interval
+	for x := iter(); x != nil; x = iter() {
+		newIntervals = append(newIntervals, x)
+	}
+	s.intervals = newIntervals
+}
+
+// searchLow returns the first index in s.intervals that is not before x.
+func (s *Set) searchLow(x Interval) int {
+	return sort.Search(len(s.intervals), func(i int) bool {
+		return !s.intervals[i].Before(x)
+	})
+}
+
+// searchHigh returns the index of the first interval in s.intervals that is
+// entirely after x.
+func (s *Set) searchHigh(x Interval) int {
+	return sort.Search(len(s.intervals), func(i int) bool {
+		return x.Before(s.intervals[i])
+	})
+}
+
+// iterator returns a function that yields elements of the set in order.
+//
+// The function returned will return nil when finished iterating.
+func (s *Set) iterator(extents Interval, forward bool) func() Interval {
+	low, high := s.searchLow(extents), s.searchHigh(extents)
+
+	i, stride := low, 1
+	if !forward {
+		i, stride = high-1, -1
+	}
+
+	return func() Interval {
+		if i < 0 || i >= len(s.intervals) {
+			return nil
+		}
+		x := s.intervals[i]
+		i += stride
+		return x
+	}
+}
+
+// IntervalReceiver is a function used for iterating over a set of intervals. It
+// receives an interval and returns true if the iteration should continue.
+type IntervalReceiver func(Interval) bool
+
+// IntervalsBetween iterates over the intervals within extents set and calls f
+// with each. If f returns false, iteration ceases.
+//
+// Any interval within the set that overlaps partially with extents is truncated
+// before being passed to f.
+func (s *Set) IntervalsBetween(extents Interval, f IntervalReceiver) {
+	// Begin = first index in s.intervals that is not before extents.
+	begin := sort.Search(len(s.intervals), func(i int) bool {
+		return !s.intervals[i].Before(extents)
+	})
+
+	// TODO(reddaly): Optimize this by performing a binary search for the ending
+	// point.
+	for _, interval := range s.intervals[begin:] {
+		// If the interval is after the extents, there will be no more overlap, so
+		// break out of the loop.
+		if extents.Before(interval) {
+			break
+		}
+		portionOfInterval := extents.Intersect(interval)
+		if portionOfInterval.IsZero() {
+			continue
+		}
+
+		if !f(portionOfInterval) {
+			return
+		}
+	}
+}
+
+// Intervals iterates over all the intervals within the set and calls f with
+// each one. If f returns false, iteration ceases.
+func (s *Set) Intervals(f IntervalReceiver) {
+	for _, interval := range s.intervals {
+		if !f(interval) {
+			return
+		}
+	}
+}
+
+// AllIntervals returns an ordered slice of all the intervals in the set.
+func (s *Set) AllIntervals() []Interval {
+	return append(make([]Interval, 0, len(s.intervals)), s.intervals...)
+}
+
+// ImmutableSet returns an immutable copy of this set.
+func (s *Set) ImmutableSet() *ImmutableSet {
+	return NewImmutableSet(s.AllIntervals())
+}
+
+// mapFn reports true if an iteration should continue. It is called on values of
+// a collection.
+type mapFn func(interface{}) bool
+
+// mapperFn calls a mapFn for each member of a collection.
+type mapperFn func(mapFn)
+
+// iteratorFn returns the next item in an iteration or the zero value. The
+// second return value indicates whether the first return value is a member of
+// the collection.
+type iteratorFn func() (interface{}, bool)
+
+// generatorFn returns an iterator.
+type generatorFn func() iteratorFn
+
+// cancelFn should be called to clean up the goroutine that would otherwise leak.
+type cancelFn func()
+
+// mapperToIterator returns an iteratorFn version of a mapperFn. The second
+// return value must be called at the end of iteration, or the underlying
+// goroutine will leak.
+func mapperToIterator(m mapperFn) (iteratorFn, cancelFn) {
+	generatedValues := make(chan interface{}, 1)
+	stopCh := make(chan interface{}, 1)
+	go func() {
+		m(func(obj interface{}) bool {
+			select {
+			case <-stopCh:
+				return false
+			case generatedValues <- obj:
+				return true
+			}
+		})
+		close(generatedValues)
+	}()
+	iter := func() (interface{}, bool) {
+		value, ok := <-generatedValues
+		return value, ok
+	}
+	return iter, func() {
+		stopCh <- nil
+	}
+}
+
+func intervalMapperToIterator(mapper func(IntervalReceiver)) (iter func() Interval, cancel func()) {
+	genericMapper := func(m mapFn) {
+		mapper(func(ival Interval) bool {
+			return m(ival)
+		})
+	}
+
+	genericIter, cancel := mapperToIterator(genericMapper)
+	return func() Interval {
+		genericVal, ok := genericIter()
+		if !ok {
+			return nil
+		}
+		ival, ok := genericVal.(Interval)
+		if !ok {
+			panic("unexpected value type, internal error")
+		}
+		return ival
+	}, cancel
+}
+
+func setIntervalIterator(s SetInput, extent Interval) (iter func() Interval, cancel func()) {
+	return intervalMapperToIterator(func(f IntervalReceiver) {
+		s.IntervalsBetween(extent, f)
+	})
+}
+
+// oldBehaviorFactory's makeZero returns a nil interval. This was the behavior
+// before construction of a Set/ImmutableSet required passing in a factory
+// method for creating a zero interval object.
+var oldBehaviorFactory = makeIntervalFactor(func() Interval { return nil })
+
+// intervalFactory is used to construct a zero-value interval. The zero value
+// interval may be different for different types of intervals, so a factory is
+// sometimes needed to write generic algorithms about intervals.
+type intervalFactory struct {
+	makeZero func() Interval
+}
+
+func makeIntervalFactor(makeZero func() Interval) intervalFactory {
+	return intervalFactory{makeZero}
+}
diff --git a/vendor/github.com/google/go-intervals/intervalset/intervalset_immutable.go b/vendor/github.com/google/go-intervals/intervalset/intervalset_immutable.go
new file mode 100644
index 0000000000..f6d5cbb527
--- /dev/null
+++ b/vendor/github.com/google/go-intervals/intervalset/intervalset_immutable.go
@@ -0,0 +1,85 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package intervalset + +// ImmutableSet is a set of interval objects. It provides various set theory +// operations. +type ImmutableSet struct { + set *Set +} + +// NewImmutableSet returns a new set given a sorted slice of intervals. This +// function panics if the intervals are not sorted. +func NewImmutableSet(intervals []Interval) *ImmutableSet { + return NewImmutableSetV1(intervals, oldBehaviorFactory.makeZero) +} + +// NewImmutableSetV1 returns a new set given a sorted slice of intervals. This +// function panics if the intervals are not sorted. +func NewImmutableSetV1(intervals []Interval, makeZero func() Interval) *ImmutableSet { + return &ImmutableSet{NewSetV1(intervals, makeZero)} +} + +// String returns a human-friendly representation of the set. +func (s *ImmutableSet) String() string { + return s.set.String() +} + +// Extent returns the Interval defined by the minimum and maximum values of the +// set. +func (s *ImmutableSet) Extent() Interval { + return s.set.Extent() +} + +// Contains reports whether an interval is entirely contained by the set. +func (s *ImmutableSet) Contains(ival Interval) bool { + return s.set.Contains(ival) +} + +// Union returns a set with the contents of this set and another set. +func (s *ImmutableSet) Union(b SetInput) *ImmutableSet { + union := s.set.Copy() + union.Add(b) + return &ImmutableSet{union} +} + +// Sub returns a set without the intervals of another set. +func (s *ImmutableSet) Sub(b SetInput) *ImmutableSet { + x := s.set.Copy() + x.Sub(b) + return &ImmutableSet{x} +} + +// Intersect returns the intersection of two sets. +func (s *ImmutableSet) Intersect(b SetInput) *ImmutableSet { + x := s.set.Copy() + x.Intersect(b) + return &ImmutableSet{x} +} + +// IntervalsBetween iterates over the intervals within extents set and calls f +// with each. If f returns false, iteration ceases. +// +// Any interval within the set that overlaps partially with extents is truncated +// before being passed to f. +func (s *ImmutableSet) IntervalsBetween(extents Interval, f IntervalReceiver) { + s.set.IntervalsBetween(extents, f) +} + +// Intervals iterates over all the intervals within the set and calls f with +// each one. If f returns false, iteration ceases. +func (s *ImmutableSet) Intervals(f IntervalReceiver) { + s.set.Intervals(f) +} diff --git a/vendor/github.com/mattn/go-shellwords/.travis.yml b/vendor/github.com/mattn/go-shellwords/.travis.yml new file mode 100644 index 0000000000..ebd5edd898 --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/.travis.yml @@ -0,0 +1,16 @@ +arch: + - amd64 + - ppc64le +language: go +sudo: false +go: + - tip + +before_install: + - go get -t -v ./... 
+ +script: + - ./go.test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/mattn/go-shellwords/LICENSE b/vendor/github.com/mattn/go-shellwords/LICENSE new file mode 100644 index 0000000000..740fa93132 --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2017 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/mattn/go-shellwords/README.md b/vendor/github.com/mattn/go-shellwords/README.md new file mode 100644 index 0000000000..bdd531918c --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/README.md @@ -0,0 +1,55 @@ +# go-shellwords + +[![codecov](https://codecov.io/gh/mattn/go-shellwords/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-shellwords) +[![Build Status](https://travis-ci.org/mattn/go-shellwords.svg?branch=master)](https://travis-ci.org/mattn/go-shellwords) +[![PkgGoDev](https://pkg.go.dev/badge/github.com/mattn/go-shellwords)](https://pkg.go.dev/github.com/mattn/go-shellwords) +[![ci](https://github.com/mattn/go-shellwords/ci/badge.svg)](https://github.com/mattn/go-shellwords/actions) + +Parse line as shell words. + +## Usage + +```go +args, err := shellwords.Parse("./foo --bar=baz") +// args should be ["./foo", "--bar=baz"] +``` + +```go +envs, args, err := shellwords.ParseWithEnvs("FOO=foo BAR=baz ./foo --bar=baz") +// envs should be ["FOO=foo", "BAR=baz"] +// args should be ["./foo", "--bar=baz"] +``` + +```go +os.Setenv("FOO", "bar") +p := shellwords.NewParser() +p.ParseEnv = true +args, err := p.Parse("./foo $FOO") +// args should be ["./foo", "bar"] +``` + +```go +p := shellwords.NewParser() +p.ParseBacktick = true +args, err := p.Parse("./foo `echo $SHELL`") +// args should be ["./foo", "/bin/bash"] +``` + +```go +shellwords.ParseBacktick = true +p := shellwords.NewParser() +args, err := p.Parse("./foo `echo $SHELL`") +// args should be ["./foo", "/bin/bash"] +``` + +# Thanks + +This is based on cpan module [Parse::CommandLine](https://metacpan.org/pod/Parse::CommandLine). 
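(Editorial aside: beyond the package-level flags shown in the usage examples above, the Parser type defined in shellwords.go later in this diff exposes a Getenv hook that is consulted instead of os.Getenv when ParseEnv is true. A minimal sketch, assuming the canonical import path:)

```go
// Sketch: overriding environment lookup during word expansion.
// The Getenv field is defined on Parser in shellwords.go below.
package main

import (
	"fmt"

	shellwords "github.com/mattn/go-shellwords"
)

func main() {
	p := shellwords.NewParser()
	p.ParseEnv = true
	// Consulted instead of os.Getenv; handy for tests and sandboxing.
	p.Getenv = func(key string) string {
		if key == "HOME" {
			return "/home/example"
		}
		return ""
	}
	args, err := p.Parse("ls $HOME/src")
	if err != nil {
		panic(err)
	}
	fmt.Println(args) // [ls /home/example/src]
}
```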
+ +# License + +under the MIT License: http://mattn.mit-license.org/2017 + +# Author + +Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/mattn/go-shellwords/go.test.sh b/vendor/github.com/mattn/go-shellwords/go.test.sh new file mode 100644 index 0000000000..a7deaca96a --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/go.test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... | grep -v vendor); do + go test -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/mattn/go-shellwords/shellwords.go b/vendor/github.com/mattn/go-shellwords/shellwords.go new file mode 100644 index 0000000000..1b42a00170 --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/shellwords.go @@ -0,0 +1,317 @@ +package shellwords + +import ( + "bytes" + "errors" + "os" + "strings" + "unicode" +) + +var ( + ParseEnv bool = false + ParseBacktick bool = false +) + +func isSpace(r rune) bool { + switch r { + case ' ', '\t', '\r', '\n': + return true + } + return false +} + +func replaceEnv(getenv func(string) string, s string) string { + if getenv == nil { + getenv = os.Getenv + } + + var buf bytes.Buffer + rs := []rune(s) + for i := 0; i < len(rs); i++ { + r := rs[i] + if r == '\\' { + i++ + if i == len(rs) { + break + } + buf.WriteRune(rs[i]) + continue + } else if r == '$' { + i++ + if i == len(rs) { + buf.WriteRune(r) + break + } + if rs[i] == 0x7b { + i++ + p := i + for ; i < len(rs); i++ { + r = rs[i] + if r == '\\' { + i++ + if i == len(rs) { + return s + } + continue + } + if r == 0x7d || (!unicode.IsLetter(r) && r != '_' && !unicode.IsDigit(r)) { + break + } + } + if r != 0x7d { + return s + } + if i > p { + buf.WriteString(getenv(s[p:i])) + } + } else { + p := i + for ; i < len(rs); i++ { + r := rs[i] + if r == '\\' { + i++ + if i == len(rs) { + return s + } + continue + } + if !unicode.IsLetter(r) && r != '_' && !unicode.IsDigit(r) { + break + } + } + if i > p { + buf.WriteString(getenv(s[p:i])) + i-- + } else { + buf.WriteString(s[p:]) + } + } + } else { + buf.WriteRune(r) + } + } + return buf.String() +} + +type Parser struct { + ParseEnv bool + ParseBacktick bool + Position int + Dir string + + // If ParseEnv is true, use this for getenv. + // If nil, use os.Getenv. + Getenv func(string) string +} + +func NewParser() *Parser { + return &Parser{ + ParseEnv: ParseEnv, + ParseBacktick: ParseBacktick, + Position: 0, + Dir: "", + } +} + +type argType int + +const ( + argNo argType = iota + argSingle + argQuoted +) + +func (p *Parser) Parse(line string) ([]string, error) { + args := []string{} + buf := "" + var escaped, doubleQuoted, singleQuoted, backQuote, dollarQuote bool + backtick := "" + + pos := -1 + got := argNo + + i := -1 +loop: + for _, r := range line { + i++ + if escaped { + buf += string(r) + escaped = false + got = argSingle + continue + } + + if r == '\\' { + if singleQuoted { + buf += string(r) + } else { + escaped = true + } + continue + } + + if isSpace(r) { + if singleQuoted || doubleQuoted || backQuote || dollarQuote { + buf += string(r) + backtick += string(r) + } else if got != argNo { + if p.ParseEnv { + if got == argSingle { + parser := &Parser{ParseEnv: false, ParseBacktick: false, Position: 0, Dir: p.Dir} + strs, err := parser.Parse(replaceEnv(p.Getenv, buf)) + if err != nil { + return nil, err + } + args = append(args, strs...) 
+ } else { + args = append(args, replaceEnv(p.Getenv, buf)) + } + } else { + args = append(args, buf) + } + buf = "" + got = argNo + } + continue + } + + switch r { + case '`': + if !singleQuoted && !doubleQuoted && !dollarQuote { + if p.ParseBacktick { + if backQuote { + out, err := shellRun(backtick, p.Dir) + if err != nil { + return nil, err + } + buf = buf[:len(buf)-len(backtick)] + out + } + backtick = "" + backQuote = !backQuote + continue + } + backtick = "" + backQuote = !backQuote + } + case ')': + if !singleQuoted && !doubleQuoted && !backQuote { + if p.ParseBacktick { + if dollarQuote { + out, err := shellRun(backtick, p.Dir) + if err != nil { + return nil, err + } + buf = buf[:len(buf)-len(backtick)-2] + out + } + backtick = "" + dollarQuote = !dollarQuote + continue + } + backtick = "" + dollarQuote = !dollarQuote + } + case '(': + if !singleQuoted && !doubleQuoted && !backQuote { + if !dollarQuote && strings.HasSuffix(buf, "$") { + dollarQuote = true + buf += "(" + continue + } else { + return nil, errors.New("invalid command line string") + } + } + case '"': + if !singleQuoted && !dollarQuote { + if doubleQuoted { + got = argQuoted + } + doubleQuoted = !doubleQuoted + continue + } + case '\'': + if !doubleQuoted && !dollarQuote { + if singleQuoted { + got = argQuoted + } + singleQuoted = !singleQuoted + continue + } + case ';', '&', '|', '<', '>': + if !(escaped || singleQuoted || doubleQuoted || backQuote || dollarQuote) { + if r == '>' && len(buf) > 0 { + if c := buf[0]; '0' <= c && c <= '9' { + i -= 1 + got = argNo + } + } + pos = i + break loop + } + } + + got = argSingle + buf += string(r) + if backQuote || dollarQuote { + backtick += string(r) + } + } + + if got != argNo { + if p.ParseEnv { + if got == argSingle { + parser := &Parser{ParseEnv: false, ParseBacktick: false, Position: 0, Dir: p.Dir} + strs, err := parser.Parse(replaceEnv(p.Getenv, buf)) + if err != nil { + return nil, err + } + args = append(args, strs...) 
+ } else { + args = append(args, replaceEnv(p.Getenv, buf)) + } + } else { + args = append(args, buf) + } + } + + if escaped || singleQuoted || doubleQuoted || backQuote || dollarQuote { + return nil, errors.New("invalid command line string") + } + + p.Position = pos + + return args, nil +} + +func (p *Parser) ParseWithEnvs(line string) (envs []string, args []string, err error) { + _args, err := p.Parse(line) + if err != nil { + return nil, nil, err + } + envs = []string{} + args = []string{} + parsingEnv := true + for _, arg := range _args { + if parsingEnv && isEnv(arg) { + envs = append(envs, arg) + } else { + if parsingEnv { + parsingEnv = false + } + args = append(args, arg) + } + } + return envs, args, nil +} + +func isEnv(arg string) bool { + return len(strings.Split(arg, "=")) == 2 +} + +func Parse(line string) ([]string, error) { + return NewParser().Parse(line) +} + +func ParseWithEnvs(line string) (envs []string, args []string, err error) { + return NewParser().ParseWithEnvs(line) +} diff --git a/vendor/github.com/mattn/go-shellwords/util_posix.go b/vendor/github.com/mattn/go-shellwords/util_posix.go new file mode 100644 index 0000000000..b56a90120a --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/util_posix.go @@ -0,0 +1,29 @@ +// +build !windows + +package shellwords + +import ( + "fmt" + "os" + "os/exec" + "strings" +) + +func shellRun(line, dir string) (string, error) { + var shell string + if shell = os.Getenv("SHELL"); shell == "" { + shell = "/bin/sh" + } + cmd := exec.Command(shell, "-c", line) + if dir != "" { + cmd.Dir = dir + } + b, err := cmd.Output() + if err != nil { + if eerr, ok := err.(*exec.ExitError); ok { + b = eerr.Stderr + } + return "", fmt.Errorf("%s: %w", string(b), err) + } + return strings.TrimSpace(string(b)), nil +} diff --git a/vendor/github.com/mattn/go-shellwords/util_windows.go b/vendor/github.com/mattn/go-shellwords/util_windows.go new file mode 100644 index 0000000000..fd738a7211 --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/util_windows.go @@ -0,0 +1,29 @@ +// +build windows + +package shellwords + +import ( + "fmt" + "os" + "os/exec" + "strings" +) + +func shellRun(line, dir string) (string, error) { + var shell string + if shell = os.Getenv("COMSPEC"); shell == "" { + shell = "cmd" + } + cmd := exec.Command(shell, "/c", line) + if dir != "" { + cmd.Dir = dir + } + b, err := cmd.Output() + if err != nil { + if eerr, ok := err.(*exec.ExitError); ok { + b = eerr.Stderr + } + return "", fmt.Errorf("%s: %w", string(b), err) + } + return strings.TrimSpace(string(b)), nil +} diff --git a/vendor/github.com/mistifyio/go-zfs/v3/.envrc b/vendor/github.com/mistifyio/go-zfs/v3/.envrc new file mode 100644 index 0000000000..f310aea665 --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/v3/.envrc @@ -0,0 +1,4 @@ +has nix && use nix +dotenv_if_exists +PATH_add bin +path_add GOBIN bin diff --git a/vendor/github.com/mistifyio/go-zfs/v3/.gitignore b/vendor/github.com/mistifyio/go-zfs/v3/.gitignore new file mode 100644 index 0000000000..0867490ad5 --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/v3/.gitignore @@ -0,0 +1,6 @@ +bin +go-zfs.test +.vagrant + +# added by lint-install +out/ diff --git a/vendor/github.com/mistifyio/go-zfs/v3/.golangci.yml b/vendor/github.com/mistifyio/go-zfs/v3/.golangci.yml new file mode 100644 index 0000000000..499c3eca16 --- /dev/null +++ 
b/vendor/github.com/mistifyio/go-zfs/v3/.golangci.yml @@ -0,0 +1,207 @@ +run: + # The default runtime timeout is 1m, which doesn't work well on Github Actions. + timeout: 4m + +# NOTE: This file is populated by the lint-install tool. Local adjustments may be overwritten. +linters-settings: + cyclop: + # NOTE: This is a very high transitional threshold + max-complexity: 37 + package-average: 34.0 + skip-tests: true + + gocognit: + # NOTE: This is a very high transitional threshold + min-complexity: 98 + + dupl: + threshold: 200 + + goconst: + min-len: 4 + min-occurrences: 5 + ignore-tests: true + + gosec: + excludes: + - G107 # Potential HTTP request made with variable url + - G204 # Subprocess launched with function call as argument or cmd arguments + - G404 # Use of weak random number generator (math/rand instead of crypto/rand + + errorlint: + # these are still common in Go: for instance, exit errors. + asserts: false + + exhaustive: + default-signifies-exhaustive: true + + nestif: + min-complexity: 8 + + nolintlint: + require-explanation: true + allow-unused: false + require-specific: true + + revive: + ignore-generated-header: true + severity: warning + rules: + - name: atomic + - name: blank-imports + - name: bool-literal-in-expr + - name: confusing-naming + - name: constant-logical-expr + - name: context-as-argument + - name: context-keys-type + - name: deep-exit + - name: defer + - name: range-val-in-closure + - name: range-val-address + - name: dot-imports + - name: error-naming + - name: error-return + - name: error-strings + - name: errorf + - name: exported + - name: identical-branches + - name: if-return + - name: import-shadowing + - name: increment-decrement + - name: indent-error-flow + - name: indent-error-flow + - name: package-comments + - name: range + - name: receiver-naming + - name: redefines-builtin-id + - name: superfluous-else + - name: struct-tag + - name: time-naming + - name: unexported-naming + - name: unexported-return + - name: unnecessary-stmt + - name: unreachable-code + - name: unused-parameter + - name: var-declaration + - name: var-naming + - name: unconditional-recursion + - name: waitgroup-by-value + + staticcheck: + go: "1.16" + + unused: + go: "1.16" + +output: + sort-results: true + +linters: + disable-all: true + enable: + - asciicheck + - bodyclose + - cyclop + - deadcode + - dogsled + - dupl + - durationcheck + - errcheck + - errname + - errorlint + - exhaustive + - exportloopref + - forcetypeassert + - gocognit + - goconst + - gocritic + - godot + - gofmt + - gofumpt + - gosec + - goheader + - goimports + - goprintffuncname + - gosimple + - govet + - ifshort + - importas + - ineffassign + - makezero + - misspell + - nakedret + - nestif + - nilerr + - noctx + - nolintlint + - predeclared + # disabling for the initial iteration of the linting tool + # - promlinter + - revive + - rowserrcheck + - sqlclosecheck + - staticcheck + - structcheck + - stylecheck + - thelper + - tparallel + - typecheck + - unconvert + - unparam + - unused + - varcheck + - wastedassign + - whitespace + + # Disabled linters, due to being misaligned with Go practices + # - exhaustivestruct + # - gochecknoglobals + # - gochecknoinits + # - goconst + # - godox + # - goerr113 + # - gomnd + # - lll + # - nlreturn + # - testpackage + # - wsl + # Disabled linters, due to not being relevant to our code base: + # - maligned + # - prealloc "For most programs usage of prealloc will be a premature optimization." 
+ # Disabled linters due to bad error messages or bugs + # - tagliatelle + +issues: + # Excluding configuration per-path, per-linter, per-text and per-source + exclude-rules: + - path: _test\.go + linters: + - dupl + - errcheck + - forcetypeassert + - gocyclo + - gosec + - noctx + + - path: .*cmd.* + linters: + - noctx + + - path: main\.go + linters: + - noctx + + - path: .*cmd.* + text: "deep-exit" + + - path: main\.go + text: "deep-exit" + + # This check is of questionable value + - linters: + - tparallel + text: "call t.Parallel on the top level as well as its subtests" + + # Don't hide lint issues just because there are many of them + max-same-issues: 0 + max-issues-per-linter: 0 diff --git a/vendor/github.com/mistifyio/go-zfs/v3/.yamllint b/vendor/github.com/mistifyio/go-zfs/v3/.yamllint new file mode 100644 index 0000000000..9a08ad1765 --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/v3/.yamllint @@ -0,0 +1,16 @@ +--- +extends: default + +rules: + braces: + max-spaces-inside: 1 + brackets: + max-spaces-inside: 1 + comments: disable + comments-indentation: disable + document-start: disable + line-length: + level: warning + max: 160 + allow-non-breakable-inline-mappings: true + truthy: disable diff --git a/vendor/github.com/mistifyio/go-zfs/v3/CHANGELOG.md b/vendor/github.com/mistifyio/go-zfs/v3/CHANGELOG.md new file mode 100644 index 0000000000..349245d039 --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/v3/CHANGELOG.md @@ -0,0 +1,250 @@ +# Change Log + +All notable changes to this project will be documented in this file. +This project adheres to [Semantic Versioning](http://semver.org/). +This change log follows the advice of [Keep a CHANGELOG](https://github.com/olivierlacan/keep-a-changelog). + +## [Unreleased] + +## [3.0.0] - 2022-03-30 + +### Added + +- Rename, Mount and Unmount methods +- Parse more fields into Zpool type: + - dedupratio + - fragmentation + - freeing + - leaked + - readonly +- Parse more fields into Dataset type: + - referenced +- Incremental Send +- Parse numbers in exact format +- Support for Solaris (non-blockint, best-effort status) +- Debug logging for command invocation +- Use GitHub Actions for CI +- Nix shell for dev env reproducibility +- Direnv file for ease of dev +- Formatting/lint checks (enforced by CI) +- Go Module +- FreeBSD based vagrant machine + +### Changed + +- Temporarily adjust TestDiff expected strings depending on ZFS version +- Use one `zfs list`/`zpool list` call instead of many `zfs get`/`zpool get` +- ZFS docs links now point to OpenZFS pages +- Ubuntu vagrant box changed to generic/ubuntu2004 + +### Fixed + +- `GetProperty` returning `VALUE` instead of the actual value + +### Shortlog + + Amit Krishnan (1): + Issue #39 and Issue #40 - Enable Solaris support for go-zfs Switch from zfs/zpool get to zfs/zpool list for better performance Signed-off-by: Amit Krishnan + + Anand Patil (3): + Added Rename + Small fix to rename. 
+ Added mount and umount methods + + Brian Akins (1): + Add 'referenced' to zfs properties + + Brian Bickerton (3): + Add debug logging before and after running external zfs command + Don't export the default no-op logger + Update uuid package repo url + + Dmitry Teselkin (1): + Issue #52 - fix parseLine for fragmentation field + + Edward Betts (1): + correct spelling mistake + + Justin Cormack (1): + Switch to google/uuid which is the maintained version of pborman/uuid + + Manuel Mendez (40): + rename Umount -> Unmount to follow zfs command name + add missing Unmount/Mount docs + always allocate largest Mount slice + add travis config + travis: update to go 1.7 + travis: get go deps first + test: add nok helper to verify an error occurred + test: add test for Dataset.GetProperty + ci: swap #cerana on freenode for slack + ci: install new deps for 0.7 relases + ci: bump zol versions + ci: bump go versions + ci: use better gometalinter invocations + ci: add ccache + ci: set env earlier in before_install + fix test nok error printing + test: restructure TestDiff to deal with different order of changes + test: better unicode path handling in TestDiff + travis: bump zfs and go versions + cache zfs artifacts + Add nix-shell and direnv goodness + prettierify all the files + Add go based tools + Add Makefile and rules.mk files + gofumptize the code base + Use tinkerbell/lint-install to setup linters + make golangci-lint happy + Update CONTRIBUTING.md with make based approach + Add GitHub Actions + Drop Travis CI + One sentence per line + Update documentation links to openzfs-docs pages + Format Vagrantfile using rufo + Add go-zfs.test to .gitignore + test: Avoid reptitive/duplicate error logging and quitting + test: Use t.Logf instead of fmt.Printf + test: Better cleanup and error handling in zpoolTest + test: Do not mark TestDatasets as a t.Helper. 
+ test: Change zpoolTest to a pure helper that returns a clean up function + test: Move helpers to a different file + vagrant: Add set -euxo pipefail to provision script + vagrant: Update to generic/ubuntu2004 + vagrant: Minor fixes to Vagrantfile + vagrant: Update to go 1.17.8 + vagrant: Run go tests as part of provision script + vagrant: Indent heredoc script + vagrant: Add freebsd machine + + Matt Layher (1): + Parse more fields into Zpool type + + Michael Crosby (1): + Add incremental send + + Rikard Gynnerstedt (1): + remove command name from joined args + + Sebastiaan van Stijn (1): + Add go.mod and rename to github.com/mistifyio/go-zfs/v3 (v3.0.0) + + mikudeko (1): + Fix GetProperty always returning 'VALUE' + +## [2.1.1] - 2015-05-29 + +### Fixed + +- Ignoring first pool listed +- Incorrect `zfs get` argument ordering + +### Shortlog + + Alexey Guskov (1): + zfs command uses different order of arguments on freebsd + + Brian Akins (4): + test that ListZpools returns expected zpool + test error first + test error first + fix test to check correct return value + + James Cunningham (1): + Fix Truncating First Zpool + + Pat Norton (2): + Added Use of Go Tools + Update CONTRIBUTING.md + +## [2.1.0] - 2014-12-08 + +### Added + +- Parse hardlink modification count returned from `zfs diff` + +### Fixed + +- Continuing instead of erroring when rolling back a non-snapshot + +### Shortlog + + Brian Akins (2): + need to return the error here + use named struct fields + + Jörg Thalheim (1): + zfs diff handle hardlinks modification now + +## [2.0.0] - 2014-12-02 + +### Added + +- Flags for Destroy: + - DESTROY_DEFAULT + - DESTROY_DEFER_DELETION (`zfs destroy ... -d`) + - DESTROY_FORCE (`zfs destroy ... -f`) + - DESTROY_RECURSIVE_CLONES (`zfs destroy ... -R`) + - DESTROY_RECURSIVE (`zfs destroy ... 
-r`) + - etc +- Diff method (`zfs diff`) +- LogicalUsed and Origin properties to Dataset +- Type constants for Dataset +- State constants for Zpool +- Logger interface +- Improve documentation + +### Shortlog + + Brian Akins (8): + remove reflection + style change for switches + need to check for error + keep in scope + go 1.3.3 + golint cleanup + Just test if logical used is greater than 0, as this appears to be implementation specific + add docs to satisfy golint + + Jörg Thalheim (8): + Add deferred flag to zfs.Destroy() + add Logicalused property + Add Origin property + gofmt + Add zfs.Diff + Add Logger + add recursive destroy with clones + use CamelCase-style constants + + Matt Layher (4): + Improve documentation, document common ZFS operations, provide more references + Add zpool state constants, for easier health checking + Add dataset type constants, for easier type checking + Fix string split in command.Run(), use strings.Fields() instead of strings.Split() + +## [1.0.0] - 2014-11-12 + +### Shortlog + + Brian Akins (7): + add godoc badge + Add example + add information about zpool to struct and parser + Add Quota + add Children call + add Children call + fix snapshot tests + + Brian Bickerton (3): + MIST-150 Change Snapshot second paramater from properties map[string][string] to recursive bool + MIST-150 Add Rollback method and related tests + MIST-160 Add SendSnapshot streaming method and tests + + Matt Layher (1): + Add Error struct type and tests, enabling easier error return checking + +[3.0.0]: https://github.com/mistifyio/go-zfs/compare/v2.1.1...v3.0.0 +[2.1.1]: https://github.com/mistifyio/go-zfs/compare/v2.1.0...v2.1.1 +[2.1.0]: https://github.com/mistifyio/go-zfs/compare/v2.0.0...v2.1.0 +[2.0.0]: https://github.com/mistifyio/go-zfs/compare/v1.0.0...v2.0.0 +[1.0.0]: https://github.com/mistifyio/go-zfs/compare/v0.0.0...v1.0.0 diff --git a/vendor/github.com/mistifyio/go-zfs/v3/CONTRIBUTING.md b/vendor/github.com/mistifyio/go-zfs/v3/CONTRIBUTING.md new file mode 100644 index 0000000000..9f625d5646 --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/v3/CONTRIBUTING.md @@ -0,0 +1,64 @@ +## How to Contribute + +We always welcome contributions to help make `go-zfs` better. +Please take a moment to read this document if you would like to contribute. + +### Reporting issues + +We use [Github issues](https://github.com/mistifyio/go-zfs/issues) to track bug reports, feature requests, and submitting pull requests. + +If you find a bug: + +- Use the GitHub issue search to check whether the bug has already been reported. +- If the issue has been fixed, try to reproduce the issue using the latest `master` branch of the repository. +- If the issue still reproduces or has not yet been reported, try to isolate the problem before opening an issue, if possible. Also provide the steps taken to reproduce the bug. + +### Pull requests + +We welcome bug fixes, improvements, and new features. +Before embarking on making significant changes, please open an issue and ask first so that you do not risk duplicating efforts or spending time working on something that may be out of scope. +For minor items, just open a pull request. 
+
+[Fork the project](https://help.github.com/articles/fork-a-repo), clone your fork, and add the upstream to your remote:
+
+    $ git clone git@github.com:<your-username>/go-zfs.git
+    $ cd go-zfs
+    $ git remote add upstream https://github.com/mistifyio/go-zfs.git
+
+If you need to pull new changes committed upstream:
+
+    $ git checkout master
+    $ git fetch upstream
+    $ git merge upstream/master
+
+Don't work directly on master as this makes it harder to merge later.
+Create a feature branch for your fix or new feature:
+
+    $ git checkout -b <feature-branch-name>
+
+Please try to commit your changes in logical chunks.
+Ideally, you should include the issue number in the commit message.
+
+    $ git commit -m "Issue #<issue-number> - <description>"
+
+Push your feature branch to your fork.
+
+    $ git push origin <feature-branch-name>
+
+[Open a Pull Request](https://help.github.com/articles/using-pull-requests) against the upstream master branch.
+Please give your pull request a clear title and description and note which issue(s) your pull request fixes.
+
+- All linters should be happy (can be run with `make verify`).
+- Every exported function should have [documentation](http://blog.golang.org/godoc-documenting-go-code) and corresponding [tests](http://golang.org/doc/code.html#Testing).
+
+**Important:** By submitting a patch, you agree to allow the project owners to license your work under the [Apache 2.0 License](./LICENSE).
+
+### Go Tools
+
+For consistency and to catch minor issues in all of the Go code, please run `make verify`.
+
+Many editors can execute the above on save.
+
+---
+
+Guidelines based on http://azkaban.github.io/contributing.html
diff --git a/vendor/github.com/mistifyio/go-zfs/v3/LICENSE b/vendor/github.com/mistifyio/go-zfs/v3/LICENSE
new file mode 100644
index 0000000000..f4c265cfec
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/v3/LICENSE
@@ -0,0 +1,201 @@
+Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) 2014, OmniTI Computer Consulting, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/mistifyio/go-zfs/v3/Makefile b/vendor/github.com/mistifyio/go-zfs/v3/Makefile new file mode 100644 index 0000000000..1c5f55e8c6 --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/v3/Makefile @@ -0,0 +1,19 @@ +help: ## Print this help + @grep --no-filename -E '^[a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sed 's/:.*## /·/' | sort | column -t -W 2 -s '·' -c $(shell tput cols) + +all: test ## Run tests + +-include rules.mk +-include lint.mk + +test: ## Run tests + go test ./... + +verify: gofumpt prettier lint ## Verify code style, is lint free, freshness ... + git diff | (! grep .) 
+ +fix: gofumpt-fix prettier-fix ## Fix code formatting errors + +tools: ${toolsBins} ## Build Go based build tools + +.PHONY: all help test tools verify diff --git a/vendor/github.com/mistifyio/go-zfs/v3/README.md b/vendor/github.com/mistifyio/go-zfs/v3/README.md new file mode 100644 index 0000000000..c911833002 --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/v3/README.md @@ -0,0 +1,53 @@ +# Go Wrapper for ZFS + +Simple wrappers for ZFS command line tools. + +[![GoDoc](https://godoc.org/github.com/mistifyio/go-zfs?status.svg)](https://godoc.org/github.com/mistifyio/go-zfs) + +## Requirements + +You need a working ZFS setup. To use on Ubuntu 14.04, set up ZFS: + + sudo apt-get install python-software-properties + sudo apt-add-repository ppa:zfs-native/stable + sudo apt-get update + sudo apt-get install ubuntu-zfs libzfs-dev + +Developed using Go 1.3, but currently there isn't anything 1.3 specific. Don't use Ubuntu packages for Go; use http://golang.org/doc/install instead. + +Generally you need root privileges to use anything ZFS-related. + +## Status + +This has only been tested on Ubuntu 14.04. + +In the future, we hope to work directly with libzfs. + +# Hacking + +The tests have decent examples for most functions. + +```go +// assuming a zpool named test +// error handling omitted + +f, err := zfs.CreateFilesystem("test/snapshot-test", nil) +ok(t, err) + +s, err := f.Snapshot("test", false) +ok(t, err) + +// snapshot is named "test/snapshot-test@test" + +c, err := s.Clone("test/clone-test", nil) + +err = c.Destroy(zfs.DestroyDefault) +err = s.Destroy(zfs.DestroyDefault) +err = f.Destroy(zfs.DestroyDefault) +``` + +# Contributing + +See the [contributing guidelines](./CONTRIBUTING.md) diff --git a/vendor/github.com/mistifyio/go-zfs/v3/Vagrantfile b/vendor/github.com/mistifyio/go-zfs/v3/Vagrantfile new file mode 100644 index 0000000000..7d8d2decd3 --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/v3/Vagrantfile @@ -0,0 +1,33 @@ +GOVERSION = "1.17.8" + +Vagrant.configure("2") do |config| + config.vm.define "ubuntu" do |ubuntu| + ubuntu.vm.box = "generic/ubuntu2004" + end + config.vm.define "freebsd" do |freebsd| + freebsd.vm.box = "generic/freebsd13" + end + config.ssh.forward_agent = true + config.vm.synced_folder ".", "/home/vagrant/go/src/github.com/mistifyio/go-zfs", create: true + config.vm.provision "shell", inline: <<-EOF + set -euxo pipefail + + os=$(uname -s|tr '[A-Z]' '[a-z]') + case $os in + linux) apt-get update -y && apt-get install -y --no-install-recommends gcc libc-dev zfsutils-linux ;; + esac + + cd /tmp + curl -fLO --retry-max-time 30 --retry 10 https://go.dev/dl/go#{GOVERSION}.$os-amd64.tar.gz + tar -C /usr/local -zxf go#{GOVERSION}.$os-amd64.tar.gz + ln -nsf /usr/local/go/bin/go /usr/local/bin/go + rm -rf go*.tar.gz + + chown -R vagrant:vagrant /home/vagrant/go + cd /home/vagrant/go/src/github.com/mistifyio/go-zfs + go test -c + sudo ./go-zfs.test -test.v + CGO_ENABLED=0 go test -c + sudo ./go-zfs.test -test.v + EOF +end diff --git a/vendor/github.com/mistifyio/go-zfs/v3/error.go b/vendor/github.com/mistifyio/go-zfs/v3/error.go new file mode 100644 index 0000000000..5408ccdb55 --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/v3/error.go @@ -0,0 +1,18 @@ +package zfs + +import ( + "fmt" +) + +// Error is an error which is returned when the `zfs` or `zpool` shell +// commands return with a non-zero exit code.
+type Error struct { + Err error + Debug string + Stderr string +} + +// Error returns the string representation of an Error. +func (e Error) Error() string { + return fmt.Sprintf("%s: %q => %s", e.Err, e.Debug, e.Stderr) +} diff --git a/vendor/github.com/mistifyio/go-zfs/v3/lint.mk b/vendor/github.com/mistifyio/go-zfs/v3/lint.mk new file mode 100644 index 0000000000..a1e0a4fd36 --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/v3/lint.mk @@ -0,0 +1,75 @@ +# BEGIN: lint-install -makefile lint.mk . +# http://github.com/tinkerbell/lint-install + +.PHONY: lint +lint: _lint + +LINT_ARCH := $(shell uname -m) +LINT_OS := $(shell uname) +LINT_OS_LOWER := $(shell echo $(LINT_OS) | tr '[:upper:]' '[:lower:]') +LINT_ROOT := $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST)))) + +# shellcheck and hadolint lack arm64 native binaries: rely on x86-64 emulation +ifeq ($(LINT_OS),Darwin) + ifeq ($(LINT_ARCH),arm64) + LINT_ARCH=x86_64 + endif +endif + +LINTERS := +FIXERS := + +SHELLCHECK_VERSION ?= v0.8.0 +SHELLCHECK_BIN := out/linters/shellcheck-$(SHELLCHECK_VERSION)-$(LINT_ARCH) +$(SHELLCHECK_BIN): + mkdir -p out/linters + rm -rf out/linters/shellcheck-* + curl -sSfL https://github.com/koalaman/shellcheck/releases/download/$(SHELLCHECK_VERSION)/shellcheck-$(SHELLCHECK_VERSION).$(LINT_OS_LOWER).$(LINT_ARCH).tar.xz | tar -C out/linters -xJf - + mv out/linters/shellcheck-$(SHELLCHECK_VERSION)/shellcheck $@ + rm -rf out/linters/shellcheck-$(SHELLCHECK_VERSION)/shellcheck + +LINTERS += shellcheck-lint +shellcheck-lint: $(SHELLCHECK_BIN) + $(SHELLCHECK_BIN) $(shell find . -name "*.sh") + +FIXERS += shellcheck-fix +shellcheck-fix: $(SHELLCHECK_BIN) + $(SHELLCHECK_BIN) $(shell find . -name "*.sh") -f diff | { read -t 1 line || exit 0; { echo "$$line" && cat; } | git apply -p2; } + +GOLANGCI_LINT_CONFIG := $(LINT_ROOT)/.golangci.yml +GOLANGCI_LINT_VERSION ?= v1.43.0 +GOLANGCI_LINT_BIN := out/linters/golangci-lint-$(GOLANGCI_LINT_VERSION)-$(LINT_ARCH) +$(GOLANGCI_LINT_BIN): + mkdir -p out/linters + rm -rf out/linters/golangci-lint-* + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b out/linters $(GOLANGCI_LINT_VERSION) + mv out/linters/golangci-lint $@ + +LINTERS += golangci-lint-lint +golangci-lint-lint: $(GOLANGCI_LINT_BIN) + find . -name go.mod -execdir "$(GOLANGCI_LINT_BIN)" run -c "$(GOLANGCI_LINT_CONFIG)" \; + +FIXERS += golangci-lint-fix +golangci-lint-fix: $(GOLANGCI_LINT_BIN) + find . -name go.mod -execdir "$(GOLANGCI_LINT_BIN)" run -c "$(GOLANGCI_LINT_CONFIG)" --fix \; + +YAMLLINT_VERSION ?= 1.26.3 +YAMLLINT_ROOT := out/linters/yamllint-$(YAMLLINT_VERSION) +YAMLLINT_BIN := $(YAMLLINT_ROOT)/dist/bin/yamllint +$(YAMLLINT_BIN): + mkdir -p out/linters + rm -rf out/linters/yamllint-* + curl -sSfL https://github.com/adrienverge/yamllint/archive/refs/tags/v$(YAMLLINT_VERSION).tar.gz | tar -C out/linters -zxf - + cd $(YAMLLINT_ROOT) && pip3 install --target dist . + +LINTERS += yamllint-lint +yamllint-lint: $(YAMLLINT_BIN) + PYTHONPATH=$(YAMLLINT_ROOT)/dist $(YAMLLINT_ROOT)/dist/bin/yamllint . + +.PHONY: _lint $(LINTERS) +_lint: $(LINTERS) + +.PHONY: fix $(FIXERS) +fix: $(FIXERS) + +# END: lint-install -makefile lint.mk .
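For orientation, the `Error` type vendored above wraps a failed `zfs`/`zpool` invocation together with the exact command line (`Debug`) and its captured stderr; `command.Run` in utils.go returns it as a `*zfs.Error`. A minimal sketch of how a consumer of the vendored package might surface that detail; the dataset name is illustrative and the import path simply mirrors the vendored layout above:

```go
package main

import (
	"errors"
	"fmt"

	zfs "github.com/mistifyio/go-zfs/v3"
)

func main() {
	// GetDataset shells out to `zfs list`; on a non-zero exit code the
	// returned error's dynamic type is *zfs.Error.
	_, err := zfs.GetDataset("tank/does-not-exist")

	var zerr *zfs.Error
	if errors.As(err, &zerr) {
		// Debug carries the full command line, Stderr the captured stderr.
		fmt.Printf("command %q failed: %s\n", zerr.Debug, zerr.Stderr)
	} else if err != nil {
		fmt.Println(err)
	}
}
```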
diff --git a/vendor/github.com/mistifyio/go-zfs/v3/rules.mk b/vendor/github.com/mistifyio/go-zfs/v3/rules.mk new file mode 100644 index 0000000000..4746c978a6 --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/v3/rules.mk @@ -0,0 +1,49 @@ +# Only use the recipes defined in these makefiles +MAKEFLAGS += --no-builtin-rules +.SUFFIXES: +# Delete target files if there's an error +# This avoids a failure to then skip building on next run if the output is created by shell redirection for example +# Not really necessary for now, but just good to have already if it becomes necessary later. +.DELETE_ON_ERROR: +# Treat the whole recipe as one shell script/invocation instead of one-per-line +.ONESHELL: +# Use bash instead of plain sh +SHELL := bash +.SHELLFLAGS := -o pipefail -euc + +version := $(shell git rev-parse --short HEAD) +tag := $(shell git tag --points-at HEAD) +ifneq (,$(tag)) +version := $(tag)-$(version) +endif +LDFLAGS := -ldflags "-X main.version=$(version)" +export CGO_ENABLED := 0 + +ifeq ($(origin GOBIN), undefined) +GOBIN := ${PWD}/bin +export GOBIN +PATH := ${GOBIN}:${PATH} +export PATH +endif + +toolsBins := $(addprefix bin/,$(notdir $(shell grep '^\s*_' tooling/tools.go | awk -F'"' '{print $$2}'))) + +# installs cli tools defined in tools.go +$(toolsBins): tooling/go.mod tooling/go.sum tooling/tools.go +$(toolsBins): CMD=$(shell awk -F'"' '/$(@F)"/ {print $$2}' tooling/tools.go) +$(toolsBins): + cd tooling && go install $(CMD) + +.PHONY: gofumpt gofumpt-fix +gofumpt: bin/gofumpt + gofumpt -s -d . + +gofumpt-fix: bin/gofumpt + gofumpt -s -w . + +.PHONY: prettier prettier-fix +prettier: + prettier --list-different --ignore-path .gitignore . + +prettier-fix: + prettier --write --ignore-path .gitignore . diff --git a/vendor/github.com/mistifyio/go-zfs/v3/shell.nix b/vendor/github.com/mistifyio/go-zfs/v3/shell.nix new file mode 100644 index 0000000000..e0ea24c16f --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/v3/shell.nix @@ -0,0 +1,26 @@ +let _pkgs = import <nixpkgs> { }; +in { pkgs ? import (_pkgs.fetchFromGitHub { + owner = "NixOS"; + repo = "nixpkgs"; + #branch@date: 21.11@2022-02-13 + rev = "560ad8a2f89586ab1a14290f128ad6a393046065"; + sha256 = "0s0dv1clfpjyzy4p6ywxvzmwx9ddbr2yl77jf1wqdbr0x1206hb8"; +}) { } }: + +with pkgs; + +mkShell { + buildInputs = [ + git + gnumake + gnused + go + nixfmt + nodePackages.prettier + python3Packages.pip + python3Packages.setuptools + rufo + shfmt + vagrant + ]; +} diff --git a/vendor/github.com/mistifyio/go-zfs/v3/utils.go b/vendor/github.com/mistifyio/go-zfs/v3/utils.go new file mode 100644 index 0000000000..b69942b530 --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/v3/utils.go @@ -0,0 +1,357 @@ +package zfs + +import ( + "bytes" + "errors" + "fmt" + "io" + "os/exec" + "regexp" + "runtime" + "strconv" + "strings" + + "github.com/google/uuid" +) + +type command struct { + Command string + Stdin io.Reader + Stdout io.Writer +} + +func (c *command) Run(arg ...string) ([][]string, error) { + cmd := exec.Command(c.Command, arg...)
+ + var stdout, stderr bytes.Buffer + + if c.Stdout == nil { + cmd.Stdout = &stdout + } else { + cmd.Stdout = c.Stdout + } + + if c.Stdin != nil { + cmd.Stdin = c.Stdin + } + cmd.Stderr = &stderr + + id := uuid.New().String() + joinedArgs := cmd.Path + if len(cmd.Args) > 1 { + joinedArgs = strings.Join(append([]string{cmd.Path}, cmd.Args[1:]...), " ") + } + + logger.Log([]string{"ID:" + id, "START", joinedArgs}) + if err := cmd.Run(); err != nil { + return nil, &Error{ + Err: err, + Debug: joinedArgs, + Stderr: stderr.String(), + } + } + logger.Log([]string{"ID:" + id, "FINISH"}) + + // assume if you passed in something for stdout, that you know what to do with it + if c.Stdout != nil { + return nil, nil + } + + lines := strings.Split(stdout.String(), "\n") + + // last line is always blank + lines = lines[0 : len(lines)-1] + output := make([][]string, len(lines)) + + for i, l := range lines { + output[i] = strings.Split(l, "\t") + } + + return output, nil +} + +func setString(field *string, value string) { + v := "" + if value != "-" { + v = value + } + *field = v +} + +func setUint(field *uint64, value string) error { + var v uint64 + if value != "-" { + var err error + v, err = strconv.ParseUint(value, 10, 64) + if err != nil { + return err + } + } + *field = v + return nil +} + +func (d *Dataset) parseLine(line []string) error { + var err error + + if len(line) != len(dsPropList) { + return errors.New("output does not match what is expected on this platform") + } + setString(&d.Name, line[0]) + setString(&d.Origin, line[1]) + + if err = setUint(&d.Used, line[2]); err != nil { + return err + } + if err = setUint(&d.Avail, line[3]); err != nil { + return err + } + + setString(&d.Mountpoint, line[4]) + setString(&d.Compression, line[5]) + setString(&d.Type, line[6]) + + if err = setUint(&d.Volsize, line[7]); err != nil { + return err + } + if err = setUint(&d.Quota, line[8]); err != nil { + return err + } + if err = setUint(&d.Referenced, line[9]); err != nil { + return err + } + + if runtime.GOOS == "solaris" { + return nil + } + + if err = setUint(&d.Written, line[10]); err != nil { + return err + } + if err = setUint(&d.Logicalused, line[11]); err != nil { + return err + } + return setUint(&d.Usedbydataset, line[12]) +} + +/* + * from zfs diff`s escape function: + * + * Prints a file name out a character at a time. If the character is + * not in the range of what we consider "printable" ASCII, display it + * as an escaped 3-digit octal value. ASCII values less than a space + * are all control characters and we declare the upper end as the + * DELete character. This also is the last 7-bit ASCII character. + * We choose to treat all 8-bit ASCII as not printable for this + * application. 
+ */ +func unescapeFilepath(path string) (string, error) { + buf := make([]byte, 0, len(path)) + llen := len(path) + for i := 0; i < llen; { + if path[i] == '\\' { + if llen < i+4 { + return "", fmt.Errorf("invalid octal code: too short") + } + octalCode := path[(i + 1):(i + 4)] + val, err := strconv.ParseUint(octalCode, 8, 8) + if err != nil { + return "", fmt.Errorf("invalid octal code: %w", err) + } + buf = append(buf, byte(val)) + i += 4 + } else { + buf = append(buf, path[i]) + i++ + } + } + return string(buf), nil +} + +var changeTypeMap = map[string]ChangeType{ + "-": Removed, + "+": Created, + "M": Modified, + "R": Renamed, +} + +var inodeTypeMap = map[string]InodeType{ + "B": BlockDevice, + "C": CharacterDevice, + "/": Directory, + ">": Door, + "|": NamedPipe, + "@": SymbolicLink, + "P": EventPort, + "=": Socket, + "F": File, +} + +// matches (+1) or (-1). +var referenceCountRegex = regexp.MustCompile(`\(([+-]\d+?)\)`) + +func parseReferenceCount(field string) (int, error) { + matches := referenceCountRegex.FindStringSubmatch(field) + if matches == nil { + return 0, fmt.Errorf("regexp does not match") + } + return strconv.Atoi(matches[1]) +} + +func parseInodeChange(line []string) (*InodeChange, error) { + llen := len(line) // nolint:ifshort // llen *is* actually used + if llen < 1 { + return nil, fmt.Errorf("empty line passed") + } + + changeType := changeTypeMap[line[0]] + if changeType == 0 { + return nil, fmt.Errorf("unknown change type '%s'", line[0]) + } + + switch changeType { + case Renamed: + if llen != 4 { + return nil, fmt.Errorf("mismatching number of fields: expect 4, got: %d", llen) + } + case Modified: + if llen != 4 && llen != 3 { + return nil, fmt.Errorf("mismatching number of fields: expect 3..4, got: %d", llen) + } + default: + if llen != 3 { + return nil, fmt.Errorf("mismatching number of fields: expect 3, got: %d", llen) + } + } + + inodeType := inodeTypeMap[line[1]] + if inodeType == 0 { + return nil, fmt.Errorf("unknown inode type '%s'", line[1]) + } + + path, err := unescapeFilepath(line[2]) + if err != nil { + return nil, fmt.Errorf("failed to parse filename: %w", err) + } + + var newPath string + var referenceCount int + switch changeType { + case Renamed: + newPath, err = unescapeFilepath(line[3]) + if err != nil { + return nil, fmt.Errorf("failed to parse filename: %w", err) + } + case Modified: + if llen == 4 { + referenceCount, err = parseReferenceCount(line[3]) + if err != nil { + return nil, fmt.Errorf("failed to parse reference count: %w", err) + } + } + default: + newPath = "" + } + + return &InodeChange{ + Change: changeType, + Type: inodeType, + Path: path, + NewPath: newPath, + ReferenceCountChange: referenceCount, + }, nil +} + +// example input for parseInodeChanges +// M / /testpool/bar/ +// + F /testpool/bar/hello.txt +// M / /testpool/bar/hello.txt (+1) +// M / /testpool/bar/hello-hardlink + +func parseInodeChanges(lines [][]string) ([]*InodeChange, error) { + changes := make([]*InodeChange, len(lines)) + + for i, line := range lines { + c, err := parseInodeChange(line) + if err != nil { + return nil, fmt.Errorf("failed to parse line %d of zfs diff: %w, got: '%s'", i, err, line) + } + changes[i] = c + } + return changes, nil +} + +func listByType(t, filter string) ([]*Dataset, error) { + args := []string{"list", "-rHp", "-t", t, "-o", dsPropListOptions} + + if filter != "" { + args = append(args, filter) + } + out, err := zfsOutput(args...) 
+ if err != nil { + return nil, err + } + + var datasets []*Dataset + + name := "" + var ds *Dataset + for _, line := range out { + if name != line[0] { + name = line[0] + ds = &Dataset{Name: name} + datasets = append(datasets, ds) + } + if err := ds.parseLine(line); err != nil { + return nil, err + } + } + + return datasets, nil +} + +func propsSlice(properties map[string]string) []string { + args := make([]string, 0, len(properties)*3) + for k, v := range properties { + args = append(args, "-o") + args = append(args, fmt.Sprintf("%s=%s", k, v)) + } + return args +} + +func (z *Zpool) parseLine(line []string) error { + prop := line[1] + val := line[2] + + var err error + + switch prop { + case "name": + setString(&z.Name, val) + case "health": + setString(&z.Health, val) + case "allocated": + err = setUint(&z.Allocated, val) + case "size": + err = setUint(&z.Size, val) + case "free": + err = setUint(&z.Free, val) + case "fragmentation": + // Trim trailing "%" before parsing uint + i := strings.Index(val, "%") + if i < 0 { + i = len(val) + } + err = setUint(&z.Fragmentation, val[:i]) + case "readonly": + z.ReadOnly = val == "on" + case "freeing": + err = setUint(&z.Freeing, val) + case "leaked": + err = setUint(&z.Leaked, val) + case "dedupratio": + // Trim trailing "x" before parsing float64 + z.DedupRatio, err = strconv.ParseFloat(val[:len(val)-1], 64) + } + return err +} diff --git a/vendor/github.com/mistifyio/go-zfs/v3/utils_notsolaris.go b/vendor/github.com/mistifyio/go-zfs/v3/utils_notsolaris.go new file mode 100644 index 0000000000..b1ce59656b --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/v3/utils_notsolaris.go @@ -0,0 +1,19 @@ +//go:build !solaris +// +build !solaris + +package zfs + +import "strings" + +var ( + // List of ZFS properties to retrieve from zfs list command on a non-Solaris platform. + dsPropList = []string{"name", "origin", "used", "available", "mountpoint", "compression", "type", "volsize", "quota", "referenced", "written", "logicalused", "usedbydataset"} + + dsPropListOptions = strings.Join(dsPropList, ",") + + // List of Zpool properties to retrieve from zpool list command on a non-Solaris platform. 
+ zpoolPropList = []string{"name", "health", "allocated", "size", "free", "readonly", "dedupratio"} + + zpoolPropListOptions = strings.Join(zpoolPropList, ",") + zpoolArgs = []string{"get", "-Hp", zpoolPropListOptions} +) diff --git a/vendor/github.com/mistifyio/go-zfs/v3/utils_solaris.go b/vendor/github.com/mistifyio/go-zfs/v3/utils_solaris.go new file mode 100644 index 0000000000..f19aebabb2 --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/v3/utils_solaris.go @@ -0,0 +1,19 @@ +//go:build solaris +// +build solaris + +package zfs + +import "strings" + +var ( + // List of ZFS properties to retrieve from zfs list command on a Solaris platform + dsPropList = []string{"name", "origin", "used", "available", "mountpoint", "compression", "type", "volsize", "quota", "referenced"} + + dsPropListOptions = strings.Join(dsPropList, ",") + + // List of Zpool properties to retrieve from zpool list command on a Solaris platform + zpoolPropList = []string{"name", "health", "allocated", "size", "free", "readonly", "dedupratio"} + + zpoolPropListOptions = strings.Join(zpoolPropList, ",") + zpoolArgs = []string{"get", "-Hp", zpoolPropListOptions} +) diff --git a/vendor/github.com/mistifyio/go-zfs/v3/zfs.go b/vendor/github.com/mistifyio/go-zfs/v3/zfs.go new file mode 100644 index 0000000000..1166bdc212 --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/v3/zfs.go @@ -0,0 +1,449 @@ +// Package zfs provides wrappers around the ZFS command line tools. +package zfs + +import ( + "errors" + "fmt" + "io" + "strconv" + "strings" +) + +// ZFS dataset types, which can indicate if a dataset is a filesystem, snapshot, or volume. +const ( + DatasetFilesystem = "filesystem" + DatasetSnapshot = "snapshot" + DatasetVolume = "volume" +) + +// Dataset is a ZFS dataset. A dataset could be a clone, filesystem, snapshot, or volume. +// The Type struct member can be used to determine a dataset's type. +// +// The field definitions can be found in the ZFS manual: +// https://openzfs.github.io/openzfs-docs/man/7/zfsprops.7.html. +type Dataset struct { + Name string + Origin string + Used uint64 + Avail uint64 + Mountpoint string + Compression string + Type string + Written uint64 + Volsize uint64 + Logicalused uint64 + Usedbydataset uint64 + Quota uint64 + Referenced uint64 +} + +// InodeType is the type of inode as reported by Diff. +type InodeType int + +// Types of Inodes. +const ( + _ = iota // 0 == unknown type + BlockDevice InodeType = iota + CharacterDevice + Directory + Door + NamedPipe + SymbolicLink + EventPort + Socket + File +) + +// ChangeType is the type of inode change as reported by Diff. +type ChangeType int + +// Types of Changes. +const ( + _ = iota // 0 == unknown type + Removed ChangeType = iota + Created + Modified + Renamed +) + +// DestroyFlag is the options flag passed to Destroy. +type DestroyFlag int + +// Valid destroy options. +const ( + DestroyDefault DestroyFlag = 1 << iota + DestroyRecursive = 1 << iota + DestroyRecursiveClones = 1 << iota + DestroyDeferDeletion = 1 << iota + DestroyForceUmount = 1 << iota +) + +// InodeChange represents a change as reported by Diff. +type InodeChange struct { + Change ChangeType + Type InodeType + Path string + NewPath string + ReferenceCountChange int +} + +// Logger can be used to log commands/actions.
+type Logger interface { + Log(cmd []string) +} + +type defaultLogger struct{} + +func (*defaultLogger) Log([]string) { +} + +var logger Logger = &defaultLogger{} + +// SetLogger sets a log handler to log all commands, including arguments, before they are executed. +func SetLogger(l Logger) { + if l != nil { + logger = l + } +} + +// zfs is a helper function to wrap typical calls to zfs that ignores stdout. +func zfs(arg ...string) error { + _, err := zfsOutput(arg...) + return err +} + +// zfsOutput is a helper function to wrap typical calls to zfs. +func zfsOutput(arg ...string) ([][]string, error) { + c := command{Command: "zfs"} + return c.Run(arg...) +} + +// Datasets returns a slice of ZFS datasets, regardless of type. +// A filter argument may be passed to select a dataset with the matching name, or empty string ("") may be used to select all datasets. +func Datasets(filter string) ([]*Dataset, error) { + return listByType("all", filter) +} + +// Snapshots returns a slice of ZFS snapshots. +// A filter argument may be passed to select a snapshot with the matching name, or empty string ("") may be used to select all snapshots. +func Snapshots(filter string) ([]*Dataset, error) { + return listByType(DatasetSnapshot, filter) +} + +// Filesystems returns a slice of ZFS filesystems. +// A filter argument may be passed to select a filesystem with the matching name, or empty string ("") may be used to select all filesystems. +func Filesystems(filter string) ([]*Dataset, error) { + return listByType(DatasetFilesystem, filter) +} + +// Volumes returns a slice of ZFS volumes. +// A filter argument may be passed to select a volume with the matching name, or empty string ("") may be used to select all volumes. +func Volumes(filter string) ([]*Dataset, error) { + return listByType(DatasetVolume, filter) +} + +// GetDataset retrieves a single ZFS dataset by name. +// This dataset could be any valid ZFS dataset type, such as a clone, filesystem, snapshot, or volume. +func GetDataset(name string) (*Dataset, error) { + out, err := zfsOutput("list", "-Hp", "-o", dsPropListOptions, name) + if err != nil { + return nil, err + } + + ds := &Dataset{Name: name} + for _, line := range out { + if err := ds.parseLine(line); err != nil { + return nil, err + } + } + + return ds, nil +} + +// Clone clones a ZFS snapshot and returns a clone dataset. +// An error will be returned if the input dataset is not of snapshot type. +func (d *Dataset) Clone(dest string, properties map[string]string) (*Dataset, error) { + if d.Type != DatasetSnapshot { + return nil, errors.New("can only clone snapshots") + } + args := make([]string, 2, 4) + args[0] = "clone" + args[1] = "-p" + if properties != nil { + args = append(args, propsSlice(properties)...) + } + args = append(args, []string{d.Name, dest}...) + if err := zfs(args...); err != nil { + return nil, err + } + return GetDataset(dest) +} + +// Unmount unmounts currently mounted ZFS file systems. +func (d *Dataset) Unmount(force bool) (*Dataset, error) { + if d.Type == DatasetSnapshot { + return nil, errors.New("cannot unmount snapshots") + } + args := make([]string, 1, 3) + args[0] = "umount" + if force { + args = append(args, "-f") + } + args = append(args, d.Name) + if err := zfs(args...); err != nil { + return nil, err + } + return GetDataset(d.Name) +} + +// Mount mounts ZFS file systems.
+func (d *Dataset) Mount(overlay bool, options []string) (*Dataset, error) { + if d.Type == DatasetSnapshot { + return nil, errors.New("cannot mount snapshots") + } + args := make([]string, 1, 5) + args[0] = "mount" + if overlay { + args = append(args, "-O") + } + if options != nil { + args = append(args, "-o") + args = append(args, strings.Join(options, ",")) + } + args = append(args, d.Name) + if err := zfs(args...); err != nil { + return nil, err + } + return GetDataset(d.Name) +} + +// ReceiveSnapshot receives a ZFS stream from the input io.Reader. +// A new snapshot is created with the specified name, and the input data is streamed into the newly created snapshot. +func ReceiveSnapshot(input io.Reader, name string) (*Dataset, error) { + c := command{Command: "zfs", Stdin: input} + if _, err := c.Run("receive", name); err != nil { + return nil, err + } + return GetDataset(name) +} + +// SendSnapshot sends a ZFS stream of a snapshot to the input io.Writer. +// An error will be returned if the input dataset is not of snapshot type. +func (d *Dataset) SendSnapshot(output io.Writer) error { + if d.Type != DatasetSnapshot { + return errors.New("can only send snapshots") + } + + c := command{Command: "zfs", Stdout: output} + _, err := c.Run("send", d.Name) + return err +} + +// IncrementalSend sends a ZFS stream of a snapshot to the input io.Writer using the baseSnapshot as the starting point. +// An error will be returned if the input dataset is not of snapshot type. +func (d *Dataset) IncrementalSend(baseSnapshot *Dataset, output io.Writer) error { + if d.Type != DatasetSnapshot || baseSnapshot.Type != DatasetSnapshot { + return errors.New("can only send snapshots") + } + c := command{Command: "zfs", Stdout: output} + _, err := c.Run("send", "-i", baseSnapshot.Name, d.Name) + return err +} + +// CreateVolume creates a new ZFS volume with the specified name, size, and properties. +// +// A full list of available ZFS properties may be found in the ZFS manual: +// https://openzfs.github.io/openzfs-docs/man/7/zfsprops.7.html. +func CreateVolume(name string, size uint64, properties map[string]string) (*Dataset, error) { + args := make([]string, 4, 5) + args[0] = "create" + args[1] = "-p" + args[2] = "-V" + args[3] = strconv.FormatUint(size, 10) + if properties != nil { + args = append(args, propsSlice(properties)...) + } + args = append(args, name) + if err := zfs(args...); err != nil { + return nil, err + } + return GetDataset(name) +} + +// Destroy destroys a ZFS dataset. +// If the DestroyRecursive flag is set, any descendants of the dataset will be recursively destroyed, including snapshots. +// If the DestroyDeferDeletion flag is set, the snapshot is marked for deferred deletion. +func (d *Dataset) Destroy(flags DestroyFlag) error { + args := make([]string, 1, 3) + args[0] = "destroy" + if flags&DestroyRecursive != 0 { + args = append(args, "-r") + } + + if flags&DestroyRecursiveClones != 0 { + args = append(args, "-R") + } + + if flags&DestroyDeferDeletion != 0 { + args = append(args, "-d") + } + + if flags&DestroyForceUmount != 0 { + args = append(args, "-f") + } + + args = append(args, d.Name) + err := zfs(args...) + return err +} + +// SetProperty sets a ZFS property on the receiving dataset. +// +// A full list of available ZFS properties may be found in the ZFS manual: +// https://openzfs.github.io/openzfs-docs/man/7/zfsprops.7.html.
+func (d *Dataset) SetProperty(key, val string) error { + prop := strings.Join([]string{key, val}, "=") + err := zfs("set", prop, d.Name) + return err +} + +// GetProperty returns the current value of a ZFS property from the receiving dataset. +// +// A full list of available ZFS properties may be found in the ZFS manual: +// https://openzfs.github.io/openzfs-docs/man/7/zfsprops.7.html. +func (d *Dataset) GetProperty(key string) (string, error) { + out, err := zfsOutput("get", "-H", key, d.Name) + if err != nil { + return "", err + } + + return out[0][2], nil +} + +// Rename renames a dataset. +func (d *Dataset) Rename(name string, createParent, recursiveRenameSnapshots bool) (*Dataset, error) { + args := make([]string, 3, 5) + args[0] = "rename" + args[1] = d.Name + args[2] = name + if createParent { + args = append(args, "-p") + } + if recursiveRenameSnapshots { + args = append(args, "-r") + } + if err := zfs(args...); err != nil { + return d, err + } + + return GetDataset(name) +} + +// Snapshots returns a slice of all ZFS snapshots of a given dataset. +func (d *Dataset) Snapshots() ([]*Dataset, error) { + return Snapshots(d.Name) +} + +// CreateFilesystem creates a new ZFS filesystem with the specified name and properties. +// +// A full list of available ZFS properties may be found in the ZFS manual: +// https://openzfs.github.io/openzfs-docs/man/7/zfsprops.7.html. +func CreateFilesystem(name string, properties map[string]string) (*Dataset, error) { + args := make([]string, 1, 4) + args[0] = "create" + + if properties != nil { + args = append(args, propsSlice(properties)...) + } + + args = append(args, name) + if err := zfs(args...); err != nil { + return nil, err + } + return GetDataset(name) +} + +// Snapshot creates a new ZFS snapshot of the receiving dataset, using the specified name. +// Optionally, the snapshot can be taken recursively, creating snapshots of all descendent filesystems in a single, atomic operation. +func (d *Dataset) Snapshot(name string, recursive bool) (*Dataset, error) { + args := make([]string, 1, 4) + args[0] = "snapshot" + if recursive { + args = append(args, "-r") + } + snapName := fmt.Sprintf("%s@%s", d.Name, name) + args = append(args, snapName) + if err := zfs(args...); err != nil { + return nil, err + } + return GetDataset(snapName) +} + +// Rollback rolls back the receiving ZFS dataset to a previous snapshot. +// Optionally, intermediate snapshots can be destroyed. +// A ZFS snapshot rollback cannot be completed without this option, if more recent snapshots exist. +// An error will be returned if the input dataset is not of snapshot type. +func (d *Dataset) Rollback(destroyMoreRecent bool) error { + if d.Type != DatasetSnapshot { + return errors.New("can only rollback snapshots") + } + + args := make([]string, 1, 3) + args[0] = "rollback" + if destroyMoreRecent { + args = append(args, "-r") + } + args = append(args, d.Name) + + err := zfs(args...) + return err +} + +// Children returns a slice of children of the receiving ZFS dataset. +// A recursion depth may be specified, or a depth of 0 allows unlimited recursion. +func (d *Dataset) Children(depth uint64) ([]*Dataset, error) { + args := []string{"list"} + if depth > 0 { + args = append(args, "-d") + args = append(args, strconv.FormatUint(depth, 10)) + } else { + args = append(args, "-r") + } + args = append(args, "-t", "all", "-Hp", "-o", dsPropListOptions) + args = append(args, d.Name) + + out, err := zfsOutput(args...) 
+ if err != nil { + return nil, err + } + + var datasets []*Dataset + name := "" + var ds *Dataset + for _, line := range out { + if name != line[0] { + name = line[0] + ds = &Dataset{Name: name} + datasets = append(datasets, ds) + } + if err := ds.parseLine(line); err != nil { + return nil, err + } + } + return datasets[1:], nil +} + +// Diff returns changes between a snapshot and the given ZFS dataset. +// The snapshot name must include the filesystem part as it is possible to compare clones with their origin snapshots. +func (d *Dataset) Diff(snapshot string) ([]*InodeChange, error) { + args := []string{"diff", "-FH", snapshot, d.Name} + out, err := zfsOutput(args...) + if err != nil { + return nil, err + } + inodeChanges, err := parseInodeChanges(out) + if err != nil { + return nil, err + } + return inodeChanges, nil +} diff --git a/vendor/github.com/mistifyio/go-zfs/v3/zpool.go b/vendor/github.com/mistifyio/go-zfs/v3/zpool.go new file mode 100644 index 0000000000..a0bd6471a5 --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/v3/zpool.go @@ -0,0 +1,116 @@ +package zfs + +// ZFS zpool states, which can indicate if a pool is online, offline, degraded, etc. +// +// More information regarding zpool states can be found in the ZFS manual: +// https://openzfs.github.io/openzfs-docs/man/7/zpoolconcepts.7.html#Device_Failure_and_Recovery +const ( + ZpoolOnline = "ONLINE" + ZpoolDegraded = "DEGRADED" + ZpoolFaulted = "FAULTED" + ZpoolOffline = "OFFLINE" + ZpoolUnavail = "UNAVAIL" + ZpoolRemoved = "REMOVED" +) + +// Zpool is a ZFS zpool. +// A pool is a top-level structure in ZFS, and can contain many descendant datasets. +type Zpool struct { + Name string + Health string + Allocated uint64 + Size uint64 + Free uint64 + Fragmentation uint64 + ReadOnly bool + Freeing uint64 + Leaked uint64 + DedupRatio float64 +} + +// zpool is a helper function to wrap typical calls to zpool and ignores stdout. +func zpool(arg ...string) error { + _, err := zpoolOutput(arg...) + return err +} + +// zpoolOutput is a helper function to wrap typical calls to zpool. +func zpoolOutput(arg ...string) ([][]string, error) { + c := command{Command: "zpool"} + return c.Run(arg...) +} + +// GetZpool retrieves a single ZFS zpool by name. +func GetZpool(name string) (*Zpool, error) { + args := zpoolArgs + args = append(args, name) + out, err := zpoolOutput(args...) + if err != nil { + return nil, err + } + + z := &Zpool{Name: name} + for _, line := range out { + if err := z.parseLine(line); err != nil { + return nil, err + } + } + + return z, nil +} + +// Datasets returns a slice of all ZFS datasets in a zpool. +func (z *Zpool) Datasets() ([]*Dataset, error) { + return Datasets(z.Name) +} + +// Snapshots returns a slice of all ZFS snapshots in a zpool. +func (z *Zpool) Snapshots() ([]*Dataset, error) { + return Snapshots(z.Name) +} + +// CreateZpool creates a new ZFS zpool with the specified name, properties, and optional arguments. +// +// A full list of available ZFS properties and command-line arguments may be found in the ZFS manual: +// https://openzfs.github.io/openzfs-docs/man/7/zfsprops.7.html. +// https://openzfs.github.io/openzfs-docs/man/8/zpool-create.8.html +func CreateZpool(name string, properties map[string]string, args ...string) (*Zpool, error) { + cli := make([]string, 1, 4) + cli[0] = "create" + if properties != nil { + cli = append(cli, propsSlice(properties)...) + } + cli = append(cli, name) + cli = append(cli, args...)
+ if err := zpool(cli...); err != nil { + return nil, err + } + + return &Zpool{Name: name}, nil +} + +// Destroy destroys a ZFS zpool by name. +func (z *Zpool) Destroy() error { + err := zpool("destroy", z.Name) + return err +} + +// ListZpools lists all ZFS zpools accessible on the current system. +func ListZpools() ([]*Zpool, error) { + args := []string{"list", "-Ho", "name"} + out, err := zpoolOutput(args...) + if err != nil { + return nil, err + } + + var pools []*Zpool + + for _, line := range out { + z, err := GetZpool(line[0]) + if err != nil { + return nil, err + } + pools = append(pools, z) + } + return pools, nil +} diff --git a/vendor/github.com/opencontainers/go-digest/digestset/set.go b/vendor/github.com/opencontainers/go-digest/digestset/set.go new file mode 100644 index 0000000000..71f24184ca --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/digestset/set.go @@ -0,0 +1,262 @@ +// Copyright 2020, 2020 OCI Contributors +// Copyright 2017 Docker, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package digestset + +import ( + "errors" + "sort" + "strings" + "sync" + + digest "github.com/opencontainers/go-digest" +) + +var ( + // ErrDigestNotFound is used when a matching digest + // could not be found in a set. + ErrDigestNotFound = errors.New("digest not found") + + // ErrDigestAmbiguous is used when multiple digests + // are found in a set. None of the matching digests + // should be considered valid matches. + ErrDigestAmbiguous = errors.New("ambiguous digest string") +) + +// Set is used to hold a unique set of digests which +// may be easily referenced by a string +// representation of the digest, as well as a short representation. +// The uniqueness of the short representation is based on other +// digests in the set. If digests are omitted from this set, +// collisions in a larger set may not be detected, therefore it +// is important to always do short representation lookups on +// the complete set of digests. To mitigate collisions, an +// appropriately long short code should be used. +type Set struct { + mutex sync.RWMutex + entries digestEntries +} + +// NewSet creates an empty set of digests +// which may have digests added. +func NewSet() *Set { + return &Set{ + entries: digestEntries{}, + } +} + +// checkShortMatch checks whether two digests match as either whole +// values or short values. This function does not test equality, +// rather whether the second value could match against the first +// value. +func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool { + if len(hex) == len(shortHex) { + if hex != shortHex { + return false + } + if len(shortAlg) > 0 && string(alg) != shortAlg { + return false + } + } else if !strings.HasPrefix(hex, shortHex) { + return false + } else if len(shortAlg) > 0 && string(alg) != shortAlg { + return false + } + return true +} + +// Lookup looks for a digest matching the given string representation.
+// If no digests could be found, ErrDigestNotFound will be returned +// with an empty digest value. If multiple matches are found, +// ErrDigestAmbiguous will be returned with an empty digest value. +func (dst *Set) Lookup(d string) (digest.Digest, error) { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + if len(dst.entries) == 0 { + return "", ErrDigestNotFound + } + var ( + searchFunc func(int) bool + alg digest.Algorithm + hex string + ) + dgst, err := digest.Parse(d) + if err == digest.ErrDigestInvalidFormat { + hex = d + searchFunc = func(i int) bool { + return dst.entries[i].val >= d + } + } else { + hex = dgst.Hex() + alg = dgst.Algorithm() + searchFunc = func(i int) bool { + if dst.entries[i].val == hex { + return dst.entries[i].alg >= alg + } + return dst.entries[i].val >= hex + } + } + idx := sort.Search(len(dst.entries), searchFunc) + if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) { + return "", ErrDigestNotFound + } + if dst.entries[idx].alg == alg && dst.entries[idx].val == hex { + return dst.entries[idx].digest, nil + } + if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) { + return "", ErrDigestAmbiguous + } + + return dst.entries[idx].digest, nil +} + +// Add adds the given digest to the set. An error will be returned +// if the given digest is invalid. If the digest already exists in the +// set, this operation will be a no-op. +func (dst *Set) Add(d digest.Digest) error { + if err := d.Validate(); err != nil { + return err + } + dst.mutex.Lock() + defer dst.mutex.Unlock() + entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} + searchFunc := func(i int) bool { + if dst.entries[i].val == entry.val { + return dst.entries[i].alg >= entry.alg + } + return dst.entries[i].val >= entry.val + } + idx := sort.Search(len(dst.entries), searchFunc) + if idx == len(dst.entries) { + dst.entries = append(dst.entries, entry) + return nil + } else if dst.entries[idx].digest == d { + return nil + } + + entries := append(dst.entries, nil) + copy(entries[idx+1:], entries[idx:len(entries)-1]) + entries[idx] = entry + dst.entries = entries + return nil +} + +// Remove removes the given digest from the set. An error will be +// returned if the given digest is invalid. If the digest does +// not exist in the set, this operation will be a no-op. +func (dst *Set) Remove(d digest.Digest) error { + if err := d.Validate(); err != nil { + return err + } + dst.mutex.Lock() + defer dst.mutex.Unlock() + entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} + searchFunc := func(i int) bool { + if dst.entries[i].val == entry.val { + return dst.entries[i].alg >= entry.alg + } + return dst.entries[i].val >= entry.val + } + idx := sort.Search(len(dst.entries), searchFunc) + // Not found if idx is after or value at idx is not digest + if idx == len(dst.entries) || dst.entries[idx].digest != d { + return nil + } + + entries := dst.entries + copy(entries[idx:], entries[idx+1:]) + entries = entries[:len(entries)-1] + dst.entries = entries + + return nil +} + +// All returns all the digests in the set. +func (dst *Set) All() []digest.Digest { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + retValues := make([]digest.Digest, len(dst.entries)) + for i := range dst.entries { + retValues[i] = dst.entries[i].digest + } + + return retValues +} + +// ShortCodeTable returns a map of Digest to unique short codes.
The +// length represents the minimum value, the maximum length may be the +// entire value of digest if uniqueness cannot be achieved without the +// full value. This function will attempt to make short codes as short +// as possible to be unique. +func ShortCodeTable(dst *Set, length int) map[digest.Digest]string { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + m := make(map[digest.Digest]string, len(dst.entries)) + l := length + resetIdx := 0 + for i := 0; i < len(dst.entries); i++ { + var short string + extended := true + for extended { + extended = false + if len(dst.entries[i].val) <= l { + short = dst.entries[i].digest.String() + } else { + short = dst.entries[i].val[:l] + for j := i + 1; j < len(dst.entries); j++ { + if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) { + if j > resetIdx { + resetIdx = j + } + extended = true + } else { + break + } + } + if extended { + l++ + } + } + } + m[dst.entries[i].digest] = short + if i >= resetIdx { + l = length + } + } + return m +} + +type digestEntry struct { + alg digest.Algorithm + val string + digest digest.Digest +} + +type digestEntries []*digestEntry + +func (d digestEntries) Len() int { + return len(d) +} + +func (d digestEntries) Less(i, j int) bool { + if d[i].val != d[j].val { + return d[i].val < d[j].val + } + return d[i].alg < d[j].alg +} + +func (d digestEntries) Swap(i, j int) { + d[i], d[j] = d[j], d[i] +} diff --git a/vendor/github.com/opencontainers/selinux/LICENSE b/vendor/github.com/opencontainers/selinux/LICENSE new file mode 100644 index 0000000000..8dada3edaf --- /dev/null +++ b/vendor/github.com/opencontainers/selinux/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/doc.go b/vendor/github.com/opencontainers/selinux/go-selinux/doc.go new file mode 100644 index 0000000000..57a15c9a11 --- /dev/null +++ b/vendor/github.com/opencontainers/selinux/go-selinux/doc.go @@ -0,0 +1,13 @@ +/* +Package selinux provides a high-level interface for interacting with selinux. + +Usage: + + import "github.com/opencontainers/selinux/go-selinux" + + // Ensure that selinux is enforcing mode. 
+	if selinux.EnforceMode() != selinux.Enforcing {
+		selinux.SetEnforceMode(selinux.Enforcing)
+	}
+*/
+package selinux
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go
new file mode 100644
index 0000000000..07e0f77dc2
--- /dev/null
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go
@@ -0,0 +1,115 @@
+package label
+
+import (
+	"fmt"
+
+	"github.com/opencontainers/selinux/go-selinux"
+)
+
+// Deprecated: use selinux.ROFileLabel
+var ROMountLabel = selinux.ROFileLabel
+
+// SetProcessLabel takes a process label and tells the kernel to assign the
+// label to the next program executed by the current process.
+// Deprecated: use selinux.SetExecLabel
+var SetProcessLabel = selinux.SetExecLabel
+
+// ProcessLabel returns the process label that the kernel will assign
+// to the next program executed by the current process. If "" is returned,
+// the default labeling will happen for the process.
+// Deprecated: use selinux.ExecLabel
+var ProcessLabel = selinux.ExecLabel
+
+// SetSocketLabel takes a process label and tells the kernel to assign the
+// label to the next socket that gets created.
+// Deprecated: use selinux.SetSocketLabel
+var SetSocketLabel = selinux.SetSocketLabel
+
+// SocketLabel retrieves the current default socket label setting.
+// Deprecated: use selinux.SocketLabel
+var SocketLabel = selinux.SocketLabel
+
+// SetKeyLabel takes a process label and tells the kernel to assign the
+// label to the next kernel keyring that gets created.
+// Deprecated: use selinux.SetKeyLabel
+var SetKeyLabel = selinux.SetKeyLabel
+
+// KeyLabel retrieves the current default kernel keyring label setting.
+// Deprecated: use selinux.KeyLabel
+var KeyLabel = selinux.KeyLabel
+
+// FileLabel returns the label for the specified path.
+// Deprecated: use selinux.FileLabel
+var FileLabel = selinux.FileLabel
+
+// PidLabel returns the label of the process running with the specified pid.
+// Deprecated: use selinux.PidLabel
+var PidLabel = selinux.PidLabel
+
+// Init initialises the labeling system.
+func Init() {
+	_ = selinux.GetEnabled()
+}
+
+// ClearLabels will clear all reserved labels.
+// Deprecated: use selinux.ClearLabels
+var ClearLabels = selinux.ClearLabels
+
+// ReserveLabel will record the fact that the MCS label has already been used.
+// This will prevent InitLabels from using the MCS label in a newly created
+// container.
+// Deprecated: use selinux.ReserveLabel
+func ReserveLabel(label string) error {
+	selinux.ReserveLabel(label)
+	return nil
+}
+
+// ReleaseLabel will remove the reservation of the MCS label.
+// This will allow InitLabels to use the MCS label in newly created
+// containers.
+// Deprecated: use selinux.ReleaseLabel
+func ReleaseLabel(label string) error {
+	selinux.ReleaseLabel(label)
+	return nil
+}
+
+// DupSecOpt takes a process label and returns security options that
+// can be used to set duplicate labels on future container processes.
+// Deprecated: use selinux.DupSecOpt
+var DupSecOpt = selinux.DupSecOpt
+
+// FormatMountLabel returns a string suitable to be used as the options field
+// of the mount command, using the SELinux `context` mount option. Labels of
+// files on mount points mounted with this option can never be changed.
+// If you need additional mount point options, pass them in as the first
+// parameter. The second parameter is the label that you wish to apply
+// to all content in the mount point.
+func FormatMountLabel(src, mountLabel string) string {
+	return FormatMountLabelByType(src, mountLabel, "context")
+}
+
+// FormatMountLabelByType returns a string suitable to be used as the options
+// field of the mount command, allowing the caller to specify the context
+// mount option. For example, the SELinux `fscontext` mount option allows
+// certain container processes to change the labels of files created on the
+// mount points, whereas the `context` option does not.
+// If you need additional mount point options, pass them in as the first
+// parameter. The second parameter is the label that you wish to apply
+// to all content in the mount point.
+func FormatMountLabelByType(src, mountLabel, contextType string) string {
+	if mountLabel != "" {
+		switch src {
+		case "":
+			src = fmt.Sprintf("%s=%q", contextType, mountLabel)
+		default:
+			src = fmt.Sprintf("%s,%s=%q", src, contextType, mountLabel)
+		}
+	}
+	return src
+}
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go
new file mode 100644
index 0000000000..f61a560158
--- /dev/null
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go
@@ -0,0 +1,150 @@
+package label
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/opencontainers/selinux/go-selinux"
+)
+
+// Valid label options.
+var validOptions = map[string]bool{
+	"disable":  true,
+	"type":     true,
+	"filetype": true,
+	"user":     true,
+	"role":     true,
+	"level":    true,
+}
+
+var ErrIncompatibleLabel = errors.New("Bad SELinux option z and Z can not be used together")
+
+// InitLabels returns the process label and file labels to be used within
+// the container. A list of options can be passed into this function to alter
+// the labels. The labels returned will include a random MCS string that is
+// guaranteed to be unique.
+// If the "disable" option is passed in, the process label will not be set, and
+// the mount label will be set to the container_file label with the maximum
+// category. This label is not usable by any confined process.
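+//
+// A hypothetical call, pinning the MCS level instead of using a random one
+// (the level value is illustrative):
+//
+//	processLabel, mountLabel, err := InitLabels([]string{"level:s0:c100,c200"})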
+func InitLabels(options []string) (plabel string, mlabel string, retErr error) { + if !selinux.GetEnabled() { + return "", "", nil + } + processLabel, mountLabel := selinux.ContainerLabels() + if processLabel != "" { + defer func() { + if retErr != nil { + selinux.ReleaseLabel(mountLabel) + } + }() + pcon, err := selinux.NewContext(processLabel) + if err != nil { + return "", "", err + } + mcsLevel := pcon["level"] + mcon, err := selinux.NewContext(mountLabel) + if err != nil { + return "", "", err + } + for _, opt := range options { + if opt == "disable" { + selinux.ReleaseLabel(mountLabel) + return "", selinux.PrivContainerMountLabel(), nil + } + if i := strings.Index(opt, ":"); i == -1 { + return "", "", fmt.Errorf("Bad label option %q, valid options 'disable' or \n'user, role, level, type, filetype' followed by ':' and a value", opt) + } + con := strings.SplitN(opt, ":", 2) + if !validOptions[con[0]] { + return "", "", fmt.Errorf("Bad label option %q, valid options 'disable, user, role, level, type, filetype'", con[0]) + } + if con[0] == "filetype" { + mcon["type"] = con[1] + continue + } + pcon[con[0]] = con[1] + if con[0] == "level" || con[0] == "user" { + mcon[con[0]] = con[1] + } + } + if pcon.Get() != processLabel { + if pcon["level"] != mcsLevel { + selinux.ReleaseLabel(processLabel) + } + processLabel = pcon.Get() + selinux.ReserveLabel(processLabel) + } + mountLabel = mcon.Get() + } + return processLabel, mountLabel, nil +} + +// Deprecated: The GenLabels function is only to be used during the transition +// to the official API. Use InitLabels(strings.Fields(options)) instead. +func GenLabels(options string) (string, string, error) { + return InitLabels(strings.Fields(options)) +} + +// SetFileLabel modifies the "path" label to the specified file label +func SetFileLabel(path string, fileLabel string) error { + if !selinux.GetEnabled() || fileLabel == "" { + return nil + } + return selinux.SetFileLabel(path, fileLabel) +} + +// SetFileCreateLabel tells the kernel the label for all files to be created +func SetFileCreateLabel(fileLabel string) error { + if !selinux.GetEnabled() { + return nil + } + return selinux.SetFSCreateLabel(fileLabel) +} + +// Relabel changes the label of path and all the entries beneath the path. +// It changes the MCS label to s0 if shared is true. +// This will allow all containers to share the content. +// +// The path itself is guaranteed to be relabeled last. 
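+//
+// An illustrative call, relabeling a directory so that all containers may
+// share its content (the path is hypothetical):
+//
+//	err := Relabel("/srv/webdata", mountLabel, true)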
+func Relabel(path string, fileLabel string, shared bool) error { + if !selinux.GetEnabled() || fileLabel == "" { + return nil + } + + if shared { + c, err := selinux.NewContext(fileLabel) + if err != nil { + return err + } + + c["level"] = "s0" + fileLabel = c.Get() + } + if err := selinux.Chcon(path, fileLabel, true); err != nil { + return err + } + return nil +} + +// DisableSecOpt returns a security opt that can disable labeling +// support for future container processes +// Deprecated: use selinux.DisableSecOpt +var DisableSecOpt = selinux.DisableSecOpt + +// Validate checks that the label does not include unexpected options +func Validate(label string) error { + if strings.Contains(label, "z") && strings.Contains(label, "Z") { + return ErrIncompatibleLabel + } + return nil +} + +// RelabelNeeded checks whether the user requested a relabel +func RelabelNeeded(label string) bool { + return strings.Contains(label, "z") || strings.Contains(label, "Z") +} + +// IsShared checks that the label includes a "shared" mark +func IsShared(label string) bool { + return strings.Contains(label, "z") +} diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go new file mode 100644 index 0000000000..f21c80c5ab --- /dev/null +++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go @@ -0,0 +1,50 @@ +//go:build !linux +// +build !linux + +package label + +// InitLabels returns the process label and file labels to be used within +// the container. A list of options can be passed into this function to alter +// the labels. +func InitLabels(options []string) (string, string, error) { + return "", "", nil +} + +// Deprecated: The GenLabels function is only to be used during the transition +// to the official API. Use InitLabels(strings.Fields(options)) instead. 
+func GenLabels(options string) (string, string, error) {
+	return "", "", nil
+}
+
+func SetFileLabel(path string, fileLabel string) error {
+	return nil
+}
+
+func SetFileCreateLabel(fileLabel string) error {
+	return nil
+}
+
+func Relabel(path string, fileLabel string, shared bool) error {
+	return nil
+}
+
+// DisableSecOpt returns a security opt that can disable labeling
+// support for future container processes.
+func DisableSecOpt() []string {
+	return nil
+}
+
+// Validate checks that the label does not include unexpected options.
+func Validate(label string) error {
+	return nil
+}
+
+// RelabelNeeded checks whether the user requested a relabel.
+func RelabelNeeded(label string) bool {
+	return false
+}
+
+// IsShared checks that the label includes a "shared" mark.
+func IsShared(label string) bool {
+	return false
+}
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go
new file mode 100644
index 0000000000..af058b84b1
--- /dev/null
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go
@@ -0,0 +1,314 @@
+package selinux
+
+import (
+	"errors"
+)
+
+const (
+	// Enforcing constant indicates SELinux is in enforcing mode.
+	Enforcing = 1
+	// Permissive constant indicates SELinux is in permissive mode.
+	Permissive = 0
+	// Disabled constant indicates SELinux is disabled.
+	Disabled = -1
+	// maxCategory is the maximum number of categories used within containers.
+	maxCategory = 1024
+	// DefaultCategoryRange is the upper bound on the category range.
+	DefaultCategoryRange = uint32(maxCategory)
+)
+
+var (
+	// ErrMCSAlreadyExists is returned when trying to allocate a duplicate MCS.
+	ErrMCSAlreadyExists = errors.New("MCS label already exists")
+	// ErrEmptyPath is returned when an empty path has been specified.
+	ErrEmptyPath = errors.New("empty path")
+
+	// ErrInvalidLabel is returned when an invalid label is specified.
+	ErrInvalidLabel = errors.New("invalid Label")
+
+	// InvalidLabel is returned when an invalid label is specified.
+	//
+	// Deprecated: use [ErrInvalidLabel].
+	InvalidLabel = ErrInvalidLabel
+
+	// ErrIncomparable is returned when two levels are not comparable.
+	ErrIncomparable = errors.New("incomparable levels")
+	// ErrLevelSyntax is returned when a sensitivity or category does not have
+	// correct syntax in a level.
+	ErrLevelSyntax = errors.New("invalid level syntax")
+
+	// ErrContextMissing is returned if a requested context is not found in a file.
+	ErrContextMissing = errors.New("context does not have a match")
+	// ErrVerifierNil is returned when a context verifier function is nil.
+	ErrVerifierNil = errors.New("verifier function is nil")
+
+	// CategoryRange allows the upper bound on the category range to be adjusted.
+	CategoryRange = DefaultCategoryRange
+
+	privContainerMountLabel string
+)
+
+// Context is a representation of the SELinux label broken into 4 parts.
+type Context map[string]string
+
+// SetDisabled disables SELinux support for the package.
+func SetDisabled() {
+	setDisabled()
+}
+
+// GetEnabled returns whether SELinux is currently enabled.
+func GetEnabled() bool {
+	return getEnabled()
+}
+
+// ClassIndex returns the int index for an object class in the loaded policy,
+// or -1 and an error.
+func ClassIndex(class string) (int, error) {
+	return classIndex(class)
+}
+
+// SetFileLabel sets the SELinux label for this path, following symlinks,
+// or returns an error.
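+//
+// For example (path and label are illustrative and policy-dependent):
+//
+//	err := SetFileLabel("/var/lib/mydata", "system_u:object_r:container_file_t:s0")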
+func SetFileLabel(fpath string, label string) error {
+	return setFileLabel(fpath, label)
+}
+
+// LsetFileLabel sets the SELinux label for this path, not following symlinks,
+// or returns an error.
+func LsetFileLabel(fpath string, label string) error {
+	return lSetFileLabel(fpath, label)
+}
+
+// FileLabel returns the SELinux label for this path, following symlinks,
+// or returns an error.
+func FileLabel(fpath string) (string, error) {
+	return fileLabel(fpath)
+}
+
+// LfileLabel returns the SELinux label for this path, not following symlinks,
+// or returns an error.
+func LfileLabel(fpath string) (string, error) {
+	return lFileLabel(fpath)
+}
+
+// SetFSCreateLabel tells the kernel what label to use for all file system objects
+// created by this task.
+// Set the label to an empty string to return to the default label. Calls to SetFSCreateLabel
+// should be wrapped in runtime.LockOSThread()/runtime.UnlockOSThread() until file system
+// objects created by this task are finished to guarantee another goroutine does not migrate
+// to the current thread before execution is complete.
+func SetFSCreateLabel(label string) error {
+	return setFSCreateLabel(label)
+}
+
+// FSCreateLabel returns the default label which the kernel is using
+// for file system objects created by this task. "" indicates default.
+func FSCreateLabel() (string, error) {
+	return fsCreateLabel()
+}
+
+// CurrentLabel returns the SELinux label of the current process thread, or an error.
+func CurrentLabel() (string, error) {
+	return currentLabel()
+}
+
+// PidLabel returns the SELinux label of the given pid, or an error.
+func PidLabel(pid int) (string, error) {
+	return pidLabel(pid)
+}
+
+// ExecLabel returns the SELinux label that the kernel will use for any programs
+// that are executed by the current process thread, or an error.
+func ExecLabel() (string, error) {
+	return execLabel()
+}
+
+// CanonicalizeContext takes a context string and writes it to the kernel;
+// the function then returns the context that the kernel will use. Use this
+// function to check if two contexts are equivalent.
+func CanonicalizeContext(val string) (string, error) {
+	return canonicalizeContext(val)
+}
+
+// ComputeCreateContext requests the type transition from source to target for
+// class from the kernel.
+func ComputeCreateContext(source string, target string, class string) (string, error) {
+	return computeCreateContext(source, target, class)
+}
+
+// CalculateGlbLub computes the glb (greatest lower bound) and lub (least upper bound)
+// of a source and target range.
+// The glblub is calculated as the greater of the low sensitivities, the
+// lower of the high sensitivities, and the bitwise AND of each category bitset.
+func CalculateGlbLub(sourceRange, targetRange string) (string, error) {
+	return calculateGlbLub(sourceRange, targetRange)
+}
+
+// SetExecLabel sets the SELinux label that the kernel will use for any programs
+// that are executed by the current process thread, or returns an error. Calls to SetExecLabel
+// should be wrapped in runtime.LockOSThread()/runtime.UnlockOSThread() until execution
+// of the program is finished to guarantee another goroutine does not migrate to the current
+// thread before execution is complete.
+func SetExecLabel(label string) error {
+	return writeCon(attrPath("exec"), label)
+}
+
+// SetTaskLabel sets the SELinux label for the current thread, or returns an error.
+// This requires the dyntransition permission.
Calls to SetTaskLabel should +// be wrapped in runtime.LockOSThread()/runtime.UnlockOSThread() to guarantee +// the current thread does not run in a new mislabeled thread. +func SetTaskLabel(label string) error { + return writeCon(attrPath("current"), label) +} + +// SetSocketLabel takes a process label and tells the kernel to assign the +// label to the next socket that gets created. Calls to SetSocketLabel +// should be wrapped in runtime.LockOSThread()/runtime.UnlockOSThread() until +// the socket is created to guarantee another goroutine does not migrate +// to the current thread before execution is complete. +func SetSocketLabel(label string) error { + return writeCon(attrPath("sockcreate"), label) +} + +// SocketLabel retrieves the current socket label setting +func SocketLabel() (string, error) { + return readCon(attrPath("sockcreate")) +} + +// PeerLabel retrieves the label of the client on the other side of a socket +func PeerLabel(fd uintptr) (string, error) { + return peerLabel(fd) +} + +// SetKeyLabel takes a process label and tells the kernel to assign the +// label to the next kernel keyring that gets created. Calls to SetKeyLabel +// should be wrapped in runtime.LockOSThread()/runtime.UnlockOSThread() until +// the kernel keyring is created to guarantee another goroutine does not migrate +// to the current thread before execution is complete. +func SetKeyLabel(label string) error { + return setKeyLabel(label) +} + +// KeyLabel retrieves the current kernel keyring label setting +func KeyLabel() (string, error) { + return readCon("/proc/self/attr/keycreate") +} + +// Get returns the Context as a string +func (c Context) Get() string { + return c.get() +} + +// NewContext creates a new Context struct from the specified label +func NewContext(label string) (Context, error) { + return newContext(label) +} + +// ClearLabels clears all reserved labels +func ClearLabels() { + clearLabels() +} + +// ReserveLabel reserves the MLS/MCS level component of the specified label +func ReserveLabel(label string) { + reserveLabel(label) +} + +// MLSEnabled checks if MLS is enabled. +func MLSEnabled() bool { + return isMLSEnabled() +} + +// EnforceMode returns the current SELinux mode Enforcing, Permissive, Disabled +func EnforceMode() int { + return enforceMode() +} + +// SetEnforceMode sets the current SELinux mode Enforcing, Permissive. +// Disabled is not valid, since this needs to be set at boot time. +func SetEnforceMode(mode int) error { + return setEnforceMode(mode) +} + +// DefaultEnforceMode returns the systems default SELinux mode Enforcing, +// Permissive or Disabled. Note this is just the default at boot time. +// EnforceMode tells you the systems current mode. +func DefaultEnforceMode() int { + return defaultEnforceMode() +} + +// ReleaseLabel un-reserves the MLS/MCS Level field of the specified label, +// allowing it to be used by another process. +func ReleaseLabel(label string) { + releaseLabel(label) +} + +// ROFileLabel returns the specified SELinux readonly file label +func ROFileLabel() string { + return roFileLabel() +} + +// KVMContainerLabels returns the default processLabel and mountLabel to be used +// for kvm containers by the calling process. +func KVMContainerLabels() (string, string) { + return kvmContainerLabels() +} + +// InitContainerLabels returns the default processLabel and file labels to be +// used for containers running an init system like systemd by the calling process. 
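+// The concrete values depend on the loaded policy; with the common container
+// policy this is typically a container_init_t process label paired with a
+// container_file_t file label, sharing a unique MCS pair.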
+func InitContainerLabels() (string, string) { + return initContainerLabels() +} + +// ContainerLabels returns an allocated processLabel and fileLabel to be used for +// container labeling by the calling process. +func ContainerLabels() (processLabel string, fileLabel string) { + return containerLabels() +} + +// SecurityCheckContext validates that the SELinux label is understood by the kernel +func SecurityCheckContext(val string) error { + return securityCheckContext(val) +} + +// CopyLevel returns a label with the MLS/MCS level from src label replaced on +// the dest label. +func CopyLevel(src, dest string) (string, error) { + return copyLevel(src, dest) +} + +// Chcon changes the fpath file object to the SELinux label. +// If fpath is a directory and recurse is true, then Chcon walks the +// directory tree setting the label. +// +// The fpath itself is guaranteed to be relabeled last. +func Chcon(fpath string, label string, recurse bool) error { + return chcon(fpath, label, recurse) +} + +// DupSecOpt takes an SELinux process label and returns security options that +// can be used to set the SELinux Type and Level for future container processes. +func DupSecOpt(src string) ([]string, error) { + return dupSecOpt(src) +} + +// DisableSecOpt returns a security opt that can be used to disable SELinux +// labeling support for future container processes. +func DisableSecOpt() []string { + return []string{"disable"} +} + +// GetDefaultContextWithLevel gets a single context for the specified SELinux user +// identity that is reachable from the specified scon context. The context is based +// on the per-user /etc/selinux/{SELINUXTYPE}/contexts/users/ if it exists, +// and falls back to the global /etc/selinux/{SELINUXTYPE}/contexts/default_contexts +// file. +func GetDefaultContextWithLevel(user, level, scon string) (string, error) { + return getDefaultContextWithLevel(user, level, scon) +} + +// PrivContainerMountLabel returns mount label for privileged containers +func PrivContainerMountLabel() string { + // Make sure label is initialized. 
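+	// Calling label("") only serves to run loadLabels via loadLabelsOnce;
+	// loadLabels computes and reserves privContainerMountLabel as a side effect.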
+ _ = label("") + return privContainerMountLabel +} diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go new file mode 100644 index 0000000000..f1e95977d3 --- /dev/null +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go @@ -0,0 +1,1295 @@ +package selinux + +import ( + "bufio" + "bytes" + "crypto/rand" + "encoding/binary" + "errors" + "fmt" + "io" + "io/fs" + "math/big" + "os" + "os/user" + "path/filepath" + "strconv" + "strings" + "sync" + + "github.com/opencontainers/selinux/pkg/pwalkdir" + "golang.org/x/sys/unix" +) + +const ( + minSensLen = 2 + contextFile = "/usr/share/containers/selinux/contexts" + selinuxDir = "/etc/selinux/" + selinuxUsersDir = "contexts/users" + defaultContexts = "contexts/default_contexts" + selinuxConfig = selinuxDir + "config" + selinuxfsMount = "/sys/fs/selinux" + selinuxTypeTag = "SELINUXTYPE" + selinuxTag = "SELINUX" + xattrNameSelinux = "security.selinux" +) + +type selinuxState struct { + mcsList map[string]bool + selinuxfs string + selinuxfsOnce sync.Once + enabledSet bool + enabled bool + sync.Mutex +} + +type level struct { + cats *big.Int + sens uint +} + +type mlsRange struct { + low *level + high *level +} + +type defaultSECtx struct { + userRdr io.Reader + verifier func(string) error + defaultRdr io.Reader + user, level, scon string +} + +type levelItem byte + +const ( + sensitivity levelItem = 's' + category levelItem = 'c' +) + +var ( + readOnlyFileLabel string + state = selinuxState{ + mcsList: make(map[string]bool), + } + + // for attrPath() + attrPathOnce sync.Once + haveThreadSelf bool + + // for policyRoot() + policyRootOnce sync.Once + policyRootVal string + + // for label() + loadLabelsOnce sync.Once + labels map[string]string +) + +func policyRoot() string { + policyRootOnce.Do(func() { + policyRootVal = filepath.Join(selinuxDir, readConfig(selinuxTypeTag)) + }) + + return policyRootVal +} + +func (s *selinuxState) setEnable(enabled bool) bool { + s.Lock() + defer s.Unlock() + s.enabledSet = true + s.enabled = enabled + return s.enabled +} + +func (s *selinuxState) getEnabled() bool { + s.Lock() + enabled := s.enabled + enabledSet := s.enabledSet + s.Unlock() + if enabledSet { + return enabled + } + + enabled = false + if fs := getSelinuxMountPoint(); fs != "" { + if con, _ := CurrentLabel(); con != "kernel" { + enabled = true + } + } + return s.setEnable(enabled) +} + +// setDisabled disables SELinux support for the package +func setDisabled() { + state.setEnable(false) +} + +func verifySELinuxfsMount(mnt string) bool { + var buf unix.Statfs_t + for { + err := unix.Statfs(mnt, &buf) + if err == nil { + break + } + if err == unix.EAGAIN || err == unix.EINTR { //nolint:errorlint // unix errors are bare + continue + } + return false + } + + if uint32(buf.Type) != uint32(unix.SELINUX_MAGIC) { + return false + } + if (buf.Flags & unix.ST_RDONLY) != 0 { + return false + } + + return true +} + +func findSELinuxfs() string { + // fast path: check the default mount first + if verifySELinuxfsMount(selinuxfsMount) { + return selinuxfsMount + } + + // check if selinuxfs is available before going the slow path + fs, err := os.ReadFile("/proc/filesystems") + if err != nil { + return "" + } + if !bytes.Contains(fs, []byte("\tselinuxfs\n")) { + return "" + } + + // slow path: try to find among the mounts + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return "" + } + defer f.Close() + 
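+	// Scan mountinfo entry by entry; the first selinuxfs mount that passes
+	// verification (selinuxfs magic, not read-only) is returned.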
+ scanner := bufio.NewScanner(f) + for { + mnt := findSELinuxfsMount(scanner) + if mnt == "" { // error or not found + return "" + } + if verifySELinuxfsMount(mnt) { + return mnt + } + } +} + +// findSELinuxfsMount returns a next selinuxfs mount point found, +// if there is one, or an empty string in case of EOF or error. +func findSELinuxfsMount(s *bufio.Scanner) string { + for s.Scan() { + txt := s.Bytes() + // The first field after - is fs type. + // Safe as spaces in mountpoints are encoded as \040 + if !bytes.Contains(txt, []byte(" - selinuxfs ")) { + continue + } + const mPos = 5 // mount point is 5th field + fields := bytes.SplitN(txt, []byte(" "), mPos+1) + if len(fields) < mPos+1 { + continue + } + return string(fields[mPos-1]) + } + + return "" +} + +func (s *selinuxState) getSELinuxfs() string { + s.selinuxfsOnce.Do(func() { + s.selinuxfs = findSELinuxfs() + }) + + return s.selinuxfs +} + +// getSelinuxMountPoint returns the path to the mountpoint of an selinuxfs +// filesystem or an empty string if no mountpoint is found. Selinuxfs is +// a proc-like pseudo-filesystem that exposes the SELinux policy API to +// processes. The existence of an selinuxfs mount is used to determine +// whether SELinux is currently enabled or not. +func getSelinuxMountPoint() string { + return state.getSELinuxfs() +} + +// getEnabled returns whether SELinux is currently enabled. +func getEnabled() bool { + return state.getEnabled() +} + +func readConfig(target string) string { + in, err := os.Open(selinuxConfig) + if err != nil { + return "" + } + defer in.Close() + + scanner := bufio.NewScanner(in) + + for scanner.Scan() { + line := bytes.TrimSpace(scanner.Bytes()) + if len(line) == 0 { + // Skip blank lines + continue + } + if line[0] == ';' || line[0] == '#' { + // Skip comments + continue + } + fields := bytes.SplitN(line, []byte{'='}, 2) + if len(fields) != 2 { + continue + } + if bytes.Equal(fields[0], []byte(target)) { + return string(bytes.Trim(fields[1], `"`)) + } + } + return "" +} + +func isProcHandle(fh *os.File) error { + var buf unix.Statfs_t + + for { + err := unix.Fstatfs(int(fh.Fd()), &buf) + if err == nil { + break + } + if err != unix.EINTR { //nolint:errorlint // unix errors are bare + return &os.PathError{Op: "fstatfs", Path: fh.Name(), Err: err} + } + } + if buf.Type != unix.PROC_SUPER_MAGIC { + return fmt.Errorf("file %q is not on procfs", fh.Name()) + } + + return nil +} + +func readCon(fpath string) (string, error) { + if fpath == "" { + return "", ErrEmptyPath + } + + in, err := os.Open(fpath) + if err != nil { + return "", err + } + defer in.Close() + + if err := isProcHandle(in); err != nil { + return "", err + } + return readConFd(in) +} + +func readConFd(in *os.File) (string, error) { + data, err := io.ReadAll(in) + if err != nil { + return "", err + } + return string(bytes.TrimSuffix(data, []byte{0})), nil +} + +// classIndex returns the int index for an object class in the loaded policy, +// or -1 and an error +func classIndex(class string) (int, error) { + permpath := fmt.Sprintf("class/%s/index", class) + indexpath := filepath.Join(getSelinuxMountPoint(), permpath) + + indexB, err := os.ReadFile(indexpath) + if err != nil { + return -1, err + } + index, err := strconv.Atoi(string(indexB)) + if err != nil { + return -1, err + } + + return index, nil +} + +// lSetFileLabel sets the SELinux label for this path, not following symlinks, +// or returns an error. 
+func lSetFileLabel(fpath string, label string) error { + if fpath == "" { + return ErrEmptyPath + } + for { + err := unix.Lsetxattr(fpath, xattrNameSelinux, []byte(label), 0) + if err == nil { + break + } + if err != unix.EINTR { //nolint:errorlint // unix errors are bare + return &os.PathError{Op: "lsetxattr", Path: fpath, Err: err} + } + } + + return nil +} + +// setFileLabel sets the SELinux label for this path, following symlinks, +// or returns an error. +func setFileLabel(fpath string, label string) error { + if fpath == "" { + return ErrEmptyPath + } + for { + err := unix.Setxattr(fpath, xattrNameSelinux, []byte(label), 0) + if err == nil { + break + } + if err != unix.EINTR { //nolint:errorlint // unix errors are bare + return &os.PathError{Op: "setxattr", Path: fpath, Err: err} + } + } + + return nil +} + +// fileLabel returns the SELinux label for this path, following symlinks, +// or returns an error. +func fileLabel(fpath string) (string, error) { + if fpath == "" { + return "", ErrEmptyPath + } + + label, err := getxattr(fpath, xattrNameSelinux) + if err != nil { + return "", &os.PathError{Op: "getxattr", Path: fpath, Err: err} + } + // Trim the NUL byte at the end of the byte buffer, if present. + if len(label) > 0 && label[len(label)-1] == '\x00' { + label = label[:len(label)-1] + } + return string(label), nil +} + +// lFileLabel returns the SELinux label for this path, not following symlinks, +// or returns an error. +func lFileLabel(fpath string) (string, error) { + if fpath == "" { + return "", ErrEmptyPath + } + + label, err := lgetxattr(fpath, xattrNameSelinux) + if err != nil { + return "", &os.PathError{Op: "lgetxattr", Path: fpath, Err: err} + } + // Trim the NUL byte at the end of the byte buffer, if present. + if len(label) > 0 && label[len(label)-1] == '\x00' { + label = label[:len(label)-1] + } + return string(label), nil +} + +func setFSCreateLabel(label string) error { + return writeCon(attrPath("fscreate"), label) +} + +// fsCreateLabel returns the default label the kernel which the kernel is using +// for file system objects created by this task. "" indicates default. +func fsCreateLabel() (string, error) { + return readCon(attrPath("fscreate")) +} + +// currentLabel returns the SELinux label of the current process thread, or an error. +func currentLabel() (string, error) { + return readCon(attrPath("current")) +} + +// pidLabel returns the SELinux label of the given pid, or an error. +func pidLabel(pid int) (string, error) { + return readCon(fmt.Sprintf("/proc/%d/attr/current", pid)) +} + +// ExecLabel returns the SELinux label that the kernel will use for any programs +// that are executed by the current process thread, or an error. 
+func execLabel() (string, error) { + return readCon(attrPath("exec")) +} + +func writeCon(fpath, val string) error { + if fpath == "" { + return ErrEmptyPath + } + if val == "" { + if !getEnabled() { + return nil + } + } + + out, err := os.OpenFile(fpath, os.O_WRONLY, 0) + if err != nil { + return err + } + defer out.Close() + + if err := isProcHandle(out); err != nil { + return err + } + + if val != "" { + _, err = out.Write([]byte(val)) + } else { + _, err = out.Write(nil) + } + if err != nil { + return err + } + return nil +} + +func attrPath(attr string) string { + // Linux >= 3.17 provides this + const threadSelfPrefix = "/proc/thread-self/attr" + + attrPathOnce.Do(func() { + st, err := os.Stat(threadSelfPrefix) + if err == nil && st.Mode().IsDir() { + haveThreadSelf = true + } + }) + + if haveThreadSelf { + return filepath.Join(threadSelfPrefix, attr) + } + + return filepath.Join("/proc/self/task", strconv.Itoa(unix.Gettid()), "attr", attr) +} + +// canonicalizeContext takes a context string and writes it to the kernel +// the function then returns the context that the kernel will use. Use this +// function to check if two contexts are equivalent +func canonicalizeContext(val string) (string, error) { + return readWriteCon(filepath.Join(getSelinuxMountPoint(), "context"), val) +} + +// computeCreateContext requests the type transition from source to target for +// class from the kernel. +func computeCreateContext(source string, target string, class string) (string, error) { + classidx, err := classIndex(class) + if err != nil { + return "", err + } + + return readWriteCon(filepath.Join(getSelinuxMountPoint(), "create"), fmt.Sprintf("%s %s %d", source, target, classidx)) +} + +// catsToBitset stores categories in a bitset. +func catsToBitset(cats string) (*big.Int, error) { + bitset := new(big.Int) + + catlist := strings.Split(cats, ",") + for _, r := range catlist { + ranges := strings.SplitN(r, ".", 2) + if len(ranges) > 1 { + catstart, err := parseLevelItem(ranges[0], category) + if err != nil { + return nil, err + } + catend, err := parseLevelItem(ranges[1], category) + if err != nil { + return nil, err + } + for i := catstart; i <= catend; i++ { + bitset.SetBit(bitset, int(i), 1) + } + } else { + cat, err := parseLevelItem(ranges[0], category) + if err != nil { + return nil, err + } + bitset.SetBit(bitset, int(cat), 1) + } + } + + return bitset, nil +} + +// parseLevelItem parses and verifies that a sensitivity or category are valid +func parseLevelItem(s string, sep levelItem) (uint, error) { + if len(s) < minSensLen || levelItem(s[0]) != sep { + return 0, ErrLevelSyntax + } + val, err := strconv.ParseUint(s[1:], 10, 32) + if err != nil { + return 0, err + } + + return uint(val), nil +} + +// parseLevel fills a level from a string that contains +// a sensitivity and categories +func (l *level) parseLevel(levelStr string) error { + lvl := strings.SplitN(levelStr, ":", 2) + sens, err := parseLevelItem(lvl[0], sensitivity) + if err != nil { + return fmt.Errorf("failed to parse sensitivity: %w", err) + } + l.sens = sens + if len(lvl) > 1 { + cats, err := catsToBitset(lvl[1]) + if err != nil { + return fmt.Errorf("failed to parse categories: %w", err) + } + l.cats = cats + } + + return nil +} + +// rangeStrToMLSRange marshals a string representation of a range. +func rangeStrToMLSRange(rangeStr string) (*mlsRange, error) { + r := &mlsRange{} + l := strings.SplitN(rangeStr, "-", 2) + + switch len(l) { + // rangeStr that has a low and a high level, e.g. 
s4:c0.c1023-s6:c0.c1023 + case 2: + r.high = &level{} + if err := r.high.parseLevel(l[1]); err != nil { + return nil, fmt.Errorf("failed to parse high level %q: %w", l[1], err) + } + fallthrough + // rangeStr that is single level, e.g. s6:c0,c3,c5,c30.c1023 + case 1: + r.low = &level{} + if err := r.low.parseLevel(l[0]); err != nil { + return nil, fmt.Errorf("failed to parse low level %q: %w", l[0], err) + } + } + + if r.high == nil { + r.high = r.low + } + + return r, nil +} + +// bitsetToStr takes a category bitset and returns it in the +// canonical selinux syntax +func bitsetToStr(c *big.Int) string { + var str string + + length := 0 + for i := int(c.TrailingZeroBits()); i < c.BitLen(); i++ { + if c.Bit(i) == 0 { + continue + } + if length == 0 { + if str != "" { + str += "," + } + str += "c" + strconv.Itoa(i) + } + if c.Bit(i+1) == 1 { + length++ + continue + } + if length == 1 { + str += ",c" + strconv.Itoa(i) + } else if length > 1 { + str += ".c" + strconv.Itoa(i) + } + length = 0 + } + + return str +} + +func (l *level) equal(l2 *level) bool { + if l2 == nil || l == nil { + return l == l2 + } + if l2.sens != l.sens { + return false + } + if l2.cats == nil || l.cats == nil { + return l2.cats == l.cats + } + return l.cats.Cmp(l2.cats) == 0 +} + +// String returns an mlsRange as a string. +func (m mlsRange) String() string { + low := "s" + strconv.Itoa(int(m.low.sens)) + if m.low.cats != nil && m.low.cats.BitLen() > 0 { + low += ":" + bitsetToStr(m.low.cats) + } + + if m.low.equal(m.high) { + return low + } + + high := "s" + strconv.Itoa(int(m.high.sens)) + if m.high.cats != nil && m.high.cats.BitLen() > 0 { + high += ":" + bitsetToStr(m.high.cats) + } + + return low + "-" + high +} + +func max(a, b uint) uint { + if a > b { + return a + } + return b +} + +func min(a, b uint) uint { + if a < b { + return a + } + return b +} + +// calculateGlbLub computes the glb (greatest lower bound) and lub (least upper bound) +// of a source and target range. +// The glblub is calculated as the greater of the low sensitivities and +// the lower of the high sensitivities and the and of each category bitset. 
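+//
+// For example (an illustrative, hand-computed case): the glblub of
+// "s0:c0.c3-s1:c0.c7" and "s0:c2.c5-s1:c4.c9" is "s0:c2,c3-s1:c4.c7".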
+func calculateGlbLub(sourceRange, targetRange string) (string, error) { + s, err := rangeStrToMLSRange(sourceRange) + if err != nil { + return "", err + } + t, err := rangeStrToMLSRange(targetRange) + if err != nil { + return "", err + } + + if s.high.sens < t.low.sens || t.high.sens < s.low.sens { + /* these ranges have no common sensitivities */ + return "", ErrIncomparable + } + + outrange := &mlsRange{low: &level{}, high: &level{}} + + /* take the greatest of the low */ + outrange.low.sens = max(s.low.sens, t.low.sens) + + /* take the least of the high */ + outrange.high.sens = min(s.high.sens, t.high.sens) + + /* find the intersecting categories */ + if s.low.cats != nil && t.low.cats != nil { + outrange.low.cats = new(big.Int) + outrange.low.cats.And(s.low.cats, t.low.cats) + } + if s.high.cats != nil && t.high.cats != nil { + outrange.high.cats = new(big.Int) + outrange.high.cats.And(s.high.cats, t.high.cats) + } + + return outrange.String(), nil +} + +func readWriteCon(fpath string, val string) (string, error) { + if fpath == "" { + return "", ErrEmptyPath + } + f, err := os.OpenFile(fpath, os.O_RDWR, 0) + if err != nil { + return "", err + } + defer f.Close() + + _, err = f.Write([]byte(val)) + if err != nil { + return "", err + } + + return readConFd(f) +} + +// peerLabel retrieves the label of the client on the other side of a socket +func peerLabel(fd uintptr) (string, error) { + l, err := unix.GetsockoptString(int(fd), unix.SOL_SOCKET, unix.SO_PEERSEC) + if err != nil { + return "", &os.PathError{Op: "getsockopt", Path: "fd " + strconv.Itoa(int(fd)), Err: err} + } + return l, nil +} + +// setKeyLabel takes a process label and tells the kernel to assign the +// label to the next kernel keyring that gets created +func setKeyLabel(label string) error { + err := writeCon("/proc/self/attr/keycreate", label) + if errors.Is(err, os.ErrNotExist) { + return nil + } + if label == "" && errors.Is(err, os.ErrPermission) { + return nil + } + return err +} + +// get returns the Context as a string +func (c Context) get() string { + if l := c["level"]; l != "" { + return c["user"] + ":" + c["role"] + ":" + c["type"] + ":" + l + } + return c["user"] + ":" + c["role"] + ":" + c["type"] +} + +// newContext creates a new Context struct from the specified label +func newContext(label string) (Context, error) { + c := make(Context) + + if len(label) != 0 { + con := strings.SplitN(label, ":", 4) + if len(con) < 3 { + return c, ErrInvalidLabel + } + c["user"] = con[0] + c["role"] = con[1] + c["type"] = con[2] + if len(con) > 3 { + c["level"] = con[3] + } + } + return c, nil +} + +// clearLabels clears all reserved labels +func clearLabels() { + state.Lock() + state.mcsList = make(map[string]bool) + state.Unlock() +} + +// reserveLabel reserves the MLS/MCS level component of the specified label +func reserveLabel(label string) { + if len(label) != 0 { + con := strings.SplitN(label, ":", 4) + if len(con) > 3 { + _ = mcsAdd(con[3]) + } + } +} + +func selinuxEnforcePath() string { + return filepath.Join(getSelinuxMountPoint(), "enforce") +} + +// isMLSEnabled checks if MLS is enabled. 
+func isMLSEnabled() bool { + enabledB, err := os.ReadFile(filepath.Join(getSelinuxMountPoint(), "mls")) + if err != nil { + return false + } + return bytes.Equal(enabledB, []byte{'1'}) +} + +// enforceMode returns the current SELinux mode Enforcing, Permissive, Disabled +func enforceMode() int { + var enforce int + + enforceB, err := os.ReadFile(selinuxEnforcePath()) + if err != nil { + return -1 + } + enforce, err = strconv.Atoi(string(enforceB)) + if err != nil { + return -1 + } + return enforce +} + +// setEnforceMode sets the current SELinux mode Enforcing, Permissive. +// Disabled is not valid, since this needs to be set at boot time. +func setEnforceMode(mode int) error { + //nolint:gosec // ignore G306: permissions to be 0600 or less. + return os.WriteFile(selinuxEnforcePath(), []byte(strconv.Itoa(mode)), 0o644) +} + +// defaultEnforceMode returns the systems default SELinux mode Enforcing, +// Permissive or Disabled. Note this is just the default at boot time. +// EnforceMode tells you the systems current mode. +func defaultEnforceMode() int { + switch readConfig(selinuxTag) { + case "enforcing": + return Enforcing + case "permissive": + return Permissive + } + return Disabled +} + +func mcsAdd(mcs string) error { + if mcs == "" { + return nil + } + state.Lock() + defer state.Unlock() + if state.mcsList[mcs] { + return ErrMCSAlreadyExists + } + state.mcsList[mcs] = true + return nil +} + +func mcsDelete(mcs string) { + if mcs == "" { + return + } + state.Lock() + defer state.Unlock() + state.mcsList[mcs] = false +} + +func intToMcs(id int, catRange uint32) string { + var ( + SETSIZE = int(catRange) + TIER = SETSIZE + ORD = id + ) + + if id < 1 || id > 523776 { + return "" + } + + for ORD > TIER { + ORD -= TIER + TIER-- + } + TIER = SETSIZE - TIER + ORD += TIER + return fmt.Sprintf("s0:c%d,c%d", TIER, ORD) +} + +func uniqMcs(catRange uint32) string { + var ( + n uint32 + c1, c2 uint32 + mcs string + ) + + for { + _ = binary.Read(rand.Reader, binary.LittleEndian, &n) + c1 = n % catRange + _ = binary.Read(rand.Reader, binary.LittleEndian, &n) + c2 = n % catRange + if c1 == c2 { + continue + } else if c1 > c2 { + c1, c2 = c2, c1 + } + mcs = fmt.Sprintf("s0:c%d,c%d", c1, c2) + if err := mcsAdd(mcs); err != nil { + continue + } + break + } + return mcs +} + +// releaseLabel un-reserves the MLS/MCS Level field of the specified label, +// allowing it to be used by another process. 
+func releaseLabel(label string) { + if len(label) != 0 { + con := strings.SplitN(label, ":", 4) + if len(con) > 3 { + mcsDelete(con[3]) + } + } +} + +// roFileLabel returns the specified SELinux readonly file label +func roFileLabel() string { + return readOnlyFileLabel +} + +func openContextFile() (*os.File, error) { + if f, err := os.Open(contextFile); err == nil { + return f, nil + } + return os.Open(filepath.Join(policyRoot(), "contexts", "lxc_contexts")) +} + +func loadLabels() { + labels = make(map[string]string) + in, err := openContextFile() + if err != nil { + return + } + defer in.Close() + + scanner := bufio.NewScanner(in) + + for scanner.Scan() { + line := bytes.TrimSpace(scanner.Bytes()) + if len(line) == 0 { + // Skip blank lines + continue + } + if line[0] == ';' || line[0] == '#' { + // Skip comments + continue + } + fields := bytes.SplitN(line, []byte{'='}, 2) + if len(fields) != 2 { + continue + } + key, val := bytes.TrimSpace(fields[0]), bytes.TrimSpace(fields[1]) + labels[string(key)] = string(bytes.Trim(val, `"`)) + } + + con, _ := NewContext(labels["file"]) + con["level"] = fmt.Sprintf("s0:c%d,c%d", maxCategory-2, maxCategory-1) + privContainerMountLabel = con.get() + reserveLabel(privContainerMountLabel) +} + +func label(key string) string { + loadLabelsOnce.Do(func() { + loadLabels() + }) + return labels[key] +} + +// kvmContainerLabels returns the default processLabel and mountLabel to be used +// for kvm containers by the calling process. +func kvmContainerLabels() (string, string) { + processLabel := label("kvm_process") + if processLabel == "" { + processLabel = label("process") + } + + return addMcs(processLabel, label("file")) +} + +// initContainerLabels returns the default processLabel and file labels to be +// used for containers running an init system like systemd by the calling process. +func initContainerLabels() (string, string) { + processLabel := label("init_process") + if processLabel == "" { + processLabel = label("process") + } + + return addMcs(processLabel, label("file")) +} + +// containerLabels returns an allocated processLabel and fileLabel to be used for +// container labeling by the calling process. +func containerLabels() (processLabel string, fileLabel string) { + if !getEnabled() { + return "", "" + } + + processLabel = label("process") + fileLabel = label("file") + readOnlyFileLabel = label("ro_file") + + if processLabel == "" || fileLabel == "" { + return "", fileLabel + } + + if readOnlyFileLabel == "" { + readOnlyFileLabel = fileLabel + } + + return addMcs(processLabel, fileLabel) +} + +func addMcs(processLabel, fileLabel string) (string, string) { + scon, _ := NewContext(processLabel) + if scon["level"] != "" { + mcs := uniqMcs(CategoryRange) + scon["level"] = mcs + processLabel = scon.Get() + scon, _ = NewContext(fileLabel) + scon["level"] = mcs + fileLabel = scon.Get() + } + return processLabel, fileLabel +} + +// securityCheckContext validates that the SELinux label is understood by the kernel +func securityCheckContext(val string) error { + //nolint:gosec // ignore G306: permissions to be 0600 or less. + return os.WriteFile(filepath.Join(getSelinuxMountPoint(), "context"), []byte(val), 0o644) +} + +// copyLevel returns a label with the MLS/MCS level from src label replaced on +// the dest label. 
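+//
+// For example (illustrative, assuming both contexts pass the kernel check):
+// copying the level of "system_u:system_r:container_t:s0:c1,c2" onto
+// "system_u:object_r:container_file_t:s0" yields
+// "system_u:object_r:container_file_t:s0:c1,c2".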
+func copyLevel(src, dest string) (string, error) { + if src == "" { + return "", nil + } + if err := SecurityCheckContext(src); err != nil { + return "", err + } + if err := SecurityCheckContext(dest); err != nil { + return "", err + } + scon, err := NewContext(src) + if err != nil { + return "", err + } + tcon, err := NewContext(dest) + if err != nil { + return "", err + } + mcsDelete(tcon["level"]) + _ = mcsAdd(scon["level"]) + tcon["level"] = scon["level"] + return tcon.Get(), nil +} + +// chcon changes the fpath file object to the SELinux label. +// If fpath is a directory and recurse is true, then chcon walks the +// directory tree setting the label. +func chcon(fpath string, label string, recurse bool) error { + if fpath == "" { + return ErrEmptyPath + } + if label == "" { + return nil + } + + excludePaths := map[string]bool{ + "/": true, + "/bin": true, + "/boot": true, + "/dev": true, + "/etc": true, + "/etc/passwd": true, + "/etc/pki": true, + "/etc/shadow": true, + "/home": true, + "/lib": true, + "/lib64": true, + "/media": true, + "/opt": true, + "/proc": true, + "/root": true, + "/run": true, + "/sbin": true, + "/srv": true, + "/sys": true, + "/tmp": true, + "/usr": true, + "/var": true, + "/var/lib": true, + "/var/log": true, + } + + if home := os.Getenv("HOME"); home != "" { + excludePaths[home] = true + } + + if sudoUser := os.Getenv("SUDO_USER"); sudoUser != "" { + if usr, err := user.Lookup(sudoUser); err == nil { + excludePaths[usr.HomeDir] = true + } + } + + if fpath != "/" { + fpath = strings.TrimSuffix(fpath, "/") + } + if excludePaths[fpath] { + return fmt.Errorf("SELinux relabeling of %s is not allowed", fpath) + } + + if !recurse { + err := lSetFileLabel(fpath, label) + if err != nil { + // Check if file doesn't exist, must have been removed + if errors.Is(err, os.ErrNotExist) { + return nil + } + // Check if current label is correct on disk + flabel, nerr := lFileLabel(fpath) + if nerr == nil && flabel == label { + return nil + } + // Check if file doesn't exist, must have been removed + if errors.Is(nerr, os.ErrNotExist) { + return nil + } + return err + } + return nil + } + + return rchcon(fpath, label) +} + +func rchcon(fpath, label string) error { //revive:disable:cognitive-complexity + fastMode := false + // If the current label matches the new label, assume + // other labels are correct. + if cLabel, err := lFileLabel(fpath); err == nil && cLabel == label { + fastMode = true + } + return pwalkdir.Walk(fpath, func(p string, _ fs.DirEntry, _ error) error { + if fastMode { + if cLabel, err := lFileLabel(fpath); err == nil && cLabel == label { + return nil + } + } + err := lSetFileLabel(p, label) + // Walk a file tree can race with removal, so ignore ENOENT. + if errors.Is(err, os.ErrNotExist) { + return nil + } + return err + }) +} + +// dupSecOpt takes an SELinux process label and returns security options that +// can be used to set the SELinux Type and Level for future container processes. +func dupSecOpt(src string) ([]string, error) { + if src == "" { + return nil, nil + } + con, err := NewContext(src) + if err != nil { + return nil, err + } + if con["user"] == "" || + con["role"] == "" || + con["type"] == "" { + return nil, nil + } + dup := []string{ + "user:" + con["user"], + "role:" + con["role"], + "type:" + con["type"], + } + + if con["level"] != "" { + dup = append(dup, "level:"+con["level"]) + } + + return dup, nil +} + +// findUserInContext scans the reader for a valid SELinux context +// match that is verified with the verifier. 
Invalid contexts are +// skipped. It returns a matched context or an empty string if no +// match is found. If a scanner error occurs, it is returned. +func findUserInContext(context Context, r io.Reader, verifier func(string) error) (string, error) { + fromRole := context["role"] + fromType := context["type"] + scanner := bufio.NewScanner(r) + + for scanner.Scan() { + fromConns := strings.Fields(scanner.Text()) + if len(fromConns) == 0 { + // Skip blank lines + continue + } + + line := fromConns[0] + + if line[0] == ';' || line[0] == '#' { + // Skip comments + continue + } + + // user context files contexts are formatted as + // role_r:type_t:s0 where the user is missing. + lineArr := strings.SplitN(line, ":", 4) + // skip context with typo, or role and type do not match + if len(lineArr) != 3 || + lineArr[0] != fromRole || + lineArr[1] != fromType { + continue + } + + for _, cc := range fromConns[1:] { + toConns := strings.SplitN(cc, ":", 4) + if len(toConns) != 3 { + continue + } + + context["role"] = toConns[0] + context["type"] = toConns[1] + + outConn := context.get() + if err := verifier(outConn); err != nil { + continue + } + + return outConn, nil + } + } + if err := scanner.Err(); err != nil { + return "", fmt.Errorf("failed to scan for context: %w", err) + } + + return "", nil +} + +func getDefaultContextFromReaders(c *defaultSECtx) (string, error) { + if c.verifier == nil { + return "", ErrVerifierNil + } + + context, err := newContext(c.scon) + if err != nil { + return "", fmt.Errorf("failed to create label for %s: %w", c.scon, err) + } + + // set so the verifier validates the matched context with the provided user and level. + context["user"] = c.user + context["level"] = c.level + + conn, err := findUserInContext(context, c.userRdr, c.verifier) + if err != nil { + return "", err + } + + if conn != "" { + return conn, nil + } + + conn, err = findUserInContext(context, c.defaultRdr, c.verifier) + if err != nil { + return "", err + } + + if conn != "" { + return conn, nil + } + + return "", fmt.Errorf("context %q not found: %w", c.scon, ErrContextMissing) +} + +func getDefaultContextWithLevel(user, level, scon string) (string, error) { + userPath := filepath.Join(policyRoot(), selinuxUsersDir, user) + fu, err := os.Open(userPath) + if err != nil { + return "", err + } + defer fu.Close() + + defaultPath := filepath.Join(policyRoot(), defaultContexts) + fd, err := os.Open(defaultPath) + if err != nil { + return "", err + } + defer fd.Close() + + c := defaultSECtx{ + user: user, + level: level, + scon: scon, + userRdr: fu, + defaultRdr: fd, + verifier: securityCheckContext, + } + + return getDefaultContextFromReaders(&c) +} diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go new file mode 100644 index 0000000000..bc3fd3b370 --- /dev/null +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go @@ -0,0 +1,155 @@ +//go:build !linux +// +build !linux + +package selinux + +func attrPath(string) string { + return "" +} + +func readCon(fpath string) (string, error) { + return "", nil +} + +func writeCon(string, string) error { + return nil +} + +func setDisabled() {} + +func getEnabled() bool { + return false +} + +func classIndex(class string) (int, error) { + return -1, nil +} + +func setFileLabel(fpath string, label string) error { + return nil +} + +func lSetFileLabel(fpath string, label string) error { + return nil +} + +func fileLabel(fpath 
string) (string, error) { + return "", nil +} + +func lFileLabel(fpath string) (string, error) { + return "", nil +} + +func setFSCreateLabel(label string) error { + return nil +} + +func fsCreateLabel() (string, error) { + return "", nil +} + +func currentLabel() (string, error) { + return "", nil +} + +func pidLabel(pid int) (string, error) { + return "", nil +} + +func execLabel() (string, error) { + return "", nil +} + +func canonicalizeContext(val string) (string, error) { + return "", nil +} + +func computeCreateContext(source string, target string, class string) (string, error) { + return "", nil +} + +func calculateGlbLub(sourceRange, targetRange string) (string, error) { + return "", nil +} + +func peerLabel(fd uintptr) (string, error) { + return "", nil +} + +func setKeyLabel(label string) error { + return nil +} + +func (c Context) get() string { + return "" +} + +func newContext(label string) (Context, error) { + return Context{}, nil +} + +func clearLabels() { +} + +func reserveLabel(label string) { +} + +func isMLSEnabled() bool { + return false +} + +func enforceMode() int { + return Disabled +} + +func setEnforceMode(mode int) error { + return nil +} + +func defaultEnforceMode() int { + return Disabled +} + +func releaseLabel(label string) { +} + +func roFileLabel() string { + return "" +} + +func kvmContainerLabels() (string, string) { + return "", "" +} + +func initContainerLabels() (string, string) { + return "", "" +} + +func containerLabels() (processLabel string, fileLabel string) { + return "", "" +} + +func securityCheckContext(val string) error { + return nil +} + +func copyLevel(src, dest string) (string, error) { + return "", nil +} + +func chcon(fpath string, label string, recurse bool) error { + return nil +} + +func dupSecOpt(src string) ([]string, error) { + return nil, nil +} + +func getDefaultContextWithLevel(user, level, scon string) (string, error) { + return "", nil +} + +func label(_ string) string { + return "" +} diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go new file mode 100644 index 0000000000..9e473ca168 --- /dev/null +++ b/vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go @@ -0,0 +1,71 @@ +package selinux + +import ( + "golang.org/x/sys/unix" +) + +// lgetxattr returns a []byte slice containing the value of +// an extended attribute attr set for path. +func lgetxattr(path, attr string) ([]byte, error) { + // Start with a 128 length byte array + dest := make([]byte, 128) + sz, errno := doLgetxattr(path, attr, dest) + for errno == unix.ERANGE { //nolint:errorlint // unix errors are bare + // Buffer too small, use zero-sized buffer to get the actual size + sz, errno = doLgetxattr(path, attr, []byte{}) + if errno != nil { + return nil, errno + } + + dest = make([]byte, sz) + sz, errno = doLgetxattr(path, attr, dest) + } + if errno != nil { + return nil, errno + } + + return dest[:sz], nil +} + +// doLgetxattr is a wrapper that retries on EINTR +func doLgetxattr(path, attr string, dest []byte) (int, error) { + for { + sz, err := unix.Lgetxattr(path, attr, dest) + if err != unix.EINTR { //nolint:errorlint // unix errors are bare + return sz, err + } + } +} + +// getxattr returns a []byte slice containing the value of +// an extended attribute attr set for path. 
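+// Unlike lgetxattr, it follows symlinks.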
+func getxattr(path, attr string) ([]byte, error) { + // Start with a 128 length byte array + dest := make([]byte, 128) + sz, errno := dogetxattr(path, attr, dest) + for errno == unix.ERANGE { //nolint:errorlint // unix errors are bare + // Buffer too small, use zero-sized buffer to get the actual size + sz, errno = dogetxattr(path, attr, []byte{}) + if errno != nil { + return nil, errno + } + + dest = make([]byte, sz) + sz, errno = dogetxattr(path, attr, dest) + } + if errno != nil { + return nil, errno + } + + return dest[:sz], nil +} + +// dogetxattr is a wrapper that retries on EINTR +func dogetxattr(path, attr string, dest []byte) (int, error) { + for { + sz, err := unix.Getxattr(path, attr, dest) + if err != unix.EINTR { //nolint:errorlint // unix errors are bare + return sz, err + } + } +} diff --git a/vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md b/vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md new file mode 100644 index 0000000000..7e78dce015 --- /dev/null +++ b/vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md @@ -0,0 +1,48 @@ +## pwalk: parallel implementation of filepath.Walk + +This is a wrapper for [filepath.Walk](https://pkg.go.dev/path/filepath?tab=doc#Walk) +which may speed it up by calling multiple callback functions (WalkFunc) in parallel, +utilizing goroutines. + +By default, it utilizes 2\*runtime.NumCPU() goroutines for callbacks. +This can be changed by using WalkN function which has the additional +parameter, specifying the number of goroutines (concurrency). + +### pwalk vs pwalkdir + +This package is deprecated in favor of +[pwalkdir](https://pkg.go.dev/github.com/opencontainers/selinux/pkg/pwalkdir), +which is faster, but requires at least Go 1.16. + +### Caveats + +Please note the following limitations of this code: + +* Unlike filepath.Walk, the order of calls is non-deterministic; + +* Only primitive error handling is supported: + + * filepath.SkipDir is not supported; + + * no errors are ever passed to WalkFunc; + + * once any error is returned from any WalkFunc instance, no more new calls + to WalkFunc are made, and the error is returned to the caller of Walk; + + * if more than one walkFunc instance will return an error, only one + of such errors will be propagated and returned by Walk, others + will be silently discarded. + +### Documentation + +For the official documentation, see +https://pkg.go.dev/github.com/opencontainers/selinux/pkg/pwalk?tab=doc + +### Benchmarks + +For a WalkFunc that consists solely of the return statement, this +implementation is about 10% slower than the standard library's +filepath.Walk. + +Otherwise (if a WalkFunc is doing something) this is usually faster, +except when the WalkN(..., 1) is used. diff --git a/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go b/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go new file mode 100644 index 0000000000..a28b4c4bbb --- /dev/null +++ b/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go @@ -0,0 +1,123 @@ +package pwalk + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "sync" +) + +// WalkFunc is the type of the function called by Walk to visit each +// file or directory. It is an alias for [filepath.WalkFunc]. +// +// Deprecated: use [github.com/opencontainers/selinux/pkg/pwalkdir] and [fs.WalkDirFunc]. 
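+//
+// A WalkFunc has the filepath.WalkFunc shape; a minimal sketch:
+//
+//	fn := func(path string, info os.FileInfo, err error) error {
+//		fmt.Println(path)
+//		return nil
+//	}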
+type WalkFunc = filepath.WalkFunc + +// Walk is a wrapper for filepath.Walk which can call multiple walkFn +// in parallel, allowing to handle each item concurrently. A maximum of +// twice the runtime.NumCPU() walkFn will be called at any one time. +// If you want to change the maximum, use WalkN instead. +// +// The order of calls is non-deterministic. +// +// Note that this implementation only supports primitive error handling: +// +// - no errors are ever passed to walkFn; +// +// - once a walkFn returns any error, all further processing stops +// and the error is returned to the caller of Walk; +// +// - filepath.SkipDir is not supported; +// +// - if more than one walkFn instance will return an error, only one +// of such errors will be propagated and returned by Walk, others +// will be silently discarded. +// +// Deprecated: use [github.com/opencontainers/selinux/pkg/pwalkdir.Walk] +func Walk(root string, walkFn WalkFunc) error { + return WalkN(root, walkFn, runtime.NumCPU()*2) +} + +// WalkN is a wrapper for filepath.Walk which can call multiple walkFn +// in parallel, allowing to handle each item concurrently. A maximum of +// num walkFn will be called at any one time. +// +// Please see Walk documentation for caveats of using this function. +// +// Deprecated: use [github.com/opencontainers/selinux/pkg/pwalkdir.WalkN] +func WalkN(root string, walkFn WalkFunc, num int) error { + // make sure limit is sensible + if num < 1 { + return fmt.Errorf("walk(%q): num must be > 0", root) + } + + files := make(chan *walkArgs, 2*num) + errCh := make(chan error, 1) // get the first error, ignore others + + // Start walking a tree asap + var ( + err error + wg sync.WaitGroup + + rootLen = len(root) + rootEntry *walkArgs + ) + wg.Add(1) + go func() { + err = filepath.Walk(root, func(p string, info os.FileInfo, err error) error { + if err != nil { + close(files) + return err + } + if len(p) == rootLen { + // Root entry is processed separately below. + rootEntry = &walkArgs{path: p, info: &info} + return nil + } + // add a file to the queue unless a callback sent an error + select { + case e := <-errCh: + close(files) + return e + default: + files <- &walkArgs{path: p, info: &info} + return nil + } + }) + if err == nil { + close(files) + } + wg.Done() + }() + + wg.Add(num) + for i := 0; i < num; i++ { + go func() { + for file := range files { + if e := walkFn(file.path, *file.info, nil); e != nil { + select { + case errCh <- e: // sent ok + default: // buffer full + } + } + } + wg.Done() + }() + } + + wg.Wait() + + if err == nil { + err = walkFn(rootEntry.path, *rootEntry.info, nil) + } + + return err +} + +// walkArgs holds the arguments that were passed to the Walk or WalkN +// functions. +type walkArgs struct { + info *os.FileInfo + path string +} diff --git a/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/README.md b/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/README.md new file mode 100644 index 0000000000..068ac40056 --- /dev/null +++ b/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/README.md @@ -0,0 +1,54 @@ +## pwalkdir: parallel implementation of filepath.WalkDir + +This is a wrapper for [filepath.WalkDir](https://pkg.go.dev/path/filepath#WalkDir) +which may speed it up by calling multiple callback functions (WalkDirFunc) +in parallel, utilizing goroutines. + +By default, it utilizes 2\*runtime.NumCPU() goroutines for callbacks. 
+This can be changed by using the WalkN function, which takes an additional
+parameter specifying the number of goroutines (concurrency).
+
+### pwalk vs pwalkdir
+
+This package is very similar to
+[pwalk](https://pkg.go.dev/github.com/opencontainers/selinux/pkg/pwalk),
+but utilizes `filepath.WalkDir` (added to Go 1.16), which does not call stat(2)
+on every entry and is therefore faster (up to 3x, depending on usage scenario).
+
+Users who are OK with requiring Go 1.16+ should switch to this
+implementation.
+
+### Caveats
+
+Please note the following limitations of this code:
+
+* Unlike filepath.WalkDir, the order of calls is non-deterministic;
+
+* Only primitive error handling is supported:
+
+  * fs.SkipDir is not supported;
+
+  * no errors are ever passed to WalkDirFunc;
+
+  * once any error is returned from any WalkDirFunc instance, no more calls
+    to WalkDirFunc are made, and the error is returned to the caller of WalkDir;
+
+  * if more than one WalkDirFunc instance returns an error, only one
+    of those errors will be propagated to and returned by WalkDir; the others
+    will be silently discarded.
+
+### Documentation
+
+For the official documentation, see
+https://pkg.go.dev/github.com/opencontainers/selinux/pkg/pwalkdir
+
+### Benchmarks
+
+For a WalkDirFunc that consists solely of the return statement, this
+implementation is about 15% slower than the standard library's
+filepath.WalkDir.
+
+Otherwise (if a WalkDirFunc is actually doing something) this is usually
+faster, except when WalkN(..., 1) is used. Run `go test -bench .`
+to see how different operations can benefit from it, as well as how the
+level of parallelism affects the speed.
diff --git a/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go b/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go
new file mode 100644
index 0000000000..0f5d9f580d
--- /dev/null
+++ b/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go
@@ -0,0 +1,116 @@
+//go:build go1.16
+// +build go1.16
+
+package pwalkdir
+
+import (
+	"fmt"
+	"io/fs"
+	"path/filepath"
+	"runtime"
+	"sync"
+)
+
+// Walk is a wrapper for filepath.WalkDir which can call multiple walkFn
+// in parallel, allowing each item to be handled concurrently. A maximum of
+// twice the runtime.NumCPU() walkFn will be called at any one time.
+// If you want to change the maximum, use WalkN instead.
+//
+// The order of calls is non-deterministic.
+//
+// Note that this implementation only supports primitive error handling:
+//
+// - no errors are ever passed to walkFn;
+//
+// - once a walkFn returns any error, all further processing stops
+// and the error is returned to the caller of Walk;
+//
+// - filepath.SkipDir is not supported;
+//
+// - if more than one walkFn instance returns an error, only one
+// of those errors will be propagated and returned by Walk; the others
+// will be silently discarded.
+func Walk(root string, walkFn fs.WalkDirFunc) error {
+	return WalkN(root, walkFn, runtime.NumCPU()*2)
+}
+
+// WalkN is a wrapper for filepath.WalkDir which can call multiple walkFn
+// in parallel, allowing each item to be handled concurrently. A maximum of
+// num walkFn will be called at any one time.
+//
+// Please see Walk documentation for caveats of using this function.
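+//
+// A minimal usage sketch (the root path and concurrency level are
+// illustrative only):
+//
+//	err := pwalkdir.WalkN("/some/dir", func(p string, e fs.DirEntry, err error) error {
+//		fmt.Println(p)
+//		return nil
+//	}, 8)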
+func WalkN(root string, walkFn fs.WalkDirFunc, num int) error { + // make sure limit is sensible + if num < 1 { + return fmt.Errorf("walk(%q): num must be > 0", root) + } + + files := make(chan *walkArgs, 2*num) + errCh := make(chan error, 1) // Get the first error, ignore others. + + // Start walking a tree asap. + var ( + err error + wg sync.WaitGroup + + rootLen = len(root) + rootEntry *walkArgs + ) + wg.Add(1) + go func() { + err = filepath.WalkDir(root, func(p string, entry fs.DirEntry, err error) error { + if err != nil { + close(files) + return err + } + if len(p) == rootLen { + // Root entry is processed separately below. + rootEntry = &walkArgs{path: p, entry: entry} + return nil + } + // Add a file to the queue unless a callback sent an error. + select { + case e := <-errCh: + close(files) + return e + default: + files <- &walkArgs{path: p, entry: entry} + return nil + } + }) + if err == nil { + close(files) + } + wg.Done() + }() + + wg.Add(num) + for i := 0; i < num; i++ { + go func() { + for file := range files { + if e := walkFn(file.path, file.entry, nil); e != nil { + select { + case errCh <- e: // sent ok + default: // buffer full + } + } + } + wg.Done() + }() + } + + wg.Wait() + + if err == nil { + err = walkFn(rootEntry.path, rootEntry.entry, nil) + } + + return err +} + +// walkArgs holds the arguments that were passed to the Walk or WalkN +// functions. +type walkArgs struct { + entry fs.DirEntry + path string +} diff --git a/vendor/github.com/ostreedev/ostree-go/LICENSE b/vendor/github.com/ostreedev/ostree-go/LICENSE new file mode 100644 index 0000000000..aa93b4dab9 --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/LICENSE @@ -0,0 +1,17 @@ +Portions of this code are derived from: + +https://github.com/dradtke/gotk3 + +Copyright (c) 2013 Conformal Systems LLC. + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gboolean.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gboolean.go new file mode 100644 index 0000000000..a4ad0f0005 --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gboolean.go @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2013 Conformal Systems + * + * This file originated from: http://opensource.conformal.com/ + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package glibobject + +import ( + "unsafe" +) + +// #cgo pkg-config: glib-2.0 gobject-2.0 +// #include +// #include +// #include +// #include "glibobject.go.h" +// #include +import "C" + +/* + * GBoolean + */ + +// GBoolean is a Go representation of glib's gboolean +type GBoolean C.gboolean + +func NewGBoolean() GBoolean { + return GBoolean(0) +} + +func GBool(b bool) GBoolean { + if b { + return GBoolean(1) + } + return GBoolean(0) +} + +func (b GBoolean) Ptr() unsafe.Pointer { + return unsafe.Pointer(&b) +} + +func GoBool(b GBoolean) bool { + if b != 0 { + return true + } + return false +} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gcancellable.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gcancellable.go new file mode 100644 index 0000000000..537db4720d --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gcancellable.go @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2013 Conformal Systems + * + * This file originated from: http://opensource.conformal.com/ + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package glibobject + +// #cgo pkg-config: glib-2.0 gobject-2.0 +// #include +// #include +// #include +// #include "glibobject.go.h" +// #include +import "C" + +import ( + "unsafe" +) + +// GIO types + +type GCancellable struct { + *GObject +} + +func (self *GCancellable) native() *C.GCancellable { + return (*C.GCancellable)(unsafe.Pointer(self)) +} + +func (self *GCancellable) Ptr() unsafe.Pointer { + return unsafe.Pointer(self) +} + +// At the moment, no cancellable API, just pass nil diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gerror.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gerror.go new file mode 100644 index 0000000000..714b15d0bf --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gerror.go @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2013 Conformal Systems + * + * This file originated from: http://opensource.conformal.com/ + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package glibobject + +// #cgo pkg-config: glib-2.0 gobject-2.0 +// #include +// #include +// #include +// #include "glibobject.go.h" +// #include +import "C" +import ( + "errors" + "unsafe" +) + +/* + * GError + */ + +// GError is a representation of GLib's GError +type GError struct { + ptr unsafe.Pointer +} + +func NewGError() GError { + return GError{nil} +} + +func (e GError) Ptr() unsafe.Pointer { + if e.ptr == nil { + return nil + } + return e.ptr +} + +func (e GError) Nil() { + e.ptr = nil +} + +func (e *GError) native() *C.GError { + if e == nil || e.ptr == nil { + return nil + } + return (*C.GError)(e.ptr) +} + +func ToGError(ptr unsafe.Pointer) GError { + return GError{ptr} +} + +func ConvertGError(e GError) error { + defer C.g_error_free(e.native()) + return errors.New(C.GoString((*C.char)(C._g_error_get_message(e.native())))) +} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gfile.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gfile.go new file mode 100644 index 0000000000..babe705096 --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gfile.go @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2013 Conformal Systems + * + * This file originated from: http://opensource.conformal.com/ + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package glibobject + +// #cgo pkg-config: glib-2.0 gobject-2.0 +// #include +// #include +// #include +// #include "glibobject.go.h" +// #include +import "C" +import ( + "unsafe" +) + +/* + * GFile + */ + +type GFile struct { + ptr unsafe.Pointer +} + +func (f GFile) Ptr() unsafe.Pointer { + return f.ptr +} + +func NewGFile() *GFile { + return &GFile{nil} +} + +func ToGFile(ptr unsafe.Pointer) *GFile { + gf := NewGFile() + gf.ptr = ptr + return gf +} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gfileinfo.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gfileinfo.go new file mode 100644 index 0000000000..9c155834a8 --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gfileinfo.go @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2013 Conformal Systems + * + * This file originated from: http://opensource.conformal.com/ + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package glibobject + +// #cgo pkg-config: glib-2.0 gobject-2.0 +// #include +// #include +// #include +// #include "glibobject.go.h" +// #include +import "C" +import ( + "unsafe" +) + +/* + * GFileInfo + */ + +type GFileInfo struct { + ptr unsafe.Pointer +} + +func (fi GFileInfo) Ptr() unsafe.Pointer { + return fi.ptr +} + +func NewGFileInfo() GFileInfo { + var fi GFileInfo = GFileInfo{nil} + return fi +} + +func ToGFileInfo(p unsafe.Pointer) *GFileInfo { + var fi *GFileInfo = &GFileInfo{} + fi.ptr = p + return fi +} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/ghashtable.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/ghashtable.go new file mode 100644 index 0000000000..20cc321cbb --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/ghashtable.go @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2013 Conformal Systems + * + * This file originated from: http://opensource.conformal.com/ + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package glibobject + +import ( + "unsafe" +) + +// #cgo pkg-config: glib-2.0 gobject-2.0 +// #include +// #include +// #include +// #include "glibobject.go.h" +// #include +import "C" + +/* + * GHashTable + */ +type GHashTable struct { + ptr unsafe.Pointer +} + +func (ht *GHashTable) Ptr() unsafe.Pointer { + return ht.ptr +} + +func (ht *GHashTable) native() *C.GHashTable { + return (*C.GHashTable)(ht.ptr) +} + +func ToGHashTable(ptr unsafe.Pointer) *GHashTable { + return &GHashTable{ptr} +} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/ghashtableiter.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/ghashtableiter.go new file mode 100644 index 0000000000..1657edf5fc --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/ghashtableiter.go @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2013 Conformal Systems + * + * This file originated from: http://opensource.conformal.com/ + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package glibobject + +import ( + "unsafe" +) + +// #cgo pkg-config: glib-2.0 gobject-2.0 +// #include +// #include +// #include +// #include "glibobject.go.h" +// #include +import "C" + +/* + * GHashTableIter + */ +type GHashTableIter struct { + ptr unsafe.Pointer +} + +func (ht *GHashTableIter) Ptr() unsafe.Pointer { + return ht.ptr +} + +func (ht *GHashTableIter) native() *C.GHashTableIter { + return (*C.GHashTableIter)(ht.ptr) +} + +func ToGHashTableIter(ptr unsafe.Pointer) *GHashTableIter { + return &GHashTableIter{ptr} +} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/glibobject.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/glibobject.go new file mode 100644 index 0000000000..f3d3aa5266 --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/glibobject.go @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2013 Conformal Systems + * + * This file originated from: http://opensource.conformal.com/ + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +package glibobject + +// #cgo pkg-config: glib-2.0 gobject-2.0 +// #include +// #include +// #include +// #include "glibobject.go.h" +// #include +import "C" diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/glibobject.go.h b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/glibobject.go.h new file mode 100644 index 0000000000..a55bd242f9 --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/glibobject.go.h @@ -0,0 +1,17 @@ +#include + +static char * +_g_error_get_message (GError *error) +{ + g_assert (error != NULL); + return error->message; +} + +static const char * +_g_variant_lookup_string (GVariant *v, const char *key) +{ + const char *r; + if (g_variant_lookup (v, key, "&s", &r)) + return r; + return NULL; +} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gobject.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gobject.go new file mode 100644 index 0000000000..dedbe749a4 --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gobject.go @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2013 Conformal Systems + * + * This file originated from: http://opensource.conformal.com/ + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package glibobject + +// #cgo pkg-config: glib-2.0 gobject-2.0 +// #include +// #include +// #include +// #include "glibobject.go.h" +// #include +import "C" +import ( + "unsafe" +) + +/* + * GObject + */ + +// IObject is an interface type implemented by Object and all types which embed +// an Object. It is meant to be used as a type for function arguments which +// require GObjects or any subclasses thereof. +type IObject interface { + toGObject() *C.GObject + ToObject() *GObject +} + +// GObject is a representation of GLib's GObject. 
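+//
+// Callers that retain a GObject beyond the lifetime of its source should
+// pair Ref with Unref; a minimal sketch (obj is assumed to wrap a valid
+// native pointer):
+//
+//	obj.Ref()
+//	defer obj.Unref()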
+type GObject struct { + ptr unsafe.Pointer +} + +func (v *GObject) Ptr() unsafe.Pointer { + return v.ptr +} + +func (v *GObject) native() *C.GObject { + if v == nil { + return nil + } + return (*C.GObject)(v.ptr) +} + +func (v *GObject) Ref() { + C.g_object_ref(C.gpointer(v.Ptr())) +} + +func (v *GObject) Unref() { + C.g_object_unref(C.gpointer(v.Ptr())) +} + +func (v *GObject) RefSink() { + C.g_object_ref_sink(C.gpointer(v.native())) +} + +func (v *GObject) IsFloating() bool { + c := C.g_object_is_floating(C.gpointer(v.native())) + return GoBool(GBoolean(c)) +} + +func (v *GObject) ForceFloating() { + C.g_object_force_floating(v.native()) +} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/goptioncontext.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/goptioncontext.go new file mode 100644 index 0000000000..05fd54a1a4 --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/goptioncontext.go @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2013 Conformal Systems + * + * This file originated from: http://opensource.conformal.com/ + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package glibobject + +import ( + "unsafe" +) + +// #cgo pkg-config: glib-2.0 gobject-2.0 +// #include +// #include +// #include +// #include "glibobject.go.h" +// #include +import "C" + +/* + * GOptionContext + */ + +type GOptionContext struct { + ptr unsafe.Pointer +} + +func (oc *GOptionContext) Ptr() unsafe.Pointer { + return oc.ptr +} + +func (oc *GOptionContext) native() *C.GOptionContext { + return (*C.GOptionContext)(oc.ptr) +} + +func ToGOptionContext(ptr unsafe.Pointer) GOptionContext { + return GOptionContext{ptr} +} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go new file mode 100644 index 0000000000..6a6acfd9ab --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2013 Conformal Systems + * + * This file originated from: http://opensource.conformal.com/ + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package glibobject + +// #cgo pkg-config: glib-2.0 gobject-2.0 +// #include +// #include +// #include +// #include "glibobject.go.h" +// #include +import "C" +import ( + "fmt" + "unsafe" +) + +/* + * GVariant + */ + +type GVariant struct { + ptr unsafe.Pointer +} + +//func GVariantNew(p unsafe.Pointer) *GVariant { +//o := &GVariant{p} +//runtime.SetFinalizer(o, (*GVariant).Unref) +//return o; +//} + +//func GVariantNewSink(p unsafe.Pointer) *GVariant { +//o := &GVariant{p} +//runtime.SetFinalizer(o, (*GVariant).Unref) +//o.RefSink() +//return o; +//} + +func (v *GVariant) native() *C.GVariant { + return (*C.GVariant)(v.ptr) +} + +func (v *GVariant) Ptr() unsafe.Pointer { + if v == nil { + return nil + } + return v.ptr +} + +func (v *GVariant) Ref() { + C.g_variant_ref(v.native()) +} + +func (v *GVariant) Unref() { + C.g_variant_unref(v.native()) +} + +func (v *GVariant) RefSink() { + C.g_variant_ref_sink(v.native()) +} + +func (v *GVariant) TypeString() string { + cs := (*C.char)(C.g_variant_get_type_string(v.native())) + return C.GoString(cs) +} + +func (v *GVariant) GetChildValue(i int) *GVariant { + cchild := C.g_variant_get_child_value(v.native(), C.gsize(i)) + return (*GVariant)(unsafe.Pointer(cchild)) +} + +func (v *GVariant) LookupString(key string) (string, error) { + ckey := C.CString(key) + defer C.free(unsafe.Pointer(ckey)) + // TODO: Find a way to have constant C strings in golang + cstr := C._g_variant_lookup_string(v.native(), ckey) + if cstr == nil { + return "", fmt.Errorf("No such key: %s", key) + } + return C.GoString(cstr), nil +} + +func ToGVariant(ptr unsafe.Pointer) *GVariant { + return &GVariant{ptr} +} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go new file mode 100644 index 0000000000..8b0236ff60 --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go @@ -0,0 +1,119 @@ +// Package otbuiltin contains all of the basic commands for creating and +// interacting with an ostree repository +package otbuiltin + +import ( + "errors" + "fmt" + "runtime" + "unsafe" + + glib "github.com/ostreedev/ostree-go/pkg/glibobject" +) + +// #cgo pkg-config: ostree-1 +// #include +// #include +// #include +// #include "builtin.go.h" +import "C" + +// Repo represents a local ostree repository +type Repo struct { + ptr unsafe.Pointer +} + +// isInitialized checks if the repo has been initialized +func (r *Repo) isInitialized() bool { + if r == nil || r.ptr == nil { + return false + } + return true +} + +// native converts an ostree repo struct to its C equivalent +func (r *Repo) native() *C.OstreeRepo { + if !r.isInitialized() { + return nil + } + return (*C.OstreeRepo)(r.ptr) +} + +// repoFromNative takes a C ostree repo and converts it to a Go struct +func repoFromNative(or *C.OstreeRepo) *Repo { + if or == nil { + return nil + } + r := &Repo{unsafe.Pointer(or)} + return r +} + +// OpenRepo attempts to open the repo at the given path +func OpenRepo(path string) (*Repo, error) { + if path == "" { + return nil, errors.New("empty path") + } + + cpath := C.CString(path) + defer 
C.free(unsafe.Pointer(cpath)) + repoPath := C.g_file_new_for_path(cpath) + defer C.g_object_unref(C.gpointer(repoPath)) + crepo := C.ostree_repo_new(repoPath) + repo := repoFromNative(crepo) + + var cerr *C.GError + r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(crepo, nil, &cerr))) + if !r { + return nil, generateError(cerr) + } + + return repo, nil +} + +// enableTombstoneCommits enables support for tombstone commits. +// +// This allows to distinguish between intentional deletions and accidental removals +// of commits. +func (r *Repo) enableTombstoneCommits() error { + if !r.isInitialized() { + return errors.New("repo not initialized") + } + + config := C.ostree_repo_get_config(r.native()) + groupC := C.CString("core") + defer C.free(unsafe.Pointer(groupC)) + keyC := C.CString("tombstone-commits") + defer C.free(unsafe.Pointer(keyC)) + valueC := C.g_key_file_get_boolean(config, (*C.gchar)(groupC), (*C.gchar)(keyC), nil) + tombstoneCommits := glib.GoBool(glib.GBoolean(valueC)) + + // tombstoneCommits is false only if it really is false or if it is set to FALSE in the config file + if !tombstoneCommits { + var cerr *C.GError + C.g_key_file_set_boolean(config, (*C.gchar)(groupC), (*C.gchar)(keyC), C.TRUE) + if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_config(r.native(), config, &cerr))) { + return generateError(cerr) + } + } + return nil +} + +// generateError wraps a GLib error into a Go one. +func generateError(err *C.GError) error { + if err == nil { + return errors.New("nil GError") + } + + goErr := glib.ConvertGError(glib.ToGError(unsafe.Pointer(err))) + _, file, line, ok := runtime.Caller(1) + if ok { + return fmt.Errorf("%s:%d - %s", file, line, goErr) + } + return goErr +} + +// isOk wraps a gboolean return value into a bool. +// 0 is false/error, everything else is true/ok. 
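+//
+// A usage sketch (cerr is assumed to be a *C.GError out-parameter):
+//
+//	if !isOk(C.ostree_repo_is_writable(repo.native(), &cerr)) {
+//		return generateError(cerr)
+//	}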
+func isOk(v C.gboolean) bool { + return glib.GoBool(glib.GBoolean(v)) +} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go.h b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go.h new file mode 100644 index 0000000000..76171554d9 --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go.h @@ -0,0 +1,179 @@ +#ifndef BUILTIN_GO_H +#define BUILTIN_GO_H + +#include +#include +#include +#include + +static guint32 owner_uid; +static guint32 owner_gid; + +static void +_ostree_repo_append_modifier_flags(OstreeRepoCommitModifierFlags *flags, int flag) { + *flags |= flag; +} + +struct CommitFilterData { + GHashTable *mode_adds; + GHashTable *skip_list; +}; + +typedef struct CommitFilterData CommitFilterData; + +static char* _gptr_to_str(gpointer p) +{ + return (char*)p; +} + +// The following 3 functions are wrapper functions for macros since CGO can't parse macros +static OstreeRepoFile* +_ostree_repo_file(GFile *file) +{ + return OSTREE_REPO_FILE (file); +} + +static gpointer +_guint_to_pointer (guint u) +{ + return GUINT_TO_POINTER (u); +} + +static const GVariantType* +_g_variant_type (char *type) +{ + return G_VARIANT_TYPE (type); +} + +static int +_at_fdcwd () +{ + return AT_FDCWD; +} + +static guint64 +_guint64_from_be (guint64 val) +{ + return GUINT64_FROM_BE (val); +} + + + +// These functions are wrappers for variadic functions since CGO can't parse variadic functions +static void +_g_printerr_onearg (char* msg, + char* arg) +{ + g_printerr("%s %s\n", msg, arg); +} + +static void +_g_set_error_onearg (GError *err, + char* msg, + char* arg) +{ + g_set_error(&err, G_IO_ERROR, G_IO_ERROR_FAILED, "%s %s", msg, arg); +} + +static void +_g_variant_builder_add_twoargs (GVariantBuilder* builder, + const char *format_string, + char *arg1, + GVariant *arg2) +{ + g_variant_builder_add(builder, format_string, arg1, arg2); +} + +static GHashTable* +_g_hash_table_new_full () +{ + return g_hash_table_new_full(g_str_hash, g_str_equal, g_free, NULL); +} + +static void +_g_variant_get_commit_dump (GVariant *variant, + const char *format, + char **subject, + char **body, + guint64 *timestamp) +{ + return g_variant_get (variant, format, NULL, NULL, NULL, subject, body, timestamp, NULL, NULL); +} + +static guint32 +_binary_or (guint32 a, guint32 b) +{ + return a | b; +} + +static void +_cleanup (OstreeRepo *self, + OstreeRepoCommitModifier *modifier, + GCancellable *cancellable, + GError **out_error) +{ + if (self) + ostree_repo_abort_transaction(self, cancellable, out_error); + if (modifier) + ostree_repo_commit_modifier_unref (modifier); +} + +// The following functions make up a commit_filter function that gets passed into +// another C function (and thus can't be a go function) as well as its helpers +static OstreeRepoCommitFilterResult +_commit_filter (OstreeRepo *self, + const char *path, + GFileInfo *file_info, + gpointer user_data) +{ + struct CommitFilterData *data = user_data; + GHashTable *mode_adds = data->mode_adds; + GHashTable *skip_list = data->skip_list; + gpointer value; + + if (owner_uid >= 0) + g_file_info_set_attribute_uint32 (file_info, "unix::uid", owner_uid); + if (owner_gid >= 0) + g_file_info_set_attribute_uint32 (file_info, "unix::gid", owner_gid); + + if (mode_adds && g_hash_table_lookup_extended (mode_adds, path, NULL, &value)) + { + guint current_mode = g_file_info_get_attribute_uint32 (file_info, "unix::mode"); + guint mode_add = GPOINTER_TO_UINT (value); + g_file_info_set_attribute_uint32 
(file_info, "unix::mode", + current_mode | mode_add); + g_hash_table_remove (mode_adds, path); + } + + if (skip_list && g_hash_table_contains (skip_list, path)) + { + g_hash_table_remove (skip_list, path); + return OSTREE_REPO_COMMIT_FILTER_SKIP; + } + + return OSTREE_REPO_COMMIT_FILTER_ALLOW; +} + + +static void +_set_owner_uid (guint32 uid) +{ + owner_uid = uid; +} + +static void _set_owner_gid (guint32 gid) +{ + owner_gid = gid; +} + +// Wrapper function for a function that takes a C function as a parameter. +// That translation doesn't work in go +static OstreeRepoCommitModifier* +_ostree_repo_commit_modifier_new_wrapper (OstreeRepoCommitModifierFlags flags, + gpointer user_data, + GDestroyNotify destroy_notify) +{ + return ostree_repo_commit_modifier_new(flags, _commit_filter, user_data, destroy_notify); +} + +#endif diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checkout.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checkout.go new file mode 100644 index 0000000000..299a882800 --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checkout.go @@ -0,0 +1,111 @@ +package otbuiltin + +import ( + "errors" + "unsafe" + + glib "github.com/ostreedev/ostree-go/pkg/glibobject" +) + +// #cgo pkg-config: ostree-1 +// #include +// #include +// #include +// #include "builtin.go.h" +import "C" + +// checkoutOptions defines all of the options for checking commits +// out of an ostree repo +// +// Note: while this is private, fields are public and part of the API. +type checkoutOptions struct { + // UserMode defines whether to checkout a repo in `bare-user` mode + UserMode bool + // Union specifies whether to overwrite existing filesystem entries + Union bool + // AllowNoEnt defines whether to skip filepaths that do not exist + AllowNoent bool + // DisableCache defines whether to disable internal repository uncompressed object cache + DisableCache bool + // Whiteouts defines whether to Process 'whiteout' (docker style) entries + Whiteouts bool + // RequireHardlinks defines whether to fall back to full copies if hard linking fails + RequireHardlinks bool + // SubPath specifies a sub-directory to use for checkout + Subpath string + // FromFile specifies an optional file containing many checkouts to process + FromFile string +} + +// NewCheckoutOptions instantiates and returns a checkoutOptions struct with default values set +func NewCheckoutOptions() checkoutOptions { + return checkoutOptions{} +} + +// Checkout checks out commit `commitRef` from a repository at `repoPath`, +// writing it to `destination`. Returns an error if the checkout could not be processed. 
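+//
+// A minimal usage sketch (paths and ref are illustrative only):
+//
+//	opts := otbuiltin.NewCheckoutOptions()
+//	opts.Union = true
+//	err := otbuiltin.Checkout("/srv/ostree/repo", "/var/tmp/rootfs", "exampleos/x86_64/stable", opts)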
+func Checkout(repoPath, destination, commitRef string, opts checkoutOptions) error {
+	var cancellable *glib.GCancellable
+
+	ccommit := C.CString(commitRef)
+	defer C.free(unsafe.Pointer(ccommit))
+
+	var gerr = glib.NewGError()
+	cerr := (*C.GError)(gerr.Ptr())
+	defer C.free(unsafe.Pointer(cerr))
+
+	repoPathc := C.g_file_new_for_path(C.CString(repoPath))
+	defer C.g_object_unref(C.gpointer(repoPathc))
+	crepo := C.ostree_repo_new(repoPathc)
+	if !glib.GoBool(glib.GBoolean(C.ostree_repo_open(crepo, (*C.GCancellable)(cancellable.Ptr()), &cerr))) {
+		return generateError(cerr)
+	}
+
+	// Multiple checkouts to process
+	if opts.FromFile != "" {
+		return processManyCheckouts(crepo, destination, cancellable)
+	}
+
+	// Simple single checkout
+	var resolvedCommit *C.char
+	defer C.free(unsafe.Pointer(resolvedCommit))
+	if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(crepo, ccommit, C.FALSE, &resolvedCommit, &cerr))) {
+		return generateError(cerr)
+	}
+
+	return processOneCheckout(crepo, resolvedCommit, destination, opts, cancellable)
+}
+
+// processOneCheckout processes one checkout from the repo
+func processOneCheckout(crepo *C.OstreeRepo, resolvedCommit *C.char, destination string, opts checkoutOptions, cancellable *glib.GCancellable) error {
+	cdest := C.CString(destination)
+	defer C.free(unsafe.Pointer(cdest))
+
+	var gerr = glib.NewGError()
+	cerr := (*C.GError)(gerr.Ptr())
+	defer C.free(unsafe.Pointer(cerr))
+
+	// Process options into bitflags
+	var repoCheckoutAtOptions C.OstreeRepoCheckoutAtOptions
+	if opts.UserMode {
+		repoCheckoutAtOptions.mode = C.OSTREE_REPO_CHECKOUT_MODE_USER
+	}
+	if opts.Union {
+		repoCheckoutAtOptions.overwrite_mode = C.OSTREE_REPO_CHECKOUT_OVERWRITE_UNION_FILES
+	}
+	if opts.RequireHardlinks {
+		repoCheckoutAtOptions.no_copy_fallback = C.TRUE
+	}
+
+	// Checkout commit to destination
+	if !glib.GoBool(glib.GBoolean(C.ostree_repo_checkout_at(crepo, &repoCheckoutAtOptions, C._at_fdcwd(), cdest, resolvedCommit, nil, &cerr))) {
+		return generateError(cerr)
+	}
+
+	return nil
+}
+
+// processManyCheckouts processes many checkouts in a single batch
+func processManyCheckouts(crepo *C.OstreeRepo, target string, cancellable *glib.GCancellable) error {
+	return errors.New("batch checkouts processing: not implemented")
+}
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/commit.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/commit.go
new file mode 100644
index 0000000000..ccaff7a105
--- /dev/null
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/commit.go
@@ -0,0 +1,483 @@
+package otbuiltin
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+	"unsafe"
+
+	glib "github.com/ostreedev/ostree-go/pkg/glibobject"
+)
+
+// #cgo pkg-config: ostree-1
+// #include
+// #include
+// #include
+// #include "builtin.go.h"
+import "C"
+
+// Declare global variable to store commitOptions
+var options commitOptions
+
+// Declare a function prototype for being passed into another function
+type handleLineFunc func(string, *glib.GHashTable) error
+
+// Contains all of the options for committing to an ostree repo.
Initialize +// with NewCommitOptions() +type commitOptions struct { + Subject string // One line subject + Body string // Full description + Parent string // Parent of the commit + Tree []string // 'dir=PATH' or 'tar=TARFILE' or 'ref=COMMIT': overlay the given argument as a tree + AddMetadataString []string // Add a key/value pair to metadata + AddDetachedMetadataString []string // Add a key/value pair to detached metadata + OwnerUID int // Set file ownership to user id + OwnerGID int // Set file ownership to group id + NoXattrs bool // Do not import extended attributes + LinkCheckoutSpeedup bool // Optimize for commits of trees composed of hardlinks in the repository + TarAutoCreateParents bool // When loading tar archives, automatically create parent directories as needed + SkipIfUnchanged bool // If the contents are unchanged from a previous commit, do nothing + StatOverrideFile string // File containing list of modifications to make permissions + SkipListFile string // File containing list of file paths to skip + GenerateSizes bool // Generate size information along with commit metadata + GpgSign []string // GPG Key ID with which to sign the commit (if you have GPGME - GNU Privacy Guard Made Easy) + GpgHomedir string // GPG home directory to use when looking for keyrings (if you have GPGME - GNU Privacy Guard Made Easy) + Timestamp time.Time // Override the timestamp of the commit + Orphan bool // Commit does not belong to a branch + Fsync bool // Specify whether fsync should be used or not. Default to true +} + +// Initializes a commitOptions struct and sets default values +func NewCommitOptions() commitOptions { + var co commitOptions + co.OwnerUID = -1 + co.OwnerGID = -1 + co.Fsync = true + return co +} + +type OstreeRepoTransactionStats struct { + metadata_objects_total int32 + metadata_objects_written int32 + content_objects_total int32 + content_objects_written int32 + content_bytes_written uint64 +} + +func (repo *Repo) PrepareTransaction() (bool, error) { + var cerr *C.GError = nil + var resume C.gboolean + + r := glib.GoBool(glib.GBoolean(C.ostree_repo_prepare_transaction(repo.native(), &resume, nil, &cerr))) + if !r { + return false, generateError(cerr) + } + return glib.GoBool(glib.GBoolean(resume)), nil +} + +func (repo *Repo) CommitTransaction() (*OstreeRepoTransactionStats, error) { + var cerr *C.GError = nil + var stats OstreeRepoTransactionStats = OstreeRepoTransactionStats{} + statsPtr := (*C.OstreeRepoTransactionStats)(unsafe.Pointer(&stats)) + r := glib.GoBool(glib.GBoolean(C.ostree_repo_commit_transaction(repo.native(), statsPtr, nil, &cerr))) + if !r { + return nil, generateError(cerr) + } + return &stats, nil +} + +func (repo *Repo) TransactionSetRef(remote string, ref string, checksum string) { + var cRemote *C.char = nil + var cRef *C.char = nil + var cChecksum *C.char = nil + + if remote != "" { + cRemote = C.CString(remote) + } + if ref != "" { + cRef = C.CString(ref) + } + if checksum != "" { + cChecksum = C.CString(checksum) + } + C.ostree_repo_transaction_set_ref(repo.native(), cRemote, cRef, cChecksum) +} + +func (repo *Repo) AbortTransaction() error { + var cerr *C.GError = nil + r := glib.GoBool(glib.GBoolean(C.ostree_repo_abort_transaction(repo.native(), nil, &cerr))) + if !r { + return generateError(cerr) + } + return nil +} + +func (repo *Repo) RegenerateSummary() error { + var cerr *C.GError = nil + r := glib.GoBool(glib.GBoolean(C.ostree_repo_regenerate_summary(repo.native(), nil, nil, &cerr))) + if !r { + return generateError(cerr) + } + return nil 
+} + +// Commits a directory, specified by commitPath, to an ostree repo as a given branch +func (repo *Repo) Commit(commitPath, branch string, opts commitOptions) (string, error) { + // TODO(lucab): `options` is global un-synchronized mutable state, get rid of it. + options = opts + + var err error + var modeAdds *glib.GHashTable + var skipList *glib.GHashTable + var objectToCommit *glib.GFile + var skipCommit bool = false + var ccommitChecksum *C.char + defer C.free(unsafe.Pointer(ccommitChecksum)) + var flags C.OstreeRepoCommitModifierFlags = 0 + var filter_data C.CommitFilterData + + var cerr *C.GError + defer C.free(unsafe.Pointer(cerr)) + var metadata *C.GVariant = nil + defer func() { + if metadata != nil { + defer C.g_variant_unref(metadata) + } + }() + + var detachedMetadata *C.GVariant = nil + defer C.free(unsafe.Pointer(detachedMetadata)) + var mtree *C.OstreeMutableTree + defer C.free(unsafe.Pointer(mtree)) + var root *C.GFile + defer C.free(unsafe.Pointer(root)) + var modifier *C.OstreeRepoCommitModifier + defer C.free(unsafe.Pointer(modifier)) + var cancellable *C.GCancellable + defer C.free(unsafe.Pointer(cancellable)) + + cpath := C.CString(commitPath) + defer C.free(unsafe.Pointer(cpath)) + csubject := C.CString(options.Subject) + defer C.free(unsafe.Pointer(csubject)) + cbody := C.CString(options.Body) + defer C.free(unsafe.Pointer(cbody)) + cbranch := C.CString(branch) + defer C.free(unsafe.Pointer(cbranch)) + cparent := C.CString(options.Parent) + defer C.free(unsafe.Pointer(cparent)) + + if !glib.GoBool(glib.GBoolean(C.ostree_repo_is_writable(repo.native(), &cerr))) { + goto out + } + + // If the user provided a stat override file + if strings.Compare(options.StatOverrideFile, "") != 0 { + modeAdds = glib.ToGHashTable(unsafe.Pointer(C._g_hash_table_new_full())) + if err = parseFileByLine(options.StatOverrideFile, handleStatOverrideLine, modeAdds, cancellable); err != nil { + goto out + } + } + + // If the user provided a skiplist file + if strings.Compare(options.SkipListFile, "") != 0 { + skipList = glib.ToGHashTable(unsafe.Pointer(C._g_hash_table_new_full())) + if err = parseFileByLine(options.SkipListFile, handleSkipListline, skipList, cancellable); err != nil { + goto out + } + } + + if options.AddMetadataString != nil { + metadata, err = parseKeyValueStrings(options.AddMetadataString) + if err != nil { + goto out + } + } + + if options.AddDetachedMetadataString != nil { + _, err = parseKeyValueStrings(options.AddDetachedMetadataString) + if err != nil { + goto out + } + } + + if strings.Compare(branch, "") == 0 && !options.Orphan { + err = errors.New("A branch must be specified or use commitOptions.Orphan") + goto out + } + + if options.NoXattrs { + C._ostree_repo_append_modifier_flags(&flags, C.OSTREE_REPO_COMMIT_MODIFIER_FLAGS_SKIP_XATTRS) + } + if options.GenerateSizes { + C._ostree_repo_append_modifier_flags(&flags, C.OSTREE_REPO_COMMIT_MODIFIER_FLAGS_GENERATE_SIZES) + } + if !options.Fsync { + C.ostree_repo_set_disable_fsync(repo.native(), C.TRUE) + } + + if flags != 0 || options.OwnerUID >= 0 || options.OwnerGID >= 0 || strings.Compare(options.StatOverrideFile, "") != 0 || options.NoXattrs { + filter_data.mode_adds = (*C.GHashTable)(modeAdds.Ptr()) + filter_data.skip_list = (*C.GHashTable)(skipList.Ptr()) + C._set_owner_uid((C.guint32)(options.OwnerUID)) + C._set_owner_gid((C.guint32)(options.OwnerGID)) + modifier = C._ostree_repo_commit_modifier_new_wrapper(flags, C.gpointer(&filter_data), nil) + } + + if strings.Compare(options.Parent, "") != 0 { + if 
strings.Compare(options.Parent, "none") == 0 { + options.Parent = "" + } + } else if !options.Orphan { + cerr = nil + if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(repo.native(), cbranch, C.TRUE, &cparent, &cerr))) { + goto out + } + } + + if options.LinkCheckoutSpeedup && !glib.GoBool(glib.GBoolean(C.ostree_repo_scan_hardlinks(repo.native(), cancellable, &cerr))) { + goto out + } + + mtree = C.ostree_mutable_tree_new() + + if len(commitPath) == 0 && (len(options.Tree) == 0 || len(options.Tree[0]) == 0) { + currentDir := (*C.char)(C.g_get_current_dir()) + objectToCommit = glib.ToGFile(unsafe.Pointer(C.g_file_new_for_path(currentDir))) + C.g_free(C.gpointer(currentDir)) + + if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_directory_to_mtree(repo.native(), (*C.GFile)(objectToCommit.Ptr()), mtree, modifier, cancellable, &cerr))) { + goto out + } + } else if len(options.Tree) != 0 { + var eq int = -1 + cerr = nil + for tree := range options.Tree { + eq = strings.Index(options.Tree[tree], "=") + if eq == -1 { + C._g_set_error_onearg(cerr, C.CString("Missing type in tree specification"), C.CString(options.Tree[tree])) + goto out + } + treeType := options.Tree[tree][:eq] + treeVal := options.Tree[tree][eq+1:] + + if strings.Compare(treeType, "dir") == 0 { + objectToCommit = glib.ToGFile(unsafe.Pointer(C.g_file_new_for_path(C.CString(treeVal)))) + if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_directory_to_mtree(repo.native(), (*C.GFile)(objectToCommit.Ptr()), mtree, modifier, cancellable, &cerr))) { + goto out + } + } else if strings.Compare(treeType, "tar") == 0 { + objectToCommit = glib.ToGFile(unsafe.Pointer(C.g_file_new_for_path(C.CString(treeVal)))) + if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_archive_to_mtree(repo.native(), (*C.GFile)(objectToCommit.Ptr()), mtree, modifier, (C.gboolean)(glib.GBool(opts.TarAutoCreateParents)), cancellable, &cerr))) { + fmt.Println("error 1") + goto out + } + } else if strings.Compare(treeType, "ref") == 0 { + if !glib.GoBool(glib.GBoolean(C.ostree_repo_read_commit(repo.native(), C.CString(treeVal), (**C.GFile)(objectToCommit.Ptr()), nil, cancellable, &cerr))) { + goto out + } + + if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_directory_to_mtree(repo.native(), (*C.GFile)(objectToCommit.Ptr()), mtree, modifier, cancellable, &cerr))) { + goto out + } + } else { + C._g_set_error_onearg(cerr, C.CString("Missing type in tree specification"), C.CString(treeVal)) + goto out + } + } + } else { + objectToCommit = glib.ToGFile(unsafe.Pointer(C.g_file_new_for_path(cpath))) + cerr = nil + if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_directory_to_mtree(repo.native(), (*C.GFile)(objectToCommit.Ptr()), mtree, modifier, cancellable, &cerr))) { + goto out + } + } + + if modeAdds != nil && C.g_hash_table_size((*C.GHashTable)(modeAdds.Ptr())) > 0 { + var hashIter *C.GHashTableIter + + var key, value C.gpointer + + C.g_hash_table_iter_init(hashIter, (*C.GHashTable)(modeAdds.Ptr())) + + for glib.GoBool(glib.GBoolean(C.g_hash_table_iter_next(hashIter, &key, &value))) { + C._g_printerr_onearg(C.CString("Unmatched StatOverride path: "), C._gptr_to_str(key)) + } + err = errors.New("Unmatched StatOverride paths") + C.free(unsafe.Pointer(hashIter)) + C.free(unsafe.Pointer(key)) + C.free(unsafe.Pointer(value)) + goto out + } + + if skipList != nil && C.g_hash_table_size((*C.GHashTable)(skipList.Ptr())) > 0 { + var hashIter *C.GHashTableIter + var key, value C.gpointer + + C.g_hash_table_iter_init(hashIter, (*C.GHashTable)(skipList.Ptr())) + + for 
glib.GoBool(glib.GBoolean(C.g_hash_table_iter_next(hashIter, &key, &value))) { + C._g_printerr_onearg(C.CString("Unmatched SkipList path: "), C._gptr_to_str(key)) + } + err = errors.New("Unmatched SkipList paths") + C.free(unsafe.Pointer(hashIter)) + C.free(unsafe.Pointer(key)) + C.free(unsafe.Pointer(value)) + goto out + } + + cerr = nil + if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_mtree(repo.native(), mtree, &root, cancellable, &cerr))) { + goto out + } + + if options.SkipIfUnchanged && strings.Compare(options.Parent, "") != 0 { + var parentRoot *C.GFile + + cerr = nil + if !glib.GoBool(glib.GBoolean(C.ostree_repo_read_commit(repo.native(), cparent, &parentRoot, nil, cancellable, &cerr))) { + C.free(unsafe.Pointer(parentRoot)) + goto out + } + + if glib.GoBool(glib.GBoolean(C.g_file_equal(root, parentRoot))) { + skipCommit = true + } + C.free(unsafe.Pointer(parentRoot)) + } + + if !skipCommit { + var timestamp C.guint64 + + if options.Timestamp.IsZero() { + var now *C.GDateTime = C.g_date_time_new_now_utc() + timestamp = (C.guint64)(C.g_date_time_to_unix(now)) + C.g_date_time_unref(now) + + cerr = nil + ret := C.ostree_repo_write_commit(repo.native(), cparent, csubject, cbody, metadata, C._ostree_repo_file(root), &ccommitChecksum, cancellable, &cerr) + if !glib.GoBool(glib.GBoolean(ret)) { + goto out + } + } else { + timestamp = (C.guint64)(options.Timestamp.Unix()) + + if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_commit_with_time(repo.native(), cparent, csubject, cbody, + metadata, C._ostree_repo_file(root), timestamp, &ccommitChecksum, cancellable, &cerr))) { + goto out + } + } + + if detachedMetadata != nil { + C.ostree_repo_write_commit_detached_metadata(repo.native(), ccommitChecksum, detachedMetadata, cancellable, &cerr) + } + + if len(options.GpgSign) != 0 { + for key := range options.GpgSign { + if !glib.GoBool(glib.GBoolean(C.ostree_repo_sign_commit(repo.native(), (*C.gchar)(ccommitChecksum), (*C.gchar)(C.CString(options.GpgSign[key])), (*C.gchar)(C.CString(options.GpgHomedir)), cancellable, &cerr))) { + goto out + } + } + } + + if strings.Compare(branch, "") != 0 { + C.ostree_repo_transaction_set_ref(repo.native(), nil, cbranch, ccommitChecksum) + } else if !options.Orphan { + goto out + } else { + // TODO: Looks like I forgot to implement this. 
+ } + } else { + ccommitChecksum = C.CString(options.Parent) + } + + return C.GoString(ccommitChecksum), nil +out: + if repo.native() != nil { + C.ostree_repo_abort_transaction(repo.native(), cancellable, nil) + //C.free(unsafe.Pointer(repo.native())) + } + if modifier != nil { + C.ostree_repo_commit_modifier_unref(modifier) + } + if err != nil { + return "", err + } + return "", generateError(cerr) +} + +// Parse an array of key value pairs of the format KEY=VALUE and add them to a GVariant +func parseKeyValueStrings(pairs []string) (*C.GVariant, error) { + builder := C.g_variant_builder_new(C._g_variant_type(C.CString("a{sv}"))) + defer C.g_variant_builder_unref(builder) + + for iter := range pairs { + index := strings.Index(pairs[iter], "=") + if index <= 0 { + var buffer bytes.Buffer + buffer.WriteString("Missing '=' in KEY=VALUE metadata '") + buffer.WriteString(pairs[iter]) + buffer.WriteString("'") + return nil, errors.New(buffer.String()) + } + + key := C.CString(pairs[iter][:index]) + value := C.CString(pairs[iter][index+1:]) + + valueVariant := C.g_variant_new_string((*C.gchar)(value)) + + C._g_variant_builder_add_twoargs(builder, C.CString("{sv}"), key, valueVariant) + } + + metadata := C.g_variant_builder_end(builder) + return C.g_variant_ref_sink(metadata), nil +} + +// Parse a file line by line and handle each line with the handleLineFunc +func parseFileByLine(path string, fn handleLineFunc, table *glib.GHashTable, cancellable *C.GCancellable) error { + var contents *C.char + var file *glib.GFile + var lines []string + var gerr = glib.NewGError() + cerr := (*C.GError)(gerr.Ptr()) + + file = glib.ToGFile(unsafe.Pointer(C.g_file_new_for_path(C.CString(path)))) + if !glib.GoBool(glib.GBoolean(C.g_file_load_contents((*C.GFile)(file.Ptr()), cancellable, &contents, nil, nil, &cerr))) { + return generateError(cerr) + } + + lines = strings.Split(C.GoString(contents), "\n") + for line := range lines { + if strings.Compare(lines[line], "") == 0 { + continue + } + + if err := fn(lines[line], table); err != nil { + return err + } + } + return nil +} + +// Handle an individual line from a StatOverride file +func handleStatOverrideLine(line string, table *glib.GHashTable) error { + var space int + var modeAdd C.guint + + if space = strings.IndexRune(line, ' '); space == -1 { + return errors.New("Malformed StatOverrideFile (no space found)") + } + + modeAdd = (C.guint)(C.g_ascii_strtod((*C.gchar)(C.CString(line)), nil)) + C.g_hash_table_insert((*C.GHashTable)(table.Ptr()), C.gpointer(C.g_strdup((*C.gchar)(C.CString(line[space+1:])))), C._guint_to_pointer(modeAdd)) + + return nil +} + +// Handle an individual line from a SkipList file +func handleSkipListline(line string, table *glib.GHashTable) error { + C.g_hash_table_add((*C.GHashTable)(table.Ptr()), C.gpointer(C.g_strdup((*C.gchar)(C.CString(line))))) + + return nil +} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/init.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/init.go new file mode 100644 index 0000000000..6ee6671b48 --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/init.go @@ -0,0 +1,84 @@ +package otbuiltin + +import ( + "strings" + "unsafe" +) + +// #cgo pkg-config: ostree-1 +// #include <stdlib.h> +// #include <glib.h> +// #include <ostree.h> +// #include "builtin.go.h" +import "C" + +// initOptions contains all of the options for initializing an ostree repo +// +// Note: while this is private, exported fields are public and part of the API.
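+// +// A minimal usage sketch (illustrative only; the repo path and mode are +// hypothetical values): +// +// opts := NewInitOptions() +// opts.Mode = "archive-z2" +// if _, err := Init("/tmp/repo", opts); err != nil { +// // handle the error +// }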
+type initOptions struct { + // Mode defines repository mode: either bare, archive-z2, or bare-user + Mode string +} + +// NewInitOptions instantiates and returns an initOptions struct with default values set +func NewInitOptions() initOptions { + return initOptions{ + Mode: "bare", + } +} + +// Init initializes a new ostree repository at the given path. Returns true +// if the repo exists at the location, regardless of whether it was initialized +// by the function or if it already existed. Returns an error if the repo could +// not be initialized +func Init(path string, options initOptions) (bool, error) { + repoMode, err := parseRepoMode(options.Mode) + if err != nil { + return false, err + } + + // Create a repo struct from the path + cpath := C.CString(path) + defer C.free(unsafe.Pointer(cpath)) + pathc := C.g_file_new_for_path(cpath) + defer C.g_object_unref(C.gpointer(pathc)) + repo := C.ostree_repo_new(pathc) + + // If the repo exists in the filesystem, return an error but set exists to true + /* var exists C.gboolean = 0 + success := glib.GoBool(glib.GBoolean(C.ostree_repo_exists(crepo, &exists, &cerr))) + if exists != 0 { + err = errors.New("repository already exists") + return true, err + } else if !success { + return false, generateError(cerr) + }*/ + + var cErr *C.GError + defer C.free(unsafe.Pointer(cErr)) + if r := C.ostree_repo_create(repo, repoMode, nil, &cErr); !isOk(r) { + err := generateError(cErr) + if strings.Contains(err.Error(), "File exists") { + return true, err + } + return false, err + } + return true, nil +} + +// parseRepoMode converts a mode string to a C.OSTREE_REPO_MODE enum value +func parseRepoMode(modeLabel string) (C.OstreeRepoMode, error) { + var cErr *C.GError + defer C.free(unsafe.Pointer(cErr)) + + cModeLabel := C.CString(modeLabel) + defer C.free(unsafe.Pointer(cModeLabel)) + + var retMode C.OstreeRepoMode + if r := C.ostree_repo_mode_from_string(cModeLabel, &retMode, &cErr); !isOk(r) { + // NOTE(lucab): zero-value for this C enum has no special/invalid meaning. 
+ return C.OSTREE_REPO_MODE_BARE, generateError(cErr) + } + + return retMode, nil +} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/log.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/log.go new file mode 100644 index 0000000000..d57498215e --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/log.go @@ -0,0 +1,163 @@ +package otbuiltin + +import ( + "fmt" + "time" + "unsafe" + + glib "github.com/ostreedev/ostree-go/pkg/glibobject" +) + +// #cgo pkg-config: ostree-1 +// #include <stdlib.h> +// #include <glib.h> +// #include <ostree.h> +// #include "builtin.go.h" +import "C" + +// LogEntry is a struct for the various pieces of data in a log entry +type LogEntry struct { + Checksum []byte + Variant []byte + Timestamp time.Time + Subject string + Body string +} + +// Convert the log entry to a string +func (l LogEntry) String() string { + if len(l.Variant) == 0 { + return fmt.Sprintf("%s\n%s\n\n\t%s\n\n\t%s\n\n", l.Checksum, l.Timestamp, l.Subject, l.Body) + } + return fmt.Sprintf("%s\n%s\n\n", l.Checksum, l.Variant) +} + +type ostreeDumpFlags uint + +const ( + ostreeDumpNone ostreeDumpFlags = 0 + ostreeDumpRaw ostreeDumpFlags = 1 << iota +) + +// logOptions contains all of the options for viewing the log of an ostree repo +type logOptions struct { + // Raw determines whether to show raw variant data + Raw bool +} + +// NewLogOptions instantiates and returns a logOptions struct with default values set +func NewLogOptions() logOptions { + return logOptions{} +} + +// Log shows the logs of a branch starting with a given commit or ref. Returns a +// slice of log entries on success and an error otherwise +func Log(repoPath, branch string, options logOptions) ([]LogEntry, error) { + // attempt to open the repository + repo, err := OpenRepo(repoPath) + if err != nil { + return nil, err + } + + cbranch := C.CString(branch) + defer C.free(unsafe.Pointer(cbranch)) + var checksum *C.char + defer C.free(unsafe.Pointer(checksum)) + var cerr *C.GError + defer C.free(unsafe.Pointer(cerr)) + + flags := ostreeDumpNone + if options.Raw { + flags |= ostreeDumpRaw + } + + if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(repo.native(), cbranch, C.FALSE, &checksum, &cerr))) { + return nil, generateError(cerr) + } + + return logCommit(repo, checksum, false, flags) +} + +func logCommit(repo *Repo, checksum *C.char, isRecursive bool, flags ostreeDumpFlags) ([]LogEntry, error) { + var variant *C.GVariant + var gerr = glib.NewGError() + var cerr = (*C.GError)(gerr.Ptr()) + defer C.free(unsafe.Pointer(cerr)) + + if !glib.GoBool(glib.GBoolean(C.ostree_repo_load_variant(repo.native(), C.OSTREE_OBJECT_TYPE_COMMIT, checksum, &variant, &cerr))) { + if isRecursive && glib.GoBool(glib.GBoolean(C.g_error_matches(cerr, C.g_io_error_quark(), C.G_IO_ERROR_NOT_FOUND))) { + return nil, nil + } + return nil, generateError(cerr) + } + + // Get the parent of this commit + parent := (*C.char)(C.ostree_commit_get_parent(variant)) + defer C.free(unsafe.Pointer(parent)) + + entries := make([]LogEntry, 0, 1) + if parent != nil { + var err error + entries, err = logCommit(repo, parent, true, flags) + if err != nil { + return nil, err + } + } + + nextLogEntry := dumpLogObject(C.OSTREE_OBJECT_TYPE_COMMIT, checksum, variant, flags) + entries = append(entries, nextLogEntry) + + return entries, nil +} + +func dumpLogObject(objectType C.OstreeObjectType, checksum *C.char, variant *C.GVariant, flags ostreeDumpFlags) LogEntry { + csum := []byte(C.GoString(checksum)) + + if (flags & ostreeDumpRaw) != 0
{ + return dumpVariant(variant, csum) + } + + switch objectType { + case C.OSTREE_OBJECT_TYPE_COMMIT: + return dumpCommit(variant, flags, csum) + default: + return LogEntry{ + Checksum: csum, + } + } +} + +func dumpVariant(variant *C.GVariant, csum []byte) LogEntry { + var logVariant []byte + if C.G_BYTE_ORDER != C.G_BIG_ENDIAN { + byteswappedVariant := C.g_variant_byteswap(variant) + logVariant = []byte(C.GoString((*C.char)(C.g_variant_print(byteswappedVariant, C.TRUE)))) + } else { + logVariant = []byte(C.GoString((*C.char)(C.g_variant_print(variant, C.TRUE)))) + } + + return LogEntry{ + Checksum: csum, + Variant: logVariant, + } +} + +func dumpCommit(variant *C.GVariant, flags ostreeDumpFlags, csum []byte) LogEntry { + var subject *C.char + defer C.free(unsafe.Pointer(subject)) + var body *C.char + defer C.free(unsafe.Pointer(body)) + var timeBigE C.guint64 + + C._g_variant_get_commit_dump(variant, C.CString("(a{sv}aya(say)&s&stayay)"), &subject, &body, &timeBigE) + + // Translate to a host-endian epoch and convert to Go timestamp + timeHostE := C._guint64_from_be(timeBigE) + timestamp := time.Unix((int64)(timeHostE), 0) + + return LogEntry{ + Timestamp: timestamp, + Subject: C.GoString(subject), + Body: C.GoString(body), + } +} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/prune.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/prune.go new file mode 100644 index 0000000000..532522fc59 --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/prune.go @@ -0,0 +1,217 @@ +package otbuiltin + +import ( + "bytes" + "errors" + "strconv" + "strings" + "time" + "unsafe" + + glib "github.com/ostreedev/ostree-go/pkg/glibobject" +) + +// #cgo pkg-config: ostree-1 +// #include <stdlib.h> +// #include <glib.h> +// #include <ostree.h> +// #include "builtin.go.h" +import "C" + +// Declare a global variable for options +var pruneOpts pruneOptions + +// Contains all of the options for pruning an ostree repo. Use +// NewPruneOptions() to initialize +type pruneOptions struct { + NoPrune bool // Only display unreachable objects; don't delete + RefsOnly bool // Only compute reachability via refs + DeleteCommit string // Specify a commit to delete + KeepYoungerThan time.Time // All commits older than this date will be pruned + Depth int // Only traverse the given number of parents for each commit (default: -1 = infinite) + StaticDeltasOnly int // Change the behavior of --keep-younger-than and --delete-commit to prune only the static delta files +} + +// Instantiates and returns a pruneOptions struct with default values set +func NewPruneOptions() pruneOptions { + po := new(pruneOptions) + po.Depth = -1 + return *po +} + +// Search for unreachable objects in the repository given by repoPath.
Removes the +// objects unless pruneOptions.NoPrune is specified +func Prune(repoPath string, options pruneOptions) (string, error) { + pruneOpts = options + // attempt to open the repository + repo, err := OpenRepo(repoPath) + if err != nil { + return "", err + } + + var pruneFlags C.OstreeRepoPruneFlags + var numObjectsTotal int + var numObjectsPruned int + var objSizeTotal uint64 + var gerr = glib.NewGError() + var cerr = (*C.GError)(gerr.Ptr()) + defer C.free(unsafe.Pointer(cerr)) + var cancellable *glib.GCancellable + + if !pruneOpts.NoPrune && !glib.GoBool(glib.GBoolean(C.ostree_repo_is_writable(repo.native(), &cerr))) { + return "", generateError(cerr) + } + + cerr = nil + if strings.Compare(pruneOpts.DeleteCommit, "") != 0 { + if pruneOpts.NoPrune { + return "", errors.New("Cannot specify both pruneOptions.DeleteCommit and pruneOptions.NoPrune") + } + + if pruneOpts.StaticDeltasOnly > 0 { + if !glib.GoBool(glib.GBoolean(C.ostree_repo_prune_static_deltas(repo.native(), C.CString(pruneOpts.DeleteCommit), (*C.GCancellable)(cancellable.Ptr()), &cerr))) { + return "", generateError(cerr) + } + } else if err = deleteCommit(repo, pruneOpts.DeleteCommit, cancellable); err != nil { + return "", err + } + } + + if !pruneOpts.KeepYoungerThan.IsZero() { + if pruneOpts.NoPrune { + return "", errors.New("Cannot specify both pruneOptions.KeepYoungerThan and pruneOptions.NoPrune") + } + + if err = pruneCommitsKeepYoungerThanDate(repo, pruneOpts.KeepYoungerThan, cancellable); err != nil { + return "", err + } + } + + if pruneOpts.RefsOnly { + pruneFlags |= C.OSTREE_REPO_PRUNE_FLAGS_REFS_ONLY + } + if pruneOpts.NoPrune { + pruneFlags |= C.OSTREE_REPO_PRUNE_FLAGS_NO_PRUNE + } + + formattedFreedSize := C.GoString((*C.char)(C.g_format_size_full((C.guint64)(objSizeTotal), 0))) + + var buffer bytes.Buffer + + buffer.WriteString("Total objects: ") + buffer.WriteString(strconv.Itoa(numObjectsTotal)) + if numObjectsPruned == 0 { + buffer.WriteString("\nNo unreachable objects") + } else if pruneOpts.NoPrune { + buffer.WriteString("\nWould delete: ") + buffer.WriteString(strconv.Itoa(numObjectsPruned)) + buffer.WriteString(" objects, freeing ") + buffer.WriteString(formattedFreedSize) + } else { + buffer.WriteString("\nDeleted ") + buffer.WriteString(strconv.Itoa(numObjectsPruned)) + buffer.WriteString(" objects, ") + buffer.WriteString(formattedFreedSize) + buffer.WriteString(" freed") + } + + return buffer.String(), nil +} + +// Delete an unreachable commit from the repo +func deleteCommit(repo *Repo, commitToDelete string, cancellable *glib.GCancellable) error { + var refs *glib.GHashTable + var hashIter glib.GHashTableIter + var hashkey, hashvalue C.gpointer + var gerr = glib.NewGError() + var cerr = (*C.GError)(gerr.Ptr()) + defer C.free(unsafe.Pointer(cerr)) + + if !glib.GoBool(glib.GBoolean(C.ostree_repo_list_refs(repo.native(), nil, (**C.GHashTable)(refs.Ptr()), (*C.GCancellable)(cancellable.Ptr()), &cerr))) { + return generateError(cerr) + } + + C.g_hash_table_iter_init((*C.GHashTableIter)(hashIter.Ptr()), (*C.GHashTable)(refs.Ptr())) + for C.g_hash_table_iter_next((*C.GHashTableIter)(hashIter.Ptr()), &hashkey, &hashvalue) != 0 { + var ref string = C.GoString((*C.char)(hashkey)) + var commit string = C.GoString((*C.char)(hashvalue)) + if strings.Compare(commitToDelete, commit) == 0 { + var buffer bytes.Buffer + buffer.WriteString("Commit ") + buffer.WriteString(commitToDelete) + buffer.WriteString(" is referenced by ") + buffer.WriteString(ref) + return errors.New(buffer.String()) + } + } + + if err :=
repo.enableTombstoneCommits(); err != nil { + return err + } + + if !glib.GoBool(glib.GBoolean(C.ostree_repo_delete_object(repo.native(), C.OSTREE_OBJECT_TYPE_COMMIT, C.CString(commitToDelete), (*C.GCancellable)(cancellable.Ptr()), &cerr))) { + return generateError(cerr) + } + + return nil +} + +// Prune commits but keep any younger than the given date regardless of whether they +// are reachable +func pruneCommitsKeepYoungerThanDate(repo *Repo, date time.Time, cancellable *glib.GCancellable) error { + var objects *glib.GHashTable + defer C.free(unsafe.Pointer(objects)) + var hashIter glib.GHashTableIter + var key, value C.gpointer + defer C.free(unsafe.Pointer(key)) + defer C.free(unsafe.Pointer(value)) + var gerr = glib.NewGError() + var cerr = (*C.GError)(gerr.Ptr()) + defer C.free(unsafe.Pointer(cerr)) + + if err := repo.enableTombstoneCommits(); err != nil { + return err + } + + if !glib.GoBool(glib.GBoolean(C.ostree_repo_list_objects(repo.native(), C.OSTREE_REPO_LIST_OBJECTS_ALL, (**C.GHashTable)(objects.Ptr()), (*C.GCancellable)(cancellable.Ptr()), &cerr))) { + return generateError(cerr) + } + + C.g_hash_table_iter_init((*C.GHashTableIter)(hashIter.Ptr()), (*C.GHashTable)(objects.Ptr())) + for C.g_hash_table_iter_next((*C.GHashTableIter)(hashIter.Ptr()), &key, &value) != 0 { + var serializedKey *glib.GVariant + defer C.free(unsafe.Pointer(serializedKey)) + var checksum *C.char + defer C.free(unsafe.Pointer(checksum)) + var objType C.OstreeObjectType + var commitTimestamp uint64 + var commit *glib.GVariant = nil + + C.ostree_object_name_deserialize((*C.GVariant)(serializedKey.Ptr()), &checksum, &objType) + + if objType != C.OSTREE_OBJECT_TYPE_COMMIT { + continue + } + + cerr = nil + if !glib.GoBool(glib.GBoolean(C.ostree_repo_load_variant(repo.native(), C.OSTREE_OBJECT_TYPE_COMMIT, checksum, (**C.GVariant)(commit.Ptr()), &cerr))) { + return generateError(cerr) + } + + commitTimestamp = (uint64)(C.ostree_commit_get_timestamp((*C.GVariant)(commit.Ptr()))) + if commitTimestamp < (uint64)(date.Unix()) { + cerr = nil + if pruneOpts.StaticDeltasOnly != 0 { + if !glib.GoBool(glib.GBoolean(C.ostree_repo_prune_static_deltas(repo.native(), checksum, (*C.GCancellable)(cancellable.Ptr()), &cerr))) { + return generateError(cerr) + } + } else { + if !glib.GoBool(glib.GBoolean(C.ostree_repo_delete_object(repo.native(), C.OSTREE_OBJECT_TYPE_COMMIT, checksum, (*C.GCancellable)(cancellable.Ptr()), &cerr))) { + return generateError(cerr) + } + } + } + } + + return nil +} diff --git a/vendor/github.com/sylabs/sif/v2/LICENSE.md b/vendor/github.com/sylabs/sif/v2/LICENSE.md new file mode 100644 index 0000000000..9e825f695e --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/LICENSE.md @@ -0,0 +1,29 @@ +# LICENSE + +Copyright (c) 2018-2023, Sylabs Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/arch.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/arch.go new file mode 100644 index 0000000000..d6f667378a --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/arch.go @@ -0,0 +1,72 @@ +// Copyright (c) 2021-2022, Sylabs Inc. All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +package sif + +var ( + hdrArchUnknown archType = [...]byte{'0', '0', '\x00'} + hdrArch386 archType = [...]byte{'0', '1', '\x00'} + hdrArchAMD64 archType = [...]byte{'0', '2', '\x00'} + hdrArchARM archType = [...]byte{'0', '3', '\x00'} + hdrArchARM64 archType = [...]byte{'0', '4', '\x00'} + hdrArchPPC64 archType = [...]byte{'0', '5', '\x00'} + hdrArchPPC64le archType = [...]byte{'0', '6', '\x00'} + hdrArchMIPS archType = [...]byte{'0', '7', '\x00'} + hdrArchMIPSle archType = [...]byte{'0', '8', '\x00'} + hdrArchMIPS64 archType = [...]byte{'0', '9', '\x00'} + hdrArchMIPS64le archType = [...]byte{'1', '0', '\x00'} + hdrArchS390x archType = [...]byte{'1', '1', '\x00'} + hdrArchRISCV64 archType = [...]byte{'1', '2', '\x00'} +) + +type archType [3]byte + +// getSIFArch returns the archType corresponding to go runtime arch. +func getSIFArch(arch string) archType { + archMap := map[string]archType{ + "386": hdrArch386, + "amd64": hdrArchAMD64, + "arm": hdrArchARM, + "arm64": hdrArchARM64, + "ppc64": hdrArchPPC64, + "ppc64le": hdrArchPPC64le, + "mips": hdrArchMIPS, + "mipsle": hdrArchMIPSle, + "mips64": hdrArchMIPS64, + "mips64le": hdrArchMIPS64le, + "s390x": hdrArchS390x, + "riscv64": hdrArchRISCV64, + } + + t, ok := archMap[arch] + if !ok { + return hdrArchUnknown + } + return t +} + +// GoArch returns the go runtime arch corresponding to t. +func (t archType) GoArch() string { + archMap := map[archType]string{ + hdrArch386: "386", + hdrArchAMD64: "amd64", + hdrArchARM: "arm", + hdrArchARM64: "arm64", + hdrArchPPC64: "ppc64", + hdrArchPPC64le: "ppc64le", + hdrArchMIPS: "mips", + hdrArchMIPSle: "mipsle", + hdrArchMIPS64: "mips64", + hdrArchMIPS64le: "mips64le", + hdrArchS390x: "s390x", + hdrArchRISCV64: "riscv64", + } + + arch, ok := archMap[t] + if !ok { + arch = "unknown" + } + return arch +} diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/buffer.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/buffer.go new file mode 100644 index 0000000000..1d73a4750e --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/buffer.go @@ -0,0 +1,103 @@ +// Copyright (c) 2021-2022, Sylabs Inc. All rights reserved. +// This software is licensed under a 3-clause BSD license. 
Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +package sif + +import ( + "errors" + "io" +) + +// A Buffer is a variable-sized buffer of bytes that implements the sif.ReadWriter interface. The +// zero value for Buffer is an empty buffer ready to use. +type Buffer struct { + buf []byte + pos int64 +} + +// NewBuffer creates and initializes a new Buffer using buf as its initial contents. +func NewBuffer(buf []byte) *Buffer { + return &Buffer{buf: buf} +} + +var errNegativeOffset = errors.New("negative offset") + +// ReadAt implements the io.ReaderAt interface. +func (b *Buffer) ReadAt(p []byte, off int64) (int, error) { + if off < 0 { + return 0, errNegativeOffset + } + + if off >= int64(len(b.buf)) { + return 0, io.EOF + } + + n := copy(p, b.buf[off:]) + if n < len(p) { + return n, io.EOF + } + return n, nil +} + +var errNegativePosition = errors.New("negative position") + +// Write implements the io.Writer interface. +func (b *Buffer) Write(p []byte) (int, error) { + if b.pos < 0 { + return 0, errNegativePosition + } + + if have, need := int64(len(b.buf))-b.pos, int64(len(p)); have < need { + b.buf = append(b.buf, make([]byte, need-have)...) + } + + n := copy(b.buf[b.pos:], p) + b.pos += int64(n) + return n, nil +} + +var errInvalidWhence = errors.New("invalid whence") + +// Seek implements the io.Seeker interface. +func (b *Buffer) Seek(offset int64, whence int) (int64, error) { + var abs int64 + + switch whence { + case io.SeekStart: + abs = offset + case io.SeekCurrent: + abs = b.pos + offset + case io.SeekEnd: + abs = int64(len(b.buf)) + offset + default: + return 0, errInvalidWhence + } + + if abs < 0 { + return 0, errNegativePosition + } + + b.pos = abs + return abs, nil +} + +var errTruncateRange = errors.New("truncation out of range") + +// Truncate discards all but the first n bytes from the buffer. +func (b *Buffer) Truncate(n int64) error { + if n < 0 || n > int64(len(b.buf)) { + return errTruncateRange + } + + b.buf = b.buf[:n] + return nil +} + +// Bytes returns the contents of the buffer. The slice is valid for use only until the next buffer +// modification (that is, only until the next call to a method like ReadAt, Write, or Truncate). +func (b *Buffer) Bytes() []byte { return b.buf } + +// Len returns the number of bytes in the buffer. +func (b *Buffer) Len() int64 { return int64(len(b.buf)) } diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go new file mode 100644 index 0000000000..be3f6dc10f --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go @@ -0,0 +1,698 @@ +// Copyright (c) 2018-2023, Sylabs Inc. All rights reserved. +// Copyright (c) 2017, SingularityWare, LLC. All rights reserved. +// Copyright (c) 2017, Yannick Cote All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +package sif + +import ( + "encoding" + "encoding/binary" + "errors" + "fmt" + "io" + "os" + "time" + + "github.com/google/uuid" +) + +// nextAligned finds the next offset that satisfies alignment. 
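+// The computation below assumes alignment is zero or a power of two; for +// example, nextAligned(4100, 4096) == 8192, nextAligned(4096, 4096) == 4096, +// and nextAligned(10, 0) == 10.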
+func nextAligned(offset int64, alignment int) int64 { + align64 := uint64(alignment) + offset64 := uint64(offset) + + if align64 != 0 && offset64%align64 != 0 { + offset64 = (offset64 & ^(align64 - 1)) + align64 + } + + return int64(offset64) +} + +// writeDataObjectAt writes the data object described by di to ws, using time t, recording details +// in d. The object is written at the first position that satisfies the alignment requirements +// described by di following offsetUnaligned. +func writeDataObjectAt(ws io.WriteSeeker, offsetUnaligned int64, di DescriptorInput, t time.Time, d *rawDescriptor) error { //nolint:lll + offset, err := ws.Seek(nextAligned(offsetUnaligned, di.opts.alignment), io.SeekStart) + if err != nil { + return err + } + + n, err := io.Copy(ws, di.r) + if err != nil { + return err + } + + if err := di.fillDescriptor(t, d); err != nil { + return err + } + d.Used = true + d.Offset = offset + d.Size = n + d.SizeWithPadding = offset - offsetUnaligned + n + + return nil +} + +var ( + errInsufficientCapacity = errors.New("insufficient descriptor capacity to add data object(s) to image") + errPrimaryPartition = errors.New("image already contains a primary partition") +) + +// writeDataObject writes the data object described by di to f, using time t, recording details in +// the descriptor at index i. +func (f *FileImage) writeDataObject(i int, di DescriptorInput, t time.Time) error { + if i >= len(f.rds) { + return errInsufficientCapacity + } + + // If this is a primary partition, verify there isn't another primary partition, and update the + // architecture in the global header. + if p, ok := di.opts.md.(partition); ok && p.Parttype == PartPrimSys { + if ds, err := f.GetDescriptors(WithPartitionType(PartPrimSys)); err == nil && len(ds) > 0 { + return errPrimaryPartition + } + + f.h.Arch = p.Arch + } + + d := &f.rds[i] + d.ID = uint32(i) + 1 + + if err := writeDataObjectAt(f.rw, f.h.DataOffset+f.h.DataSize, di, t, d); err != nil { + return err + } + + // Update minimum object ID map. + if minID, ok := f.minIDs[d.GroupID]; !ok || d.ID < minID { + f.minIDs[d.GroupID] = d.ID + } + + f.h.DescriptorsFree-- + f.h.DataSize += d.SizeWithPadding + + return nil +} + +// writeDescriptors writes the descriptors in f to backing storage. +func (f *FileImage) writeDescriptors() error { + if _, err := f.rw.Seek(f.h.DescriptorsOffset, io.SeekStart); err != nil { + return err + } + + return binary.Write(f.rw, binary.LittleEndian, f.rds) +} + +// writeHeader writes the global header in f to backing storage. +func (f *FileImage) writeHeader() error { + if _, err := f.rw.Seek(0, io.SeekStart); err != nil { + return err + } + + return binary.Write(f.rw, binary.LittleEndian, f.h) +} + +// createOpts accumulates container creation options. +type createOpts struct { + launchScript [hdrLaunchLen]byte + id uuid.UUID + descriptorsOffset int64 + descriptorCapacity int64 + dis []DescriptorInput + t time.Time + closeOnUnload bool +} + +// CreateOpt are used to specify container creation options. +type CreateOpt func(*createOpts) error + +var errLaunchScriptLen = errors.New("launch script too large") + +// OptCreateWithLaunchScript specifies s as the launch script. 
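+// +// A hedged usage sketch (the script body is only a stand-in): +// +// f, err := CreateContainer(rw, OptCreateWithLaunchScript("#!/bin/sh\necho hello\n")) +// // rw is any ReadWriter; see CreateContainer below.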
+func OptCreateWithLaunchScript(s string) CreateOpt { + return func(co *createOpts) error { + b := []byte(s) + + if len(b) >= len(co.launchScript) { + return errLaunchScriptLen + } + + copy(co.launchScript[:], b) + + return nil + } +} + +// OptCreateDeterministic sets header/descriptor fields to values that support deterministic +// creation of images. +func OptCreateDeterministic() CreateOpt { + return func(co *createOpts) error { + co.id = uuid.Nil + co.t = time.Time{} + return nil + } +} + +// OptCreateWithID specifies id as the unique ID. +func OptCreateWithID(id string) CreateOpt { + return func(co *createOpts) error { + id, err := uuid.Parse(id) + co.id = id + return err + } +} + +// OptCreateWithDescriptorCapacity specifies that the created image should have the capacity for a +// maximum of n descriptors. +func OptCreateWithDescriptorCapacity(n int64) CreateOpt { + return func(co *createOpts) error { + co.descriptorCapacity = n + return nil + } +} + +// OptCreateWithDescriptors appends dis to the list of descriptors. +func OptCreateWithDescriptors(dis ...DescriptorInput) CreateOpt { + return func(co *createOpts) error { + co.dis = append(co.dis, dis...) + return nil + } +} + +// OptCreateWithTime specifies t as the image creation time. +func OptCreateWithTime(t time.Time) CreateOpt { + return func(co *createOpts) error { + co.t = t + return nil + } +} + +// OptCreateWithCloseOnUnload specifies whether the ReadWriter should be closed by UnloadContainer. +// By default, the ReadWriter will be closed if it implements the io.Closer interface. +func OptCreateWithCloseOnUnload(b bool) CreateOpt { + return func(co *createOpts) error { + co.closeOnUnload = b + return nil + } +} + +// createContainer creates a new SIF container file in rw, according to opts. +func createContainer(rw ReadWriter, co createOpts) (*FileImage, error) { + rds := make([]rawDescriptor, co.descriptorCapacity) + rdsSize := int64(binary.Size(rds)) + + h := header{ + LaunchScript: co.launchScript, + Magic: hdrMagic, + Version: CurrentVersion.bytes(), + Arch: hdrArchUnknown, + ID: co.id, + CreatedAt: co.t.Unix(), + ModifiedAt: co.t.Unix(), + DescriptorsFree: co.descriptorCapacity, + DescriptorsTotal: co.descriptorCapacity, + DescriptorsOffset: co.descriptorsOffset, + DescriptorsSize: rdsSize, + DataOffset: co.descriptorsOffset + rdsSize, + } + + f := &FileImage{ + rw: rw, + h: h, + rds: rds, + minIDs: make(map[uint32]uint32), + } + + for i, di := range co.dis { + if err := f.writeDataObject(i, di, co.t); err != nil { + return nil, err + } + } + + if err := f.writeDescriptors(); err != nil { + return nil, err + } + + if err := f.writeHeader(); err != nil { + return nil, err + } + + return f, nil +} + +// CreateContainer creates a new SIF container in rw, according to opts. One or more data objects +// can optionally be specified using OptCreateWithDescriptors. +// +// On success, a FileImage is returned. The caller must call UnloadContainer to ensure resources +// are released. By default, UnloadContainer will close rw if it implements the io.Closer +// interface. To change this behavior, consider using OptCreateWithCloseOnUnload. +// +// By default, the image ID is set to a randomly generated value. To override this, consider using +// OptCreateDeterministic or OptCreateWithID. +// +// By default, the image creation time is set to the current time. To override this, consider using +// OptCreateDeterministic or OptCreateWithTime. +// +// By default, the image will support a maximum of 48 descriptors. 
To change this, consider using +// OptCreateWithDescriptorCapacity. +// +// A launch script can optionally be set using OptCreateWithLaunchScript. +func CreateContainer(rw ReadWriter, opts ...CreateOpt) (*FileImage, error) { + id, err := uuid.NewRandom() + if err != nil { + return nil, err + } + + co := createOpts{ + id: id, + descriptorsOffset: 4096, + descriptorCapacity: 48, + t: time.Now(), + closeOnUnload: true, + } + + for _, opt := range opts { + if err := opt(&co); err != nil { + return nil, fmt.Errorf("%w", err) + } + } + + f, err := createContainer(rw, co) + if err != nil { + return nil, fmt.Errorf("%w", err) + } + + f.closeOnUnload = co.closeOnUnload + return f, nil +} + +// CreateContainerAtPath creates a new SIF container file at path, according to opts. One or more +// data objects can optionally be specified using OptCreateWithDescriptors. +// +// On success, a FileImage is returned. The caller must call UnloadContainer to ensure resources +// are released. +// +// By default, the image ID is set to a randomly generated value. To override this, consider using +// OptCreateDeterministic or OptCreateWithID. +// +// By default, the image creation time is set to the current time. To override this, consider using +// OptCreateDeterministic or OptCreateWithTime. +// +// By default, the image will support a maximum of 48 descriptors. To change this, consider using +// OptCreateWithDescriptorCapacity. +// +// A launch script can optionally be set using OptCreateWithLaunchScript. +func CreateContainerAtPath(path string, opts ...CreateOpt) (*FileImage, error) { + fp, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o755) + if err != nil { + return nil, fmt.Errorf("%w", err) + } + + f, err := CreateContainer(fp, opts...) + if err != nil { + fp.Close() + os.Remove(fp.Name()) + + return nil, err + } + + f.closeOnUnload = true + return f, nil +} + +// addOpts accumulates object add options. +type addOpts struct { + t time.Time +} + +// AddOpt are used to specify object add options. +type AddOpt func(*addOpts) error + +// OptAddDeterministic sets header/descriptor fields to values that support deterministic +// modification of images. +func OptAddDeterministic() AddOpt { + return func(ao *addOpts) error { + ao.t = time.Time{} + return nil + } +} + +// OptAddWithTime specifies t as the image modification time. +func OptAddWithTime(t time.Time) AddOpt { + return func(ao *addOpts) error { + ao.t = t + return nil + } +} + +// AddObject adds a new data object and its descriptor into the specified SIF file. +// +// By default, the image modification time is set to the current time for non-deterministic images, +// and unset otherwise. To override this, consider using OptAddDeterministic or OptAddWithTime. +func (f *FileImage) AddObject(di DescriptorInput, opts ...AddOpt) error { + ao := addOpts{} + + if !f.isDeterministic() { + ao.t = time.Now() + } + + for _, opt := range opts { + if err := opt(&ao); err != nil { + return fmt.Errorf("%w", err) + } + } + + // Find an unused descriptor. + i := 0 + for _, rd := range f.rds { + if !rd.Used { + break + } + i++ + } + + if err := f.writeDataObject(i, di, ao.t); err != nil { + return fmt.Errorf("%w", err) + } + + if err := f.writeDescriptors(); err != nil { + return fmt.Errorf("%w", err) + } + + f.h.ModifiedAt = ao.t.Unix() + + if err := f.writeHeader(); err != nil { + return fmt.Errorf("%w", err) + } + + return nil +} + +// isLast returns true if the data object associated with d is the last in f.
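+// It does so by walking every descriptor and checking whether any object ends +// past the end of d's data; the callback returns !isLast, so the walk stops as +// soon as a later object is found.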
+func (f *FileImage) isLast(d *rawDescriptor) bool { + isLast := true + + end := d.Offset + d.Size + f.WithDescriptors(func(d Descriptor) bool { + isLast = d.Offset()+d.Size() <= end + return !isLast + }) + + return isLast +} + +// zeroReader is an io.Reader that returns a stream of zero-bytes. +type zeroReader struct{} + +func (zeroReader) Read(b []byte) (int, error) { + for i := range b { + b[i] = 0 + } + return len(b), nil +} + +// zero overwrites the data object described by d with a stream of zero bytes. +func (f *FileImage) zero(d *rawDescriptor) error { + if _, err := f.rw.Seek(d.Offset, io.SeekStart); err != nil { + return err + } + + _, err := io.CopyN(f.rw, zeroReader{}, d.Size) + return err +} + +// truncateAt truncates f at the start of the padded data object described by d. +func (f *FileImage) truncateAt(d *rawDescriptor) error { + start := d.Offset + d.Size - d.SizeWithPadding + + return f.rw.Truncate(start) +} + +// deleteOpts accumulates object deletion options. +type deleteOpts struct { + zero bool + compact bool + t time.Time +} + +// DeleteOpt are used to specify object deletion options. +type DeleteOpt func(*deleteOpts) error + +// OptDeleteZero specifies whether the deleted object should be zeroed. +func OptDeleteZero(b bool) DeleteOpt { + return func(do *deleteOpts) error { + do.zero = b + return nil + } +} + +// OptDeleteCompact specifies whether the image should be compacted following object deletion. +func OptDeleteCompact(b bool) DeleteOpt { + return func(do *deleteOpts) error { + do.compact = b + return nil + } +} + +// OptDeleteDeterministic sets header/descriptor fields to values that support deterministic +// modification of images. +func OptDeleteDeterministic() DeleteOpt { + return func(do *deleteOpts) error { + do.t = time.Time{} + return nil + } +} + +// OptDeleteWithTime specifies t as the image modification time. +func OptDeleteWithTime(t time.Time) DeleteOpt { + return func(do *deleteOpts) error { + do.t = t + return nil + } +} + +var errCompactNotImplemented = errors.New("compact not implemented for non-last object") + +// DeleteObject deletes the data object with id, according to opts. +// +// To zero the data region of the deleted object, use OptDeleteZero. To compact the file following +// object deletion, use OptDeleteCompact. +// +// By default, the image modification time is set to the current time for non-deterministic images, +// and unset otherwise. To override this, consider using OptDeleteDeterministic or +// OptDeleteWithTime. +func (f *FileImage) DeleteObject(id uint32, opts ...DeleteOpt) error { + do := deleteOpts{} + + if !f.isDeterministic() { + do.t = time.Now() + } + + for _, opt := range opts { + if err := opt(&do); err != nil { + return fmt.Errorf("%w", err) + } + } + + d, err := f.getDescriptor(WithID(id)) + if err != nil { + return fmt.Errorf("%w", err) + } + + if do.compact && !f.isLast(d) { + return fmt.Errorf("%w", errCompactNotImplemented) + } + + if do.zero { + if err := f.zero(d); err != nil { + return fmt.Errorf("%w", err) + } + } + + if do.compact { + if err := f.truncateAt(d); err != nil { + return fmt.Errorf("%w", err) + } + + f.h.DataSize -= d.SizeWithPadding + } + + f.h.DescriptorsFree++ + f.h.ModifiedAt = do.t.Unix() + + // If we remove the primary partition, set the global header Arch field to HdrArchUnknown + // to indicate that the SIF file doesn't include a primary partition and no dependency + // on any architecture exists. 
+ if d.isPartitionOfType(PartPrimSys) { + f.h.Arch = hdrArchUnknown + } + + // Reset the rawDescriptor to an empty struct + *d = rawDescriptor{} + + if err := f.writeDescriptors(); err != nil { + return fmt.Errorf("%w", err) + } + + if err := f.writeHeader(); err != nil { + return fmt.Errorf("%w", err) + } + + return nil +} + +// setOpts accumulates object set options. +type setOpts struct { + t time.Time +} + +// SetOpt are used to specify object set options. +type SetOpt func(*setOpts) error + +// OptSetDeterministic sets header/descriptor fields to values that support deterministic +// modification of images. +func OptSetDeterministic() SetOpt { + return func(so *setOpts) error { + so.t = time.Time{} + return nil + } +} + +// OptSetWithTime specifies t as the image/object modification time. +func OptSetWithTime(t time.Time) SetOpt { + return func(so *setOpts) error { + so.t = t + return nil + } +} + +var ( + errNotPartition = errors.New("data object not a partition") + errNotSystem = errors.New("data object not a system partition") +) + +// SetPrimPart sets the specified system partition to be the primary one. +// +// By default, the image/object modification times are set to the current time for +// non-deterministic images, and unset otherwise. To override this, consider using +// OptSetDeterministic or OptSetWithTime. +func (f *FileImage) SetPrimPart(id uint32, opts ...SetOpt) error { + so := setOpts{} + + if !f.isDeterministic() { + so.t = time.Now() + } + + for _, opt := range opts { + if err := opt(&so); err != nil { + return fmt.Errorf("%w", err) + } + } + + descr, err := f.getDescriptor(WithID(id)) + if err != nil { + return fmt.Errorf("%w", err) + } + + if descr.DataType != DataPartition { + return fmt.Errorf("%w", errNotPartition) + } + + var p partition + if err := descr.getExtra(binaryUnmarshaler{&p}); err != nil { + return fmt.Errorf("%w", err) + } + + // if already primary system partition, nothing to do + if p.Parttype == PartPrimSys { + return nil + } + + if p.Parttype != PartSystem { + return fmt.Errorf("%w", errNotSystem) + } + + // If there is currently a primary system partition, update it. + if d, err := f.getDescriptor(WithPartitionType(PartPrimSys)); err == nil { + var p partition + if err := d.getExtra(binaryUnmarshaler{&p}); err != nil { + return fmt.Errorf("%w", err) + } + + p.Parttype = PartSystem + + if err := d.setExtra(p); err != nil { + return fmt.Errorf("%w", err) + } + + d.ModifiedAt = so.t.Unix() + } else if !errors.Is(err, ErrObjectNotFound) { + return fmt.Errorf("%w", err) + } + + // Update the descriptor of the new primary system partition. + p.Parttype = PartPrimSys + + if err := descr.setExtra(p); err != nil { + return fmt.Errorf("%w", err) + } + + descr.ModifiedAt = so.t.Unix() + + if err := f.writeDescriptors(); err != nil { + return fmt.Errorf("%w", err) + } + + f.h.Arch = p.Arch + f.h.ModifiedAt = so.t.Unix() + + if err := f.writeHeader(); err != nil { + return fmt.Errorf("%w", err) + } + + return nil +} + +// SetMetadata sets the metadata of the data object with id to md, according to opts. +// +// By default, the image/object modification times are set to the current time for +// non-deterministic images, and unset otherwise. To override this, consider using +// OptSetDeterministic or OptSetWithTime.
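+// +// A minimal sketch (assumes id refers to an existing object and md is any +// encoding.BinaryMarshaler, e.g. the package's binaryMarshaler adapter): +// +// if err := f.SetMetadata(id, md, OptSetWithTime(time.Now())); err != nil { +// // handle the error +// }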
+func (f *FileImage) SetMetadata(id uint32, md encoding.BinaryMarshaler, opts ...SetOpt) error { + so := setOpts{} + + if !f.isDeterministic() { + so.t = time.Now() + } + + for _, opt := range opts { + if err := opt(&so); err != nil { + return fmt.Errorf("%w", err) + } + } + + rd, err := f.getDescriptor(WithID(id)) + if err != nil { + return fmt.Errorf("%w", err) + } + + if err := rd.setExtra(md); err != nil { + return fmt.Errorf("%w", err) + } + + rd.ModifiedAt = so.t.Unix() + + if err := f.writeDescriptors(); err != nil { + return fmt.Errorf("%w", err) + } + + f.h.ModifiedAt = so.t.Unix() + + if err := f.writeHeader(); err != nil { + return fmt.Errorf("%w", err) + } + + return nil +} diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go new file mode 100644 index 0000000000..3b7a47767c --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go @@ -0,0 +1,392 @@ +// Copyright (c) 2018-2023, Sylabs Inc. All rights reserved. +// Copyright (c) 2017, SingularityWare, LLC. All rights reserved. +// Copyright (c) 2017, Yannick Cote All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +package sif + +import ( + "bytes" + "crypto" + "crypto/sha256" + "encoding" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "hash" + "io" + "strings" + "time" + + v1 "github.com/google/go-containerregistry/pkg/v1" +) + +// rawDescriptor represents an on-disk object descriptor. +type rawDescriptor struct { + DataType DataType + Used bool + ID uint32 + GroupID uint32 + LinkedID uint32 + Offset int64 + Size int64 + SizeWithPadding int64 + + CreatedAt int64 + ModifiedAt int64 + UID int64 // Deprecated: UID exists for historical compatibility and should not be used. + GID int64 // Deprecated: GID exists for historical compatibility and should not be used. + Name [descrNameLen]byte + Extra [descrMaxPrivLen]byte +} + +// partition represents the SIF partition data object descriptor. +type partition struct { + Fstype FSType + Parttype PartType + Arch archType +} + +// MarshalBinary encodes p into binary format. +func (p partition) MarshalBinary() ([]byte, error) { + return binaryMarshaler{p}.MarshalBinary() +} + +// signature represents the SIF signature data object descriptor. +type signature struct { + Hashtype hashType + Entity [descrEntityLen]byte +} + +// cryptoMessage represents the SIF crypto message object descriptor. +type cryptoMessage struct { + Formattype FormatType + Messagetype MessageType +} + +// sbom represents the SIF SBOM data object descriptor. +type sbom struct { + Format SBOMFormat +} + +// ociBlob represents the OCI Blob data object descriptor. +type ociBlob struct { + hasher hash.Hash // accumulates hash while writing blob. + digest v1.Hash +} + +// newOCIBlobDigest returns a new ociBlob, that accumulates the digest of an OCI blob as it is +// read. The caller should take care to ensure that the entire contents of the blob have been +// written to the returned ociBlob prior to calling MarshalBinary. +func newOCIBlobDigest() *ociBlob { + return &ociBlob{ + hasher: sha256.New(), + digest: v1.Hash{ + Algorithm: "sha256", + }, + } +} + +// MarshalBinary encodes ob into binary format. 
+func (ob *ociBlob) MarshalBinary() ([]byte, error) { + ob.digest.Hex = hex.EncodeToString(ob.hasher.Sum(nil)) + + return ob.digest.MarshalText() +} + +// UnmarshalBinary decodes b into ob. +func (ob *ociBlob) UnmarshalBinary(b []byte) error { + if before, _, ok := bytes.Cut(b, []byte{0x00}); ok { + b = before + } + + return ob.digest.UnmarshalText(b) +} + +// The binaryMarshaler type is an adapter that allows a type suitable for use with the +// encoding/binary package to be used as an encoding.BinaryMarshaler. +type binaryMarshaler struct{ any } + +// MarshalBinary encodes m into binary format. +func (m binaryMarshaler) MarshalBinary() ([]byte, error) { + var b bytes.Buffer + err := binary.Write(&b, binary.LittleEndian, m.any) + return b.Bytes(), err +} + +// The binaryUnmarshaler type is an adapter that allows a type suitable for use with the +// encoding/binary package to be used as an encoding.BinaryUnmarshaler. +type binaryUnmarshaler struct{ any } + +// UnmarshalBinary decodes b into u. +func (u binaryUnmarshaler) UnmarshalBinary(b []byte) error { + return binary.Read(bytes.NewReader(b), binary.LittleEndian, u.any) +} + +var errNameTooLarge = errors.New("name value too large") + +// setName encodes name into the name field of d. +func (d *rawDescriptor) setName(name string) error { + if len(name) > len(d.Name) { + return errNameTooLarge + } + + for i := copy(d.Name[:], name); i < len(d.Name); i++ { + d.Name[i] = 0 + } + + return nil +} + +var errExtraTooLarge = errors.New("extra value too large") + +// setExtra marshals metadata from md into the "extra" field of d. +func (d *rawDescriptor) setExtra(md encoding.BinaryMarshaler) error { + if md == nil { + return nil + } + + extra, err := md.MarshalBinary() + if err != nil { + return err + } + + if len(extra) > len(d.Extra) { + return errExtraTooLarge + } + + for i := copy(d.Extra[:], extra); i < len(d.Extra); i++ { + d.Extra[i] = 0 + } + + return nil +} + +// getExtra unmarshals metadata from the "extra" field of d into md. +func (d *rawDescriptor) getExtra(md encoding.BinaryUnmarshaler) error { + return md.UnmarshalBinary(d.Extra[:]) +} + +// getPartitionMetadata gets metadata for a partition data object. +func (d rawDescriptor) getPartitionMetadata() (FSType, PartType, string, error) { + if got, want := d.DataType, DataPartition; got != want { + return 0, 0, "", &unexpectedDataTypeError{got, []DataType{want}} + } + + var p partition + + if err := d.getExtra(binaryUnmarshaler{&p}); err != nil { + return 0, 0, "", err + } + + return p.Fstype, p.Parttype, p.Arch.GoArch(), nil +} + +// isPartitionOfType returns true if d is a partition data object of type pt. +func (d rawDescriptor) isPartitionOfType(pt PartType) bool { + _, t, _, err := d.getPartitionMetadata() + if err != nil { + return false + } + return t == pt +} + +// Descriptor represents the SIF descriptor type. +type Descriptor struct { + r io.ReaderAt // Backing storage. + + raw rawDescriptor // Raw descriptor from image. + + relativeID uint32 // ID relative to minimum ID of object group. +} + +// DataType returns the type of data object. +func (d Descriptor) DataType() DataType { return d.raw.DataType } + +// ID returns the data object ID of d. +func (d Descriptor) ID() uint32 { return d.raw.ID } + +// GroupID returns the data object group ID of d, or zero if d is not part of a data object +// group. 
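+// The group flag bit (descrGroupMask) is cleared with &^ so that callers see +// the plain numeric group ID rather than the flagged on-disk value.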
+func (d Descriptor) GroupID() uint32 { return d.raw.GroupID &^ descrGroupMask } + +// LinkedID returns the object/group ID d is linked to, or zero if d does not contain a linked +// ID. If isGroup is true, the returned id is an object group ID. Otherwise, the returned id is a +// data object ID. +// +//nolint:nonamedreturns // Named returns effective as documentation. +func (d Descriptor) LinkedID() (id uint32, isGroup bool) { + return d.raw.LinkedID &^ descrGroupMask, d.raw.LinkedID&descrGroupMask == descrGroupMask +} + +// Offset returns the offset of the data object. +func (d Descriptor) Offset() int64 { return d.raw.Offset } + +// Size returns the data object size. +func (d Descriptor) Size() int64 { return d.raw.Size } + +// CreatedAt returns the creation time of the data object. +func (d Descriptor) CreatedAt() time.Time { return time.Unix(d.raw.CreatedAt, 0) } + +// ModifiedAt returns the modification time of the data object. +func (d Descriptor) ModifiedAt() time.Time { return time.Unix(d.raw.ModifiedAt, 0) } + +// Name returns the name of the data object. +func (d Descriptor) Name() string { return strings.TrimRight(string(d.raw.Name[:]), "\000") } + +// GetMetadata unmarshals metadata from the "extra" field of d into md. +func (d Descriptor) GetMetadata(md encoding.BinaryUnmarshaler) error { + if err := d.raw.getExtra(md); err != nil { + return fmt.Errorf("%w", err) + } + return nil +} + +// PartitionMetadata gets metadata for a partition data object. +// +//nolint:nonamedreturns // Named returns effective as documentation. +func (d Descriptor) PartitionMetadata() (fs FSType, pt PartType, arch string, err error) { + fs, pt, arch, err = d.raw.getPartitionMetadata() + if err != nil { + return 0, 0, "", fmt.Errorf("%w", err) + } + return fs, pt, arch, err +} + +var errHashUnsupported = errors.New("hash algorithm unsupported") + +// getHashType converts ht into a crypto.Hash. +func getHashType(ht hashType) (crypto.Hash, error) { + switch ht { + case hashSHA256: + return crypto.SHA256, nil + case hashSHA384: + return crypto.SHA384, nil + case hashSHA512: + return crypto.SHA512, nil + case hashBLAKE2S: + return crypto.BLAKE2s_256, nil + case hashBLAKE2B: + return crypto.BLAKE2b_256, nil + } + return 0, errHashUnsupported +} + +// SignatureMetadata gets metadata for a signature data object. +// +//nolint:nonamedreturns // Named returns effective as documentation. +func (d Descriptor) SignatureMetadata() (ht crypto.Hash, fp []byte, err error) { + if got, want := d.raw.DataType, DataSignature; got != want { + return ht, fp, &unexpectedDataTypeError{got, []DataType{want}} + } + + var s signature + + if err := d.raw.getExtra(binaryUnmarshaler{&s}); err != nil { + return ht, fp, fmt.Errorf("%w", err) + } + + if ht, err = getHashType(s.Hashtype); err != nil { + return ht, fp, fmt.Errorf("%w", err) + } + + fp = make([]byte, 20) + + if bytes.Equal(s.Entity[:len(fp)], fp) { + return ht, nil, nil // Fingerprint not present. + } + + copy(fp, s.Entity[:]) + + return ht, fp, nil +} + +// CryptoMessageMetadata gets metadata for a crypto message data object. +func (d Descriptor) CryptoMessageMetadata() (FormatType, MessageType, error) { + if got, want := d.raw.DataType, DataCryptoMessage; got != want { + return 0, 0, &unexpectedDataTypeError{got, []DataType{want}} + } + + var m cryptoMessage + + if err := d.raw.getExtra(binaryUnmarshaler{&m}); err != nil { + return 0, 0, fmt.Errorf("%w", err) + } + + return m.Formattype, m.Messagetype, nil +} + +// SBOMMetadata gets metadata for a SBOM data object. 
+func (d Descriptor) SBOMMetadata() (SBOMFormat, error) { + if got, want := d.raw.DataType, DataSBOM; got != want { + return 0, &unexpectedDataTypeError{got, []DataType{want}} + } + + var s sbom + + if err := d.raw.getExtra(binaryUnmarshaler{&s}); err != nil { + return 0, fmt.Errorf("%w", err) + } + + return s.Format, nil +} + +// OCIBlobDigest returns the digest for an OCI blob object. +func (d Descriptor) OCIBlobDigest() (v1.Hash, error) { + if got := d.raw.DataType; got != DataOCIRootIndex && got != DataOCIBlob { + return v1.Hash{}, &unexpectedDataTypeError{got, []DataType{DataOCIRootIndex, DataOCIBlob}} + } + + var o ociBlob + + if err := d.raw.getExtra(&o); err != nil { + return v1.Hash{}, fmt.Errorf("%w", err) + } + + return o.digest, nil +} + +// GetData returns the data object associated with descriptor d. +func (d Descriptor) GetData() ([]byte, error) { + b := make([]byte, d.raw.Size) + if _, err := io.ReadFull(d.GetReader(), b); err != nil { + return nil, err + } + return b, nil +} + +// GetReader returns an io.Reader that reads the data object associated with descriptor d. +func (d Descriptor) GetReader() io.Reader { + return io.NewSectionReader(d.r, d.raw.Offset, d.raw.Size) +} + +// GetIntegrityReader returns an io.Reader that reads the integrity-protected fields from d. +func (d Descriptor) GetIntegrityReader() io.Reader { + fields := []interface{}{ + d.raw.DataType, + d.raw.Used, + d.relativeID, + d.raw.LinkedID, + d.raw.Size, + d.raw.CreatedAt, + d.raw.UID, + d.raw.GID, + } + + // Encode endian-sensitive fields. + data := bytes.Buffer{} + for _, f := range fields { + if err := binary.Write(&data, binary.LittleEndian, f); err != nil { + panic(err) // (*bytes.Buffer).Write() is documented as always returning a nil error. + } + } + + return io.MultiReader( + &data, + bytes.NewReader(d.raw.Name[:]), + bytes.NewReader(d.raw.Extra[:]), + ) +} diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor_input.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor_input.go new file mode 100644 index 0000000000..40318aba6b --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor_input.go @@ -0,0 +1,339 @@ +// Copyright (c) 2021-2023, Sylabs Inc. All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +package sif + +import ( + "crypto" + "encoding" + "errors" + "fmt" + "io" + "time" +) + +// descriptorOpts accumulates data object options. +type descriptorOpts struct { + groupID uint32 + linkID uint32 + alignment int + name string + md encoding.BinaryMarshaler + t time.Time +} + +// DescriptorInputOpt are used to specify data object options. +type DescriptorInputOpt func(DataType, *descriptorOpts) error + +// OptNoGroup specifies the data object is not contained within a data object group. +func OptNoGroup() DescriptorInputOpt { + return func(_ DataType, opts *descriptorOpts) error { + opts.groupID = 0 + return nil + } +} + +// OptGroupID specifies groupID as data object group ID. +func OptGroupID(groupID uint32) DescriptorInputOpt { + return func(_ DataType, opts *descriptorOpts) error { + if groupID == 0 { + return ErrInvalidGroupID + } + opts.groupID = groupID + return nil + } +} + +// OptLinkedID specifies that the data object is linked to the data object with the specified ID.
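+// +// For example (sketch; assumes object ID 1 exists and that NewDescriptorInput, +// defined below, returns a DescriptorInput and an error): +// +// di, err := NewDescriptorInput(DataSignature, r, OptLinkedID(1))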
+func OptLinkedID(id uint32) DescriptorInputOpt {
+	return func(_ DataType, opts *descriptorOpts) error {
+		if id == 0 {
+			return ErrInvalidObjectID
+		}
+		opts.linkID = id
+		return nil
+	}
+}
+
+// OptLinkedGroupID specifies that the data object is linked to the data object group with the
+// specified groupID.
+func OptLinkedGroupID(groupID uint32) DescriptorInputOpt {
+	return func(_ DataType, opts *descriptorOpts) error {
+		if groupID == 0 {
+			return ErrInvalidGroupID
+		}
+		opts.linkID = groupID | descrGroupMask
+		return nil
+	}
+}
+
+// OptObjectAlignment specifies n as the data alignment requirement.
+func OptObjectAlignment(n int) DescriptorInputOpt {
+	return func(_ DataType, opts *descriptorOpts) error {
+		opts.alignment = n
+		return nil
+	}
+}
+
+// OptObjectName specifies name as the data object name.
+func OptObjectName(name string) DescriptorInputOpt {
+	return func(_ DataType, opts *descriptorOpts) error {
+		opts.name = name
+		return nil
+	}
+}
+
+// OptObjectTime specifies t as the data object creation time.
+func OptObjectTime(t time.Time) DescriptorInputOpt {
+	return func(_ DataType, opts *descriptorOpts) error {
+		opts.t = t
+		return nil
+	}
+}
+
+// OptMetadata marshals metadata from md into the "extra" field of the descriptor.
+func OptMetadata(md encoding.BinaryMarshaler) DescriptorInputOpt {
+	return func(t DataType, opts *descriptorOpts) error {
+		opts.md = md
+		return nil
+	}
+}
+
+type unexpectedDataTypeError struct {
+	got  DataType
+	want []DataType
+}
+
+func (e *unexpectedDataTypeError) Error() string {
+	return fmt.Sprintf("unexpected data type %v, expected one of: %v", e.got, e.want)
+}
+
+func (e *unexpectedDataTypeError) Is(target error) bool {
+	t, ok := target.(*unexpectedDataTypeError)
+	if !ok {
+		return false
+	}
+
+	if len(t.want) > 0 {
+		// Use a map to check that the "want" errors in e and t contain the same values, ignoring
+		// any ordering differences.
+		acc := make(map[DataType]int, len(t.want))
+
+		// Increment counter for each data type in e.
+		for _, dt := range e.want {
+			if _, ok := acc[dt]; !ok {
+				acc[dt] = 0
+			}
+			acc[dt]++
+		}
+
+		// Decrement counter for each data type in t.
+		for _, dt := range t.want {
+			if _, ok := acc[dt]; !ok {
+				return false
+			}
+			acc[dt]--
+		}
+
+		// If the "want" errors in e and t are equivalent, all counters should be zero.
+		for _, n := range acc {
+			if n != 0 {
+				return false
+			}
+		}
+	}
+
+	return (e.got == t.got || t.got == 0)
+}
+
+// OptCryptoMessageMetadata sets metadata for a crypto message data object. The format type is set
+// to ft, and the message type is set to mt.
+//
+// If this option is applied to a data object with an incompatible type, an error is returned.
+func OptCryptoMessageMetadata(ft FormatType, mt MessageType) DescriptorInputOpt {
+	return func(t DataType, opts *descriptorOpts) error {
+		if got, want := t, DataCryptoMessage; got != want {
+			return &unexpectedDataTypeError{got, []DataType{want}}
+		}
+
+		m := cryptoMessage{
+			Formattype:  ft,
+			Messagetype: mt,
+		}
+
+		opts.md = binaryMarshaler{m}
+		return nil
+	}
+}
+
+var errUnknownArchitecture = errors.New("unknown architecture")
+
+// OptPartitionMetadata sets metadata for a partition data object. The filesystem type is set to
+// fs, the partition type is set to pt, and the CPU architecture is set to arch. The value of arch
+// should be the architecture as represented by the Go runtime.
+//
+// If this option is applied to a data object with an incompatible type, an error is returned.
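+//
+// For example, a squashfs root partition for amd64 (editorial sketch):
+//
+//	opt := OptPartitionMetadata(FsSquash, PartPrimSys, "amd64")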
+func OptPartitionMetadata(fs FSType, pt PartType, arch string) DescriptorInputOpt {
+	return func(t DataType, opts *descriptorOpts) error {
+		if got, want := t, DataPartition; got != want {
+			return &unexpectedDataTypeError{got, []DataType{want}}
+		}
+
+		sifarch := getSIFArch(arch)
+		if sifarch == hdrArchUnknown {
+			return fmt.Errorf("%w: %v", errUnknownArchitecture, arch)
+		}
+
+		p := partition{
+			Fstype:   fs,
+			Parttype: pt,
+			Arch:     sifarch,
+		}
+
+		opts.md = p
+		return nil
+	}
+}
+
+// sifHashType converts h into a hashType.
+func sifHashType(h crypto.Hash) hashType {
+	switch h {
+	case crypto.SHA256:
+		return hashSHA256
+	case crypto.SHA384:
+		return hashSHA384
+	case crypto.SHA512:
+		return hashSHA512
+	case crypto.BLAKE2s_256:
+		return hashBLAKE2S
+	case crypto.BLAKE2b_256:
+		return hashBLAKE2B
+	}
+	return 0
+}
+
+// OptSignatureMetadata sets metadata for a signature data object. The hash type is set to ht, and
+// the signing entity fingerprint is set to fp.
+//
+// If this option is applied to a data object with an incompatible type, an error is returned.
+func OptSignatureMetadata(ht crypto.Hash, fp []byte) DescriptorInputOpt {
+	return func(t DataType, opts *descriptorOpts) error {
+		if got, want := t, DataSignature; got != want {
+			return &unexpectedDataTypeError{got, []DataType{want}}
+		}
+
+		s := signature{
+			Hashtype: sifHashType(ht),
+		}
+		copy(s.Entity[:], fp)
+
+		opts.md = binaryMarshaler{s}
+		return nil
+	}
+}
+
+// OptSBOMMetadata sets metadata for an SBOM data object. The SBOM format is set to f.
+//
+// If this option is applied to a data object with an incompatible type, an error is returned.
+func OptSBOMMetadata(f SBOMFormat) DescriptorInputOpt {
+	return func(t DataType, opts *descriptorOpts) error {
+		if got, want := t, DataSBOM; got != want {
+			return &unexpectedDataTypeError{got, []DataType{want}}
+		}
+
+		s := sbom{
+			Format: f,
+		}
+
+		opts.md = binaryMarshaler{s}
+		return nil
+	}
+}
+
+// DescriptorInput describes a new data object.
+type DescriptorInput struct {
+	dt   DataType
+	r    io.Reader
+	opts descriptorOpts
+}
+
+// DefaultObjectGroup is the default group that data objects are placed in.
+const DefaultObjectGroup = 1
+
+// NewDescriptorInput returns a DescriptorInput representing a data object of type t, with contents
+// read from r, configured according to opts.
+//
+// It is possible (and often necessary) to store additional metadata related to certain types of
+// data objects. Consider supplying options such as OptCryptoMessageMetadata, OptPartitionMetadata,
+// OptSignatureMetadata, and OptSBOMMetadata for this purpose. To set custom metadata, use
+// OptMetadata.
+//
+// By default, the data object will be placed in the default data object group (1). To override
+// this behavior, use OptNoGroup or OptGroupID. To link this data object, use OptLinkedID or
+// OptLinkedGroupID.
+//
+// By default, the data object will not be aligned unless it is of type DataPartition, in which
+// case it will be aligned on a 4096 byte boundary. To override this behavior, consider using
+// OptObjectAlignment.
+//
+// By default, no name is set for the data object. To set a name, use OptObjectName.
+//
+// When creating a new image, data object creation/modification times are set to the image creation
+// time. When modifying an existing image, the data object creation/modification time is set to the
+// image modification time. To override this behavior, consider using OptObjectTime.
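+//
+// A hypothetical call combining several of the options above (editorial
+// sketch; the reader r is an assumption):
+//
+//	di, err := NewDescriptorInput(DataPartition, r,
+//		OptObjectName("rootfs"),
+//		OptPartitionMetadata(FsSquash, PartPrimSys, "amd64"),
+//	)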
+func NewDescriptorInput(t DataType, r io.Reader, opts ...DescriptorInputOpt) (DescriptorInput, error) { + dopts := descriptorOpts{ + groupID: DefaultObjectGroup, + } + + if t == DataPartition { + dopts.alignment = 4096 + } + + // Accumulate hash for OCI blobs as they are written. + if t == DataOCIRootIndex || t == DataOCIBlob { + md := newOCIBlobDigest() + + r = io.TeeReader(r, md.hasher) + + dopts.md = md + } + + for _, opt := range opts { + if err := opt(t, &dopts); err != nil { + return DescriptorInput{}, fmt.Errorf("%w", err) + } + } + + di := DescriptorInput{ + dt: t, + r: r, + opts: dopts, + } + + return di, nil +} + +// fillDescriptor fills d according to di. If di does not explicitly specify a time value, use t. +func (di DescriptorInput) fillDescriptor(t time.Time, d *rawDescriptor) error { + d.DataType = di.dt + d.GroupID = di.opts.groupID | descrGroupMask + d.LinkedID = di.opts.linkID + + if !di.opts.t.IsZero() { + t = di.opts.t + } + d.CreatedAt = t.Unix() + d.ModifiedAt = t.Unix() + + d.UID = 0 + d.GID = 0 + + if err := d.setName(di.opts.name); err != nil { + return err + } + + return d.setExtra(di.opts.md) +} diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/load.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/load.go new file mode 100644 index 0000000000..75266e1945 --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/load.go @@ -0,0 +1,174 @@ +// Copyright (c) 2018-2021, Sylabs Inc. All rights reserved. +// Copyright (c) 2017, SingularityWare, LLC. All rights reserved. +// Copyright (c) 2017, Yannick Cote All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +package sif + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "os" +) + +var ( + errInvalidMagic = errors.New("invalid SIF magic") + errIncompatibleVersion = errors.New("incompatible SIF version") +) + +// isValidSif looks at key fields from the global header to assess SIF validity. +func isValidSif(f *FileImage) error { + if f.h.Magic != hdrMagic { + return errInvalidMagic + } + + if f.h.Version != CurrentVersion.bytes() { + return errIncompatibleVersion + } + + return nil +} + +// populateMinIDs populates the minIDs field of f. +func (f *FileImage) populateMinIDs() { + f.minIDs = make(map[uint32]uint32) + f.WithDescriptors(func(d Descriptor) bool { + if minID, ok := f.minIDs[d.raw.GroupID]; !ok || d.ID() < minID { + f.minIDs[d.raw.GroupID] = d.ID() + } + return false + }) +} + +// loadContainer loads a SIF image from rw. +func loadContainer(rw ReadWriter) (*FileImage, error) { + f := FileImage{rw: rw} + + // Read global header. + err := binary.Read( + io.NewSectionReader(rw, 0, int64(binary.Size(f.h))), + binary.LittleEndian, + &f.h, + ) + if err != nil { + return nil, fmt.Errorf("reading global header: %w", err) + } + + if err := isValidSif(&f); err != nil { + return nil, err + } + + // Read descriptors. + f.rds = make([]rawDescriptor, f.h.DescriptorsTotal) + err = binary.Read( + io.NewSectionReader(rw, f.h.DescriptorsOffset, f.h.DescriptorsSize), + binary.LittleEndian, + &f.rds, + ) + if err != nil { + return nil, fmt.Errorf("reading descriptors: %w", err) + } + + f.populateMinIDs() + + return &f, nil +} + +// loadOpts accumulates container loading options. +type loadOpts struct { + flag int + closeOnUnload bool +} + +// LoadOpt are used to specify container loading options. 
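+//
+// Loading is typically driven through LoadContainerFromPath or LoadContainer,
+// defined below. For example, to load an image read-only and release it when
+// done (editorial sketch; "image.sif" is an assumed path):
+//
+//	f, err := LoadContainerFromPath("image.sif", OptLoadWithFlag(os.O_RDONLY))
+//	if err != nil {
+//		return err
+//	}
+//	defer f.UnloadContainer()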
+type LoadOpt func(*loadOpts) error + +// OptLoadWithFlag specifies flag (os.O_RDONLY etc.) to be used when opening the container file. +func OptLoadWithFlag(flag int) LoadOpt { + return func(lo *loadOpts) error { + lo.flag = flag + return nil + } +} + +// OptLoadWithCloseOnUnload specifies whether the ReadWriter should be closed by UnloadContainer. +// By default, the ReadWriter will be closed if it implements the io.Closer interface. +func OptLoadWithCloseOnUnload(b bool) LoadOpt { + return func(lo *loadOpts) error { + lo.closeOnUnload = b + return nil + } +} + +// LoadContainerFromPath loads a new SIF container from path, according to opts. +// +// On success, a FileImage is returned. The caller must call UnloadContainer to ensure resources +// are released. +// +// By default, the file is opened for read and write access. To change this behavior, consider +// using OptLoadWithFlag. +func LoadContainerFromPath(path string, opts ...LoadOpt) (*FileImage, error) { + lo := loadOpts{ + flag: os.O_RDWR, + } + + for _, opt := range opts { + if err := opt(&lo); err != nil { + return nil, fmt.Errorf("%w", err) + } + } + + fp, err := os.OpenFile(path, lo.flag, 0) + if err != nil { + return nil, fmt.Errorf("%w", err) + } + + f, err := loadContainer(fp) + if err != nil { + fp.Close() + + return nil, fmt.Errorf("%w", err) + } + + f.closeOnUnload = true + return f, nil +} + +// LoadContainer loads a new SIF container from rw, according to opts. +// +// On success, a FileImage is returned. The caller must call UnloadContainer to ensure resources +// are released. By default, UnloadContainer will close rw if it implements the io.Closer +// interface. To change this behavior, consider using OptLoadWithCloseOnUnload. +func LoadContainer(rw ReadWriter, opts ...LoadOpt) (*FileImage, error) { + lo := loadOpts{ + closeOnUnload: true, + } + + for _, opt := range opts { + if err := opt(&lo); err != nil { + return nil, fmt.Errorf("%w", err) + } + } + + f, err := loadContainer(rw) + if err != nil { + return nil, fmt.Errorf("%w", err) + } + + f.closeOnUnload = lo.closeOnUnload + return f, nil +} + +// UnloadContainer unloads f, releasing associated resources. +func (f *FileImage) UnloadContainer() error { + if c, ok := f.rw.(io.Closer); ok && f.closeOnUnload { + if err := c.Close(); err != nil { + return fmt.Errorf("%w", err) + } + } + return nil +} diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/select.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/select.go new file mode 100644 index 0000000000..ee7892f716 --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/select.go @@ -0,0 +1,222 @@ +// Copyright (c) 2021-2023, Sylabs Inc. All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +package sif + +import ( + "errors" + "fmt" + + v1 "github.com/google/go-containerregistry/pkg/v1" +) + +// ErrNoObjects is the error returned when an image contains no data objects. +var ErrNoObjects = errors.New("no objects in image") + +// ErrObjectNotFound is the error returned when a data object is not found. +var ErrObjectNotFound = errors.New("object not found") + +// ErrMultipleObjectsFound is the error returned when multiple data objects are found. +var ErrMultipleObjectsFound = errors.New("multiple objects found") + +// ErrInvalidObjectID is the error returned when an invalid object ID is supplied. 
+var ErrInvalidObjectID = errors.New("invalid object ID") + +// ErrInvalidGroupID is the error returned when an invalid group ID is supplied. +var ErrInvalidGroupID = errors.New("invalid group ID") + +// DescriptorSelectorFunc returns true if d matches, and false otherwise. +type DescriptorSelectorFunc func(d Descriptor) (bool, error) + +// WithDataType selects descriptors that have data type dt. +func WithDataType(dt DataType) DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + return d.DataType() == dt, nil + } +} + +// WithID selects descriptors with a matching ID. +func WithID(id uint32) DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + if id == 0 { + return false, ErrInvalidObjectID + } + return d.ID() == id, nil + } +} + +// WithNoGroup selects descriptors that are not contained within an object group. +func WithNoGroup() DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + return d.GroupID() == 0, nil + } +} + +// WithGroupID returns a selector func that selects descriptors with a matching groupID. +func WithGroupID(groupID uint32) DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + if groupID == 0 { + return false, ErrInvalidGroupID + } + return d.GroupID() == groupID, nil + } +} + +// WithLinkedID selects descriptors that are linked to the data object with specified ID. +func WithLinkedID(id uint32) DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + if id == 0 { + return false, ErrInvalidObjectID + } + linkedID, isGroup := d.LinkedID() + return !isGroup && linkedID == id, nil + } +} + +// WithLinkedGroupID selects descriptors that are linked to the data object group with specified +// ID. +func WithLinkedGroupID(groupID uint32) DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + if groupID == 0 { + return false, ErrInvalidGroupID + } + linkedID, isGroup := d.LinkedID() + return isGroup && linkedID == groupID, nil + } +} + +// WithPartitionType selects descriptors containing a partition of type pt. +func WithPartitionType(pt PartType) DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + return d.raw.isPartitionOfType(pt), nil + } +} + +// WithOCIBlobDigest selects descriptors that contain a OCI blob with the specified digest. +func WithOCIBlobDigest(digest v1.Hash) DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + if h, err := d.OCIBlobDigest(); err == nil { + return h.String() == digest.String(), nil + } + return false, nil + } +} + +// descriptorFromRaw populates a Descriptor from rd. +func (f *FileImage) descriptorFromRaw(rd *rawDescriptor) Descriptor { + return Descriptor{ + raw: *rd, + r: f.rw, + relativeID: rd.ID - f.minIDs[rd.GroupID], + } +} + +// GetDescriptors returns a slice of in-use descriptors for which all selector funcs return true. +// If the image contains no data objects, an error wrapping ErrNoObjects is returned. +func (f *FileImage) GetDescriptors(fns ...DescriptorSelectorFunc) ([]Descriptor, error) { + if f.DescriptorsFree() == f.DescriptorsTotal() { + return nil, fmt.Errorf("%w", ErrNoObjects) + } + + var ds []Descriptor + + err := f.withDescriptors(multiSelectorFunc(fns...), func(d *rawDescriptor) error { + ds = append(ds, f.descriptorFromRaw(d)) + return nil + }) + if err != nil { + return nil, fmt.Errorf("%w", err) + } + + return ds, nil +} + +// getDescriptor returns a pointer to the in-use descriptor selected by fns. If no descriptor is +// selected by fns, ErrObjectNotFound is returned. 
If multiple descriptors are selected by fns, +// ErrMultipleObjectsFound is returned. +func (f *FileImage) getDescriptor(fns ...DescriptorSelectorFunc) (*rawDescriptor, error) { + var d *rawDescriptor + + err := f.withDescriptors(multiSelectorFunc(fns...), func(found *rawDescriptor) error { + if d != nil { + return ErrMultipleObjectsFound + } + d = found + return nil + }) + + if err == nil && d == nil { + err = ErrObjectNotFound + } + + return d, err +} + +// GetDescriptor returns the in-use descriptor selected by fns. If the image contains no data +// objects, an error wrapping ErrNoObjects is returned. If no descriptor is selected by fns, an +// error wrapping ErrObjectNotFound is returned. If multiple descriptors are selected by fns, an +// error wrapping ErrMultipleObjectsFound is returned. +func (f *FileImage) GetDescriptor(fns ...DescriptorSelectorFunc) (Descriptor, error) { + if f.DescriptorsFree() == f.DescriptorsTotal() { + return Descriptor{}, fmt.Errorf("%w", ErrNoObjects) + } + + d, err := f.getDescriptor(fns...) + if err != nil { + return Descriptor{}, fmt.Errorf("%w", err) + } + + return f.descriptorFromRaw(d), nil +} + +// multiSelectorFunc returns a DescriptorSelectorFunc that selects a descriptor iff all of fns +// select the descriptor. +func multiSelectorFunc(fns ...DescriptorSelectorFunc) DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + for _, fn := range fns { + if ok, err := fn(d); !ok || err != nil { + return ok, err + } + } + return true, nil + } +} + +// withDescriptors calls onMatchFn with each in-use descriptor in f for which selectFn returns +// true. If selectFn or onMatchFn return a non-nil error, the iteration halts, and the error is +// returned to the caller. +func (f *FileImage) withDescriptors(selectFn DescriptorSelectorFunc, onMatchFn func(*rawDescriptor) error) error { + for i, d := range f.rds { + if !d.Used { + continue + } + + if ok, err := selectFn(f.descriptorFromRaw(&f.rds[i])); err != nil { + return err + } else if !ok { + continue + } + + if err := onMatchFn(&f.rds[i]); err != nil { + return err + } + } + + return nil +} + +var errAbort = errors.New("abort") + +// abortOnMatch is a semantic convenience function that always returns a non-nil error, which can +// be used as a no-op matchFn. +func abortOnMatch(*rawDescriptor) error { return errAbort } + +// WithDescriptors calls fn with each in-use descriptor in f, until fn returns true. +func (f *FileImage) WithDescriptors(fn func(d Descriptor) bool) { + selectFn := func(d Descriptor) (bool, error) { + return fn(d), nil + } + _ = f.withDescriptors(selectFn, abortOnMatch) +} diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go new file mode 100644 index 0000000000..bf39dfe4cd --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go @@ -0,0 +1,416 @@ +// Copyright (c) 2018-2023, Sylabs Inc. All rights reserved. +// Copyright (c) 2017, SingularityWare, LLC. All rights reserved. +// Copyright (c) 2017, Yannick Cote All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +// Package sif implements data structures and routines to create +// and access SIF files. +// +// Layout of a SIF file (example): +// +// .================================================. +// | GLOBAL HEADER: Sifheader | +// | - launch: "#!/usr/bin/env..." 
| +// | - magic: "SIF_MAGIC" | +// | - version: "1" | +// | - arch: "4" | +// | - uuid: b2659d4e-bd50-4ea5-bd17-eec5e54f918e | +// | - ctime: 1504657553 | +// | - mtime: 1504657653 | +// | - ndescr: 3 | +// | - descroff: 120 | --. +// | - descrlen: 432 | | +// | - dataoff: 4096 | | +// | - datalen: 619362 | | +// |------------------------------------------------| <-' +// | DESCR[0]: Sifdeffile | +// | - Sifcommon | +// | - datatype: DATA_DEFFILE | +// | - id: 1 | +// | - groupid: 1 | +// | - link: NONE | +// | - fileoff: 4096 | --. +// | - filelen: 222 | | +// |------------------------------------------------| <-----. +// | DESCR[1]: Sifpartition | | | +// | - Sifcommon | | | +// | - datatype: DATA_PARTITION | | | +// | - id: 2 | | | +// | - groupid: 1 | | | +// | - link: NONE | | | +// | - fileoff: 4318 | ----. | +// | - filelen: 618496 | | | | +// | - fstype: Squashfs | | | | +// | - parttype: System | | | | +// | - content: Linux | | | | +// |------------------------------------------------| | | | +// | DESCR[2]: Sifsignature | | | | +// | - Sifcommon | | | | +// | - datatype: DATA_SIGNATURE | | | | +// | - id: 3 | | | | +// | - groupid: NONE | | | | +// | - link: 2 | ------' +// | - fileoff: 622814 | ------. +// | - filelen: 644 | | | | +// | - hashtype: SHA384 | | | | +// | - entity: @ | | | | +// |------------------------------------------------| <-' | | +// | Definition file data | | | +// | . | | | +// | . | | | +// | . | | | +// |------------------------------------------------| <---' | +// | File system partition image | | +// | . | | +// | . | | +// | . | | +// |------------------------------------------------| <-----' +// | Signed verification data | +// | . | +// | . | +// | . | +// `================================================' +package sif + +import ( + "bytes" + "fmt" + "io" + "time" + + "github.com/google/uuid" +) + +// SIF header constants and quantities. +const ( + hdrLaunchLen = 32 // len("#!/usr/bin/env... ") + hdrMagicLen = 10 // len("SIF_MAGIC") + hdrVersionLen = 3 // len("99") +) + +var hdrMagic = [...]byte{'S', 'I', 'F', '_', 'M', 'A', 'G', 'I', 'C', '\x00'} + +// SpecVersion specifies a SIF specification version. +type SpecVersion uint8 + +func (v SpecVersion) String() string { return fmt.Sprintf("%02d", v) } + +// bytes returns the value of b, formatted for direct inclusion in a SIF header. +func (v SpecVersion) bytes() [hdrVersionLen]byte { + var b [3]byte + copy(b[:], fmt.Sprintf("%02d", v)) + return b +} + +// SIF specification versions. +const ( + version01 SpecVersion = iota + 1 +) + +// CurrentVersion specifies the current SIF specification version. +const CurrentVersion = version01 + +const ( + descrGroupMask = 0xf0000000 // groups start at that offset + descrEntityLen = 256 // len("Joe Bloe ...") + descrNameLen = 128 // descriptor name (string identifier) + descrMaxPrivLen = 384 // size reserved for descriptor specific data +) + +// DataType represents the different SIF data object types stored in the image. +type DataType int32 + +// List of supported SIF data types. 
+const ( + DataDeffile DataType = iota + 0x4001 // definition file data object + DataEnvVar // environment variables data object + DataLabels // JSON labels data object + DataPartition // file system data object + DataSignature // signing/verification data object + DataGenericJSON // generic JSON meta-data + DataGeneric // generic / raw data + DataCryptoMessage // cryptographic message data object + DataSBOM // software bill of materials + DataOCIRootIndex // root OCI index + DataOCIBlob // oci blob data object +) + +// String returns a human-readable representation of t. +func (t DataType) String() string { + switch t { + case DataDeffile: + return "Def.FILE" + case DataEnvVar: + return "Env.Vars" + case DataLabels: + return "JSON.Labels" + case DataPartition: + return "FS" + case DataSignature: + return "Signature" + case DataGenericJSON: + return "JSON.Generic" + case DataGeneric: + return "Generic/Raw" + case DataCryptoMessage: + return "Cryptographic Message" + case DataSBOM: + return "SBOM" + case DataOCIRootIndex: + return "OCI.RootIndex" + case DataOCIBlob: + return "OCI.Blob" + } + return "Unknown" +} + +// FSType represents the different SIF file system types found in partition data objects. +type FSType int32 + +// List of supported file systems. +const ( + FsSquash FSType = iota + 1 // Squashfs file system, RDONLY + FsExt3 // EXT3 file system, RDWR (deprecated) + FsImmuObj // immutable data object archive + FsRaw // raw data + FsEncryptedSquashfs // Encrypted Squashfs file system, RDONLY +) + +// String returns a human-readable representation of t. +func (t FSType) String() string { + switch t { + case FsSquash: + return "Squashfs" + case FsExt3: + return "Ext3" + case FsImmuObj: + return "Archive" + case FsRaw: + return "Raw" + case FsEncryptedSquashfs: + return "Encrypted squashfs" + } + return "Unknown" +} + +// PartType represents the different SIF container partition types (system and data). +type PartType int32 + +// List of supported partition types. +const ( + PartSystem PartType = iota + 1 // partition hosts an operating system + PartPrimSys // partition hosts the primary operating system + PartData // partition hosts data only + PartOverlay // partition hosts an overlay +) + +// String returns a human-readable representation of t. +func (t PartType) String() string { + switch t { + case PartSystem: + return "System" + case PartPrimSys: + return "*System" + case PartData: + return "Data" + case PartOverlay: + return "Overlay" + } + return "Unknown" +} + +// hashType represents the different SIF hashing function types used to fingerprint data objects. +type hashType int32 + +// List of supported hash functions. +const ( + hashSHA256 hashType = iota + 1 + hashSHA384 + hashSHA512 + hashBLAKE2S + hashBLAKE2B +) + +// FormatType represents the different formats used to store cryptographic message objects. +type FormatType int32 + +// List of supported cryptographic message formats. +const ( + FormatOpenPGP FormatType = iota + 1 + FormatPEM +) + +// String returns a human-readable representation of t. +func (t FormatType) String() string { + switch t { + case FormatOpenPGP: + return "OpenPGP" + case FormatPEM: + return "PEM" + } + return "Unknown" +} + +// MessageType represents the different messages stored within cryptographic message objects. +type MessageType int32 + +// List of supported cryptographic message formats. +const ( + // openPGP formatted messages. + MessageClearSignature MessageType = 0x100 + + // PEM formatted messages. 
+ MessageRSAOAEP MessageType = 0x200 +) + +// String returns a human-readable representation of t. +func (t MessageType) String() string { + switch t { + case MessageClearSignature: + return "Clear Signature" + case MessageRSAOAEP: + return "RSA-OAEP" + } + return "Unknown" +} + +// SBOMFormat represents the format used to store an SBOM object. +type SBOMFormat int32 + +// List of supported SBOM formats. +const ( + SBOMFormatCycloneDXJSON SBOMFormat = iota + 1 // CycloneDX (JSON) + SBOMFormatCycloneDXXML // CycloneDX (XML) + SBOMFormatGitHubJSON // GitHub dependency snapshot (JSON) + SBOMFormatSPDXJSON // SPDX (JSON) + SBOMFormatSPDXRDF // SPDX (RDF/xml) + SBOMFormatSPDXTagValue // SPDX (tag/value) + SBOMFormatSPDXYAML // SPDX (YAML) + SBOMFormatSyftJSON // Syft (JSON) +) + +// String returns a human-readable representation of f. +func (f SBOMFormat) String() string { + switch f { + case SBOMFormatCycloneDXJSON: + return "cyclonedx-json" + case SBOMFormatCycloneDXXML: + return "cyclonedx-xml" + case SBOMFormatGitHubJSON: + return "github-json" + case SBOMFormatSPDXJSON: + return "spdx-json" + case SBOMFormatSPDXRDF: + return "spdx-rdf" + case SBOMFormatSPDXTagValue: + return "spdx-tag-value" + case SBOMFormatSPDXYAML: + return "spdx-yaml" + case SBOMFormatSyftJSON: + return "syft-json" + } + return "unknown" +} + +// header describes a loaded SIF file. +type header struct { + LaunchScript [hdrLaunchLen]byte + + Magic [hdrMagicLen]byte + Version [hdrVersionLen]byte + Arch archType + ID uuid.UUID + + CreatedAt int64 + ModifiedAt int64 + + DescriptorsFree int64 + DescriptorsTotal int64 + DescriptorsOffset int64 + DescriptorsSize int64 + DataOffset int64 + DataSize int64 +} + +// GetIntegrityReader returns an io.Reader that reads the integrity-protected fields from h. +func (h header) GetIntegrityReader() io.Reader { + return io.MultiReader( + bytes.NewReader(h.LaunchScript[:]), + bytes.NewReader(h.Magic[:]), + bytes.NewReader(h.Version[:]), + bytes.NewReader(h.ID[:]), + ) +} + +// ReadWriter describes the interface required to read and write SIF images. +type ReadWriter interface { + io.ReaderAt + io.WriteSeeker + Truncate(int64) error +} + +// FileImage describes the representation of a SIF file in memory. +type FileImage struct { + rw ReadWriter // Backing storage for image. + + h header // Raw global header from image. + rds []rawDescriptor // Raw descriptors from image. + + closeOnUnload bool // Close rw on Unload. + minIDs map[uint32]uint32 // Minimum object IDs for each group ID. +} + +// LaunchScript returns the image launch script. +func (f *FileImage) LaunchScript() string { + return string(bytes.TrimRight(f.h.LaunchScript[:], "\x00")) +} + +// Version returns the SIF specification version of the image. +func (f *FileImage) Version() string { + return string(bytes.TrimRight(f.h.Version[:], "\x00")) +} + +// PrimaryArch returns the primary CPU architecture of the image, or "unknown" if the primary CPU +// architecture cannot be determined. +func (f *FileImage) PrimaryArch() string { return f.h.Arch.GoArch() } + +// ID returns the ID of the image. +func (f *FileImage) ID() string { return f.h.ID.String() } + +// CreatedAt returns the creation time of the image. +func (f *FileImage) CreatedAt() time.Time { return time.Unix(f.h.CreatedAt, 0) } + +// ModifiedAt returns the last modification time of the image. +func (f *FileImage) ModifiedAt() time.Time { return time.Unix(f.h.ModifiedAt, 0) } + +// DescriptorsFree returns the number of free descriptors in the image. 
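+// When DescriptorsFree equals DescriptorsTotal, the image contains no data
+// objects; GetDescriptor and GetDescriptors report this case as ErrNoObjects.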
+func (f *FileImage) DescriptorsFree() int64 { return f.h.DescriptorsFree } + +// DescriptorsTotal returns the total number of descriptors in the image. +func (f *FileImage) DescriptorsTotal() int64 { return f.h.DescriptorsTotal } + +// DescriptorsOffset returns the offset (in bytes) of the descriptors section in the image. +func (f *FileImage) DescriptorsOffset() int64 { return f.h.DescriptorsOffset } + +// DescriptorsSize returns the size (in bytes) of the descriptors section in the image. +func (f *FileImage) DescriptorsSize() int64 { return f.h.DescriptorsSize } + +// DataOffset returns the offset (in bytes) of the data section in the image. +func (f *FileImage) DataOffset() int64 { return f.h.DataOffset } + +// DataSize returns the size (in bytes) of the data section in the image. +func (f *FileImage) DataSize() int64 { return f.h.DataSize } + +// GetHeaderIntegrityReader returns an io.Reader that reads the integrity-protected fields from the +// header of the image. +func (f *FileImage) GetHeaderIntegrityReader() io.Reader { + return f.h.GetIntegrityReader() +} + +// isDeterministic returns true if the UUID and timestamps in the header of f are set to +// deterministic values. +func (f *FileImage) isDeterministic() bool { + return f.h.ID == uuid.Nil && f.CreatedAt().IsZero() && f.ModifiedAt().IsZero() +} diff --git a/vendor/github.com/tchap/go-patricia/v2/AUTHORS b/vendor/github.com/tchap/go-patricia/v2/AUTHORS new file mode 100644 index 0000000000..e640b0bf51 --- /dev/null +++ b/vendor/github.com/tchap/go-patricia/v2/AUTHORS @@ -0,0 +1,3 @@ +This is the complete list of go-patricia copyright holders: + +Ondřej Kupka diff --git a/vendor/github.com/tchap/go-patricia/v2/LICENSE b/vendor/github.com/tchap/go-patricia/v2/LICENSE new file mode 100644 index 0000000000..e50d398e98 --- /dev/null +++ b/vendor/github.com/tchap/go-patricia/v2/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 The AUTHORS + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/tchap/go-patricia/v2/patricia/children.go b/vendor/github.com/tchap/go-patricia/v2/patricia/children.go new file mode 100644 index 0000000000..bcfd0a5dd0 --- /dev/null +++ b/vendor/github.com/tchap/go-patricia/v2/patricia/children.go @@ -0,0 +1,363 @@ +// Copyright (c) 2014 The go-patricia AUTHORS +// +// Use of this source code is governed by The MIT License +// that can be found in the LICENSE file. 
+ +package patricia + +import ( + "fmt" + "io" + "sort" +) + +type childList interface { + length() int + head() *Trie + add(child *Trie) childList + remove(b byte) + replace(b byte, child *Trie) + next(b byte) *Trie + walk(prefix *Prefix, visitor VisitorFunc) error + print(w io.Writer, indent int) + clone() childList + total() int +} + +type tries []*Trie + +func (t tries) Len() int { + return len(t) +} + +func (t tries) Less(i, j int) bool { + strings := sort.StringSlice{string(t[i].prefix), string(t[j].prefix)} + return strings.Less(0, 1) +} + +func (t tries) Swap(i, j int) { + t[i], t[j] = t[j], t[i] +} + +type sparseChildList struct { + children tries +} + +func newSparseChildList(maxChildrenPerSparseNode int) childList { + return &sparseChildList{ + children: make(tries, 0, maxChildrenPerSparseNode), + } +} + +func (list *sparseChildList) length() int { + return len(list.children) +} + +func (list *sparseChildList) head() *Trie { + return list.children[0] +} + +func (list *sparseChildList) add(child *Trie) childList { + // Search for an empty spot and insert the child if possible. + if len(list.children) != cap(list.children) { + list.children = append(list.children, child) + return list + } + + // Otherwise we have to transform to the dense list type. + return newDenseChildList(list, child) +} + +func (list *sparseChildList) remove(b byte) { + for i, node := range list.children { + if node.prefix[0] == b { + list.children[i] = list.children[len(list.children)-1] + list.children[len(list.children)-1] = nil + list.children = list.children[:len(list.children)-1] + return + } + } + + // This is not supposed to be reached. + panic("removing non-existent child") +} + +func (list *sparseChildList) replace(b byte, child *Trie) { + // Make a consistency check. + if p0 := child.prefix[0]; p0 != b { + panic(fmt.Errorf("child prefix mismatch: %v != %v", p0, b)) + } + + // Seek the child and replace it. + for i, node := range list.children { + if node.prefix[0] == b { + list.children[i] = child + return + } + } +} + +func (list *sparseChildList) next(b byte) *Trie { + for _, child := range list.children { + if child.prefix[0] == b { + return child + } + } + return nil +} + +func (list *sparseChildList) walk(prefix *Prefix, visitor VisitorFunc) error { + + sort.Sort(list.children) + + for _, child := range list.children { + *prefix = append(*prefix, child.prefix...) 
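+
+		// child.prefix was just appended to *prefix; every branch below
+		// truncates it again before the next sibling is visited, so *prefix
+		// always holds the full key of the node currently being walked.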
+ if child.item != nil { + err := visitor(*prefix, child.item) + if err != nil { + if err == SkipSubtree { + *prefix = (*prefix)[:len(*prefix)-len(child.prefix)] + continue + } + *prefix = (*prefix)[:len(*prefix)-len(child.prefix)] + return err + } + } + + err := child.children.walk(prefix, visitor) + *prefix = (*prefix)[:len(*prefix)-len(child.prefix)] + if err != nil { + return err + } + } + + return nil +} + +func (list *sparseChildList) total() int { + tot := 0 + for _, child := range list.children { + if child != nil { + tot = tot + child.total() + } + } + return tot +} + +func (list *sparseChildList) clone() childList { + clones := make(tries, len(list.children), cap(list.children)) + for i, child := range list.children { + clones[i] = child.Clone() + } + + return &sparseChildList{ + children: clones, + } +} + +func (list *sparseChildList) print(w io.Writer, indent int) { + for _, child := range list.children { + if child != nil { + child.print(w, indent) + } + } +} + +type denseChildList struct { + min int + max int + numChildren int + headIndex int + children []*Trie +} + +func newDenseChildList(list *sparseChildList, child *Trie) childList { + var ( + min int = 255 + max int = 0 + ) + for _, child := range list.children { + b := int(child.prefix[0]) + if b < min { + min = b + } + if b > max { + max = b + } + } + + b := int(child.prefix[0]) + if b < min { + min = b + } + if b > max { + max = b + } + + children := make([]*Trie, max-min+1) + for _, child := range list.children { + children[int(child.prefix[0])-min] = child + } + children[int(child.prefix[0])-min] = child + + return &denseChildList{ + min: min, + max: max, + numChildren: list.length() + 1, + headIndex: 0, + children: children, + } +} + +func (list *denseChildList) length() int { + return list.numChildren +} + +func (list *denseChildList) head() *Trie { + return list.children[list.headIndex] +} + +func (list *denseChildList) add(child *Trie) childList { + b := int(child.prefix[0]) + var i int + + switch { + case list.min <= b && b <= list.max: + if list.children[b-list.min] != nil { + panic("dense child list collision detected") + } + i = b - list.min + list.children[i] = child + + case b < list.min: + children := make([]*Trie, list.max-b+1) + i = 0 + children[i] = child + copy(children[list.min-b:], list.children) + list.children = children + list.min = b + + default: // b > list.max + children := make([]*Trie, b-list.min+1) + i = b - list.min + children[i] = child + copy(children, list.children) + list.children = children + list.max = b + } + + list.numChildren++ + if i < list.headIndex { + list.headIndex = i + } + return list +} + +func (list *denseChildList) remove(b byte) { + i := int(b) - list.min + if list.children[i] == nil { + // This is not supposed to be reached. + panic("removing non-existent child") + } + list.numChildren-- + list.children[i] = nil + + // Update head index. + if i == list.headIndex { + for ; i < len(list.children); i++ { + if list.children[i] != nil { + list.headIndex = i + return + } + } + } +} + +func (list *denseChildList) replace(b byte, child *Trie) { + // Make a consistency check. + if p0 := child.prefix[0]; p0 != b { + panic(fmt.Errorf("child prefix mismatch: %v != %v", p0, b)) + } + + // Replace the child. 
+ list.children[int(b)-list.min] = child +} + +func (list *denseChildList) next(b byte) *Trie { + i := int(b) + if i < list.min || list.max < i { + return nil + } + return list.children[i-list.min] +} + +func (list *denseChildList) walk(prefix *Prefix, visitor VisitorFunc) error { + for _, child := range list.children { + if child == nil { + continue + } + *prefix = append(*prefix, child.prefix...) + if child.item != nil { + if err := visitor(*prefix, child.item); err != nil { + if err == SkipSubtree { + *prefix = (*prefix)[:len(*prefix)-len(child.prefix)] + continue + } + *prefix = (*prefix)[:len(*prefix)-len(child.prefix)] + return err + } + } + + err := child.children.walk(prefix, visitor) + *prefix = (*prefix)[:len(*prefix)-len(child.prefix)] + if err != nil { + return err + } + } + + return nil +} + +func (list *denseChildList) print(w io.Writer, indent int) { + for _, child := range list.children { + if child != nil { + child.print(w, indent) + } + } +} + +func (list *denseChildList) clone() childList { + clones := make(tries, cap(list.children)) + + if list.numChildren != 0 { + clonedCount := 0 + for i := list.headIndex; i < len(list.children); i++ { + child := list.children[i] + if child != nil { + clones[i] = child.Clone() + clonedCount++ + if clonedCount == list.numChildren { + break + } + } + } + } + + return &denseChildList{ + min: list.min, + max: list.max, + numChildren: list.numChildren, + headIndex: list.headIndex, + children: clones, + } +} + +func (list *denseChildList) total() int { + tot := 0 + for _, child := range list.children { + if child != nil { + tot = tot + child.total() + } + } + return tot +} diff --git a/vendor/github.com/tchap/go-patricia/v2/patricia/patricia.go b/vendor/github.com/tchap/go-patricia/v2/patricia/patricia.go new file mode 100644 index 0000000000..7b9975e383 --- /dev/null +++ b/vendor/github.com/tchap/go-patricia/v2/patricia/patricia.go @@ -0,0 +1,606 @@ +// Copyright (c) 2014 The go-patricia AUTHORS +// +// Use of this source code is governed by The MIT License +// that can be found in the LICENSE file. + +package patricia + +import ( + "bytes" + "errors" + "fmt" + "io" + "strings" +) + +//------------------------------------------------------------------------------ +// Trie +//------------------------------------------------------------------------------ + +const ( + DefaultMaxPrefixPerNode = 10 + DefaultMaxChildrenPerSparseNode = 8 +) + +type ( + Prefix []byte + Item interface{} + VisitorFunc func(prefix Prefix, item Item) error +) + +// Trie is a generic patricia trie that allows fast retrieval of items by prefix. +// and other funky stuff. +// +// Trie is not thread-safe. +type Trie struct { + prefix Prefix + item Item + + maxPrefixPerNode int + maxChildrenPerSparseNode int + + children childList +} + +// Public API ------------------------------------------------------------------ + +type Option func(*Trie) + +// Trie constructor. 
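+//
+// For example (editorial sketch):
+//
+//	trie := NewTrie(MaxPrefixPerNode(16), MaxChildrenPerSparseNode(4))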
+func NewTrie(options ...Option) *Trie { + trie := &Trie{} + + for _, opt := range options { + opt(trie) + } + + if trie.maxPrefixPerNode <= 0 { + trie.maxPrefixPerNode = DefaultMaxPrefixPerNode + } + if trie.maxChildrenPerSparseNode <= 0 { + trie.maxChildrenPerSparseNode = DefaultMaxChildrenPerSparseNode + } + + trie.children = newSparseChildList(trie.maxChildrenPerSparseNode) + return trie +} + +func MaxPrefixPerNode(value int) Option { + return func(trie *Trie) { + trie.maxPrefixPerNode = value + } +} + +func MaxChildrenPerSparseNode(value int) Option { + return func(trie *Trie) { + trie.maxChildrenPerSparseNode = value + } +} + +// Clone makes a copy of an existing trie. +// Items stored in both tries become shared, obviously. +func (trie *Trie) Clone() *Trie { + return &Trie{ + prefix: append(Prefix(nil), trie.prefix...), + item: trie.item, + maxPrefixPerNode: trie.maxPrefixPerNode, + maxChildrenPerSparseNode: trie.maxChildrenPerSparseNode, + children: trie.children.clone(), + } +} + +// Item returns the item stored in the root of this trie. +func (trie *Trie) Item() Item { + return trie.item +} + +// Insert inserts a new item into the trie using the given prefix. Insert does +// not replace existing items. It returns false if an item was already in place. +func (trie *Trie) Insert(key Prefix, item Item) (inserted bool) { + return trie.put(key, item, false) +} + +// Set works much like Insert, but it always sets the item, possibly replacing +// the item previously inserted. +func (trie *Trie) Set(key Prefix, item Item) { + trie.put(key, item, true) +} + +// Get returns the item located at key. +// +// This method is a bit dangerous, because Get can as well end up in an internal +// node that is not really representing any user-defined value. So when nil is +// a valid value being used, it is not possible to tell if the value was inserted +// into the tree by the user or not. A possible workaround for this is not to use +// nil interface as a valid value, even using zero value of any type is enough +// to prevent this bad behaviour. +func (trie *Trie) Get(key Prefix) (item Item) { + _, node, found, leftover := trie.findSubtree(key) + if !found || len(leftover) != 0 { + return nil + } + return node.item +} + +// Match returns what Get(prefix) != nil would return. The same warning as for +// Get applies here as well. +func (trie *Trie) Match(prefix Prefix) (matchedExactly bool) { + return trie.Get(prefix) != nil +} + +// MatchSubtree returns true when there is a subtree representing extensions +// to key, that is if there are any keys in the tree which have key as prefix. +func (trie *Trie) MatchSubtree(key Prefix) (matched bool) { + _, _, matched, _ = trie.findSubtree(key) + return +} + +// Visit calls visitor on every node containing a non-nil item +// in alphabetical order. +// +// If an error is returned from visitor, the function stops visiting the tree +// and returns that error, unless it is a special error - SkipSubtree. In that +// case Visit skips the subtree represented by the current node and continues +// elsewhere. +func (trie *Trie) Visit(visitor VisitorFunc) error { + return trie.walk(nil, visitor) +} + +func (trie *Trie) size() int { + n := 0 + + trie.walk(nil, func(prefix Prefix, item Item) error { + n++ + return nil + }) + + return n +} + +func (trie *Trie) total() int { + return 1 + trie.children.total() +} + +// VisitSubtree works much like Visit, but it only visits nodes matching prefix. 
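+//
+// For example (editorial sketch; assumes items were inserted under "foo"):
+//
+//	trie.VisitSubtree(Prefix("foo"), func(prefix Prefix, item Item) error {
+//		fmt.Printf("%q: %v\n", prefix, item)
+//		return nil
+//	})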
+func (trie *Trie) VisitSubtree(prefix Prefix, visitor VisitorFunc) error {
+	// Nil prefix not allowed.
+	if prefix == nil {
+		panic(ErrNilPrefix)
+	}
+
+	// Empty trie must be handled explicitly.
+	if trie.prefix == nil {
+		return nil
+	}
+
+	// Locate the relevant subtree.
+	_, root, found, leftover := trie.findSubtree(prefix)
+	if !found {
+		return nil
+	}
+	prefix = append(prefix, leftover...)
+
+	// Visit it.
+	return root.walk(prefix, visitor)
+}
+
+// VisitPrefixes visits only nodes that represent prefixes of key.
+// To say the obvious, returning SkipSubtree from visitor makes no sense here.
+func (trie *Trie) VisitPrefixes(key Prefix, visitor VisitorFunc) error {
+	// Nil key not allowed.
+	if key == nil {
+		panic(ErrNilPrefix)
+	}
+
+	// Empty trie must be handled explicitly.
+	if trie.prefix == nil {
+		return nil
+	}
+
+	// Walk the path matching key prefixes.
+	node := trie
+	prefix := key
+	offset := 0
+	for {
+		// Compute what part of prefix matches.
+		common := node.longestCommonPrefixLength(key)
+		key = key[common:]
+		offset += common
+
+		// Partial match means that there is no subtree matching prefix.
+		if common < len(node.prefix) {
+			return nil
+		}
+
+		// Call the visitor.
+		if item := node.item; item != nil {
+			if err := visitor(prefix[:offset], item); err != nil {
+				return err
+			}
+		}
+
+		if len(key) == 0 {
+			// This node represents key, we are finished.
+			return nil
+		}
+
+		// There is some key suffix left, move to the children.
+		child := node.children.next(key[0])
+		if child == nil {
+			// There is nowhere to continue, return.
+			return nil
+		}
+
+		node = child
+	}
+}
+
+// Delete deletes the item represented by the given prefix.
+//
+// True is returned if the matching node was found and deleted.
+func (trie *Trie) Delete(key Prefix) (deleted bool) {
+	// Nil prefix not allowed.
+	if key == nil {
+		panic(ErrNilPrefix)
+	}
+
+	// Empty trie must be handled explicitly.
+	if trie.prefix == nil {
+		return false
+	}
+
+	// Find the relevant node.
+	path, found, _ := trie.findSubtreePath(key)
+	if !found {
+		return false
+	}
+
+	node := path[len(path)-1]
+	var parent *Trie
+	if len(path) != 1 {
+		parent = path[len(path)-2]
+	}
+
+	// If the item is already set to nil, there is nothing to do.
+	if node.item == nil {
+		return false
+	}
+
+	// Delete the item.
+	node.item = nil
+
+	// Initialise i before goto.
+	// Will be used later in a loop.
+	i := len(path) - 1
+
+	// In case there are some child nodes, we cannot drop the whole subtree.
+	// We can try to compact nodes, though.
+	if node.children.length() != 0 {
+		goto Compact
+	}
+
+	// In case we are at the root, just reset it and we are done.
+	if parent == nil {
+		node.reset()
+		return true
+	}
+
+	// We can drop a subtree.
+	// Find the first ancestor that has its value set or it has 2 or more child nodes.
+	// That will be the node where to drop the subtree at.
+	for ; i >= 0; i-- {
+		if current := path[i]; current.item != nil || current.children.length() >= 2 {
+			break
+		}
+	}
+
+	// Handle the case when there is no such node.
+	// In other words, we can reset the whole tree.
+	if i == -1 {
+		path[0].reset()
+		return true
+	}
+
+	// We can just remove the subtree here.
+	node = path[i]
+	if i == 0 {
+		parent = nil
+	} else {
+		parent = path[i-1]
+	}
+	// i+1 is always a valid index since i is never pointing to the last node.
+	// The loop above skips at least the last node since we are sure that the item
+	// is set to nil and it has no children, otherwise we would be compacting instead.
+ node.children.remove(path[i+1].prefix[0]) + +Compact: + // The node is set to the first non-empty ancestor, + // so try to compact since that might be possible now. + if compacted := node.compact(); compacted != node { + if parent == nil { + *node = *compacted + } else { + parent.children.replace(node.prefix[0], compacted) + *parent = *parent.compact() + } + } + + return true +} + +// DeleteSubtree finds the subtree exactly matching prefix and deletes it. +// +// True is returned if the subtree was found and deleted. +func (trie *Trie) DeleteSubtree(prefix Prefix) (deleted bool) { + // Nil prefix not allowed. + if prefix == nil { + panic(ErrNilPrefix) + } + + // Empty trie must be handled explicitly. + if trie.prefix == nil { + return false + } + + // Locate the relevant subtree. + parent, root, found, _ := trie.findSubtree(prefix) + if !found { + return false + } + + // If we are in the root of the trie, reset the trie. + if parent == nil { + root.reset() + return true + } + + // Otherwise remove the root node from its parent. + parent.children.remove(root.prefix[0]) + return true +} + +// Internal helper methods ----------------------------------------------------- + +func (trie *Trie) empty() bool { + return trie.item == nil && trie.children.length() == 0 +} + +func (trie *Trie) reset() { + trie.prefix = nil + trie.children = newSparseChildList(trie.maxPrefixPerNode) +} + +func (trie *Trie) put(key Prefix, item Item, replace bool) (inserted bool) { + // Nil prefix not allowed. + if key == nil { + panic(ErrNilPrefix) + } + + var ( + common int + node *Trie = trie + child *Trie + ) + + if node.prefix == nil { + if len(key) <= trie.maxPrefixPerNode { + node.prefix = key + goto InsertItem + } + node.prefix = key[:trie.maxPrefixPerNode] + key = key[trie.maxPrefixPerNode:] + goto AppendChild + } + + for { + // Compute the longest common prefix length. + common = node.longestCommonPrefixLength(key) + key = key[common:] + + // Only a part matches, split. + if common < len(node.prefix) { + goto SplitPrefix + } + + // common == len(node.prefix) since never (common > len(node.prefix)) + // common == len(former key) <-> 0 == len(key) + // -> former key == node.prefix + if len(key) == 0 { + goto InsertItem + } + + // Check children for matching prefix. + child = node.children.next(key[0]) + if child == nil { + goto AppendChild + } + node = child + } + +SplitPrefix: + // Split the prefix if necessary. + child = new(Trie) + *child = *node + *node = *NewTrie() + node.prefix = child.prefix[:common] + child.prefix = child.prefix[common:] + child = child.compact() + node.children = node.children.add(child) + +AppendChild: + // Keep appending children until whole prefix is inserted. + // This loop starts with empty node.prefix that needs to be filled. + for len(key) != 0 { + child := NewTrie() + if len(key) <= trie.maxPrefixPerNode { + child.prefix = key + node.children = node.children.add(child) + node = child + goto InsertItem + } else { + child.prefix = key[:trie.maxPrefixPerNode] + key = key[trie.maxPrefixPerNode:] + node.children = node.children.add(child) + node = child + } + } + +InsertItem: + // Try to insert the item if possible. + if replace || node.item == nil { + node.item = item + return true + } + return false +} + +func (trie *Trie) compact() *Trie { + // Only a node with a single child can be compacted. 
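+	// compact returns the node that should stand in for trie: either trie
+	// itself, or its single child with the two prefixes concatenated. Callers
+	// (Delete, put) re-link the returned node in place of the original.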
+	if trie.children.length() != 1 {
+		return trie
+	}
+
+	child := trie.children.head()
+
+	// If any item is set, we cannot compact since we want to retain
+	// the ability to do searching by key. This makes compaction less usable,
+	// but that simply cannot be avoided.
+	if trie.item != nil || child.item != nil {
+		return trie
+	}
+
+	// Make sure the combined prefixes fit into a single node.
+	if len(trie.prefix)+len(child.prefix) > trie.maxPrefixPerNode {
+		return trie
+	}
+
+	// Concatenate the prefixes, move the items.
+	child.prefix = append(trie.prefix, child.prefix...)
+	if trie.item != nil {
+		child.item = trie.item
+	}
+
+	return child
+}
+
+func (trie *Trie) findSubtree(prefix Prefix) (parent *Trie, root *Trie, found bool, leftover Prefix) {
+	// Find the subtree matching prefix.
+	root = trie
+	for {
+		// Compute what part of prefix matches.
+		common := root.longestCommonPrefixLength(prefix)
+		prefix = prefix[common:]
+
+		// We used up the whole prefix, subtree found.
+		if len(prefix) == 0 {
+			found = true
+			leftover = root.prefix[common:]
+			return
+		}
+
+		// Partial match means that there is no subtree matching prefix.
+		if common < len(root.prefix) {
+			leftover = root.prefix[common:]
+			return
+		}
+
+		// There is some prefix left, move to the children.
+		child := root.children.next(prefix[0])
+		if child == nil {
+			// There is nowhere to continue, there is no subtree matching prefix.
+			return
+		}
+
+		parent = root
+		root = child
+	}
+}
+
+func (trie *Trie) findSubtreePath(prefix Prefix) (path []*Trie, found bool, leftover Prefix) {
+	// Find the subtree matching prefix.
+	root := trie
+	var subtreePath []*Trie
+	for {
+		// Append the current root to the path.
+		subtreePath = append(subtreePath, root)
+
+		// Compute what part of prefix matches.
+		common := root.longestCommonPrefixLength(prefix)
+		prefix = prefix[common:]
+
+		// We used up the whole prefix, subtree found.
+		if len(prefix) == 0 {
+			path = subtreePath
+			found = true
+			leftover = root.prefix[common:]
+			return
+		}
+
+		// Partial match means that there is no subtree matching prefix.
+		if common < len(root.prefix) {
+			leftover = root.prefix[common:]
+			return
+		}
+
+		// There is some prefix left, move to the children.
+		child := root.children.next(prefix[0])
+		if child == nil {
+			// There is nowhere to continue, there is no subtree matching prefix.
+			return
+		}
+
+		root = child
+	}
+}
+
+func (trie *Trie) walk(actualRootPrefix Prefix, visitor VisitorFunc) error {
+	var prefix Prefix
+	// Allocate a bit more space for prefix at the beginning.
+	if actualRootPrefix == nil {
+		prefix = make(Prefix, 32+len(trie.prefix))
+		copy(prefix, trie.prefix)
+		prefix = prefix[:len(trie.prefix)]
+	} else {
+		prefix = make(Prefix, 32+len(actualRootPrefix))
+		copy(prefix, actualRootPrefix)
+		prefix = prefix[:len(actualRootPrefix)]
+	}
+
+	// Visit the root first. Note that this works for an empty trie as well since
+	// in that case item == nil && len(children) == 0.
+	if trie.item != nil {
+		if err := visitor(prefix, trie.item); err != nil {
+			if err == SkipSubtree {
+				return nil
+			}
+			return err
+		}
+	}
+
+	// Then continue to the children.
+ return trie.children.walk(&prefix, visitor) +} + +func (trie *Trie) longestCommonPrefixLength(prefix Prefix) (i int) { + for ; i < len(prefix) && i < len(trie.prefix) && prefix[i] == trie.prefix[i]; i++ { + } + return +} + +func (trie *Trie) dump() string { + writer := &bytes.Buffer{} + trie.print(writer, 0) + return writer.String() +} + +func (trie *Trie) print(writer io.Writer, indent int) { + fmt.Fprintf(writer, "%s%s %v\n", strings.Repeat(" ", indent), string(trie.prefix), trie.item) + trie.children.print(writer, indent+2) +} + +// Errors ---------------------------------------------------------------------- + +var ( + SkipSubtree = errors.New("Skip this subtree") + ErrNilPrefix = errors.New("Nil prefix passed into a method call") +) diff --git a/vendor/golang.org/x/mod/LICENSE b/vendor/golang.org/x/mod/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/vendor/golang.org/x/mod/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/mod/PATENTS b/vendor/golang.org/x/mod/PATENTS new file mode 100644 index 0000000000..733099041f --- /dev/null +++ b/vendor/golang.org/x/mod/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/mod/semver/semver.go b/vendor/golang.org/x/mod/semver/semver.go new file mode 100644 index 0000000000..9a2dfd33a7 --- /dev/null +++ b/vendor/golang.org/x/mod/semver/semver.go @@ -0,0 +1,401 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semver implements comparison of semantic version strings. +// In this package, semantic version strings must begin with a leading "v", +// as in "v1.0.0". +// +// The general form of a semantic version string accepted by this package is +// +// vMAJOR[.MINOR[.PATCH[-PRERELEASE][+BUILD]]] +// +// where square brackets indicate optional parts of the syntax; +// MAJOR, MINOR, and PATCH are decimal integers without extra leading zeros; +// PRERELEASE and BUILD are each a series of non-empty dot-separated identifiers +// using only alphanumeric characters and hyphens; and +// all-numeric PRERELEASE identifiers must not have leading zeros. +// +// This package follows Semantic Versioning 2.0.0 (see semver.org) +// with two exceptions. First, it requires the "v" prefix. Second, it recognizes +// vMAJOR and vMAJOR.MINOR (with no prerelease or build suffixes) +// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0. +package semver + +import "sort" + +// parsed returns the parsed form of a semantic version string. +type parsed struct { + major string + minor string + patch string + short string + prerelease string + build string +} + +// IsValid reports whether v is a valid semantic version string. +func IsValid(v string) bool { + _, ok := parse(v) + return ok +} + +// Canonical returns the canonical formatting of the semantic version v. +// It fills in any missing .MINOR or .PATCH and discards build metadata. +// Two semantic versions compare equal only if their canonical formattings +// are identical strings. +// The canonical invalid semantic version is the empty string. +func Canonical(v string) string { + p, ok := parse(v) + if !ok { + return "" + } + if p.build != "" { + return v[:len(v)-len(p.build)] + } + if p.short != "" { + return v + p.short + } + return v +} + +// Major returns the major version prefix of the semantic version v. +// For example, Major("v2.1.0") == "v2". +// If v is an invalid semantic version string, Major returns the empty string. +func Major(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return v[:1+len(pv.major)] +} + +// MajorMinor returns the major.minor version prefix of the semantic version v. +// For example, MajorMinor("v2.1.0") == "v2.1". +// If v is an invalid semantic version string, MajorMinor returns the empty string. +func MajorMinor(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + i := 1 + len(pv.major) + if j := i + 1 + len(pv.minor); j <= len(v) && v[i] == '.' && v[i+1:j] == pv.minor { + return v[:j] + } + return v[:i] + "." 
+ pv.minor +} + +// Prerelease returns the prerelease suffix of the semantic version v. +// For example, Prerelease("v2.1.0-pre+meta") == "-pre". +// If v is an invalid semantic version string, Prerelease returns the empty string. +func Prerelease(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return pv.prerelease +} + +// Build returns the build suffix of the semantic version v. +// For example, Build("v2.1.0+meta") == "+meta". +// If v is an invalid semantic version string, Build returns the empty string. +func Build(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return pv.build +} + +// Compare returns an integer comparing two versions according to +// semantic version precedence. +// The result will be 0 if v == w, -1 if v < w, or +1 if v > w. +// +// An invalid semantic version string is considered less than a valid one. +// All invalid semantic version strings compare equal to each other. +func Compare(v, w string) int { + pv, ok1 := parse(v) + pw, ok2 := parse(w) + if !ok1 && !ok2 { + return 0 + } + if !ok1 { + return -1 + } + if !ok2 { + return +1 + } + if c := compareInt(pv.major, pw.major); c != 0 { + return c + } + if c := compareInt(pv.minor, pw.minor); c != 0 { + return c + } + if c := compareInt(pv.patch, pw.patch); c != 0 { + return c + } + return comparePrerelease(pv.prerelease, pw.prerelease) +} + +// Max canonicalizes its arguments and then returns the version string +// that compares greater. +// +// Deprecated: use [Compare] instead. In most cases, returning a canonicalized +// version is not expected or desired. +func Max(v, w string) string { + v = Canonical(v) + w = Canonical(w) + if Compare(v, w) > 0 { + return v + } + return w +} + +// ByVersion implements [sort.Interface] for sorting semantic version strings. +type ByVersion []string + +func (vs ByVersion) Len() int { return len(vs) } +func (vs ByVersion) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] } +func (vs ByVersion) Less(i, j int) bool { + cmp := Compare(vs[i], vs[j]) + if cmp != 0 { + return cmp < 0 + } + return vs[i] < vs[j] +} + +// Sort sorts a list of semantic version strings using [ByVersion]. +func Sort(list []string) { + sort.Sort(ByVersion(list)) +} + +func parse(v string) (p parsed, ok bool) { + if v == "" || v[0] != 'v' { + return + } + p.major, v, ok = parseInt(v[1:]) + if !ok { + return + } + if v == "" { + p.minor = "0" + p.patch = "0" + p.short = ".0.0" + return + } + if v[0] != '.' { + ok = false + return + } + p.minor, v, ok = parseInt(v[1:]) + if !ok { + return + } + if v == "" { + p.patch = "0" + p.short = ".0" + return + } + if v[0] != '.' { + ok = false + return + } + p.patch, v, ok = parseInt(v[1:]) + if !ok { + return + } + if len(v) > 0 && v[0] == '-' { + p.prerelease, v, ok = parsePrerelease(v) + if !ok { + return + } + } + if len(v) > 0 && v[0] == '+' { + p.build, v, ok = parseBuild(v) + if !ok { + return + } + } + if v != "" { + ok = false + return + } + ok = true + return +} + +func parseInt(v string) (t, rest string, ok bool) { + if v == "" { + return + } + if v[0] < '0' || '9' < v[0] { + return + } + i := 1 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + if v[0] == '0' && i != 1 { + return + } + return v[:i], v[i:], true +} + +func parsePrerelease(v string) (t, rest string, ok bool) { + // "A pre-release version MAY be denoted by appending a hyphen and + // a series of dot separated identifiers immediately following the patch version. + // Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-]. 
+ // Identifiers MUST NOT be empty. Numeric identifiers MUST NOT include leading zeroes." + if v == "" || v[0] != '-' { + return + } + i := 1 + start := 1 + for i < len(v) && v[i] != '+' { + if !isIdentChar(v[i]) && v[i] != '.' { + return + } + if v[i] == '.' { + if start == i || isBadNum(v[start:i]) { + return + } + start = i + 1 + } + i++ + } + if start == i || isBadNum(v[start:i]) { + return + } + return v[:i], v[i:], true +} + +func parseBuild(v string) (t, rest string, ok bool) { + if v == "" || v[0] != '+' { + return + } + i := 1 + start := 1 + for i < len(v) { + if !isIdentChar(v[i]) && v[i] != '.' { + return + } + if v[i] == '.' { + if start == i { + return + } + start = i + 1 + } + i++ + } + if start == i { + return + } + return v[:i], v[i:], true +} + +func isIdentChar(c byte) bool { + return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '-' +} + +func isBadNum(v string) bool { + i := 0 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + return i == len(v) && i > 1 && v[0] == '0' +} + +func isNum(v string) bool { + i := 0 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + return i == len(v) +} + +func compareInt(x, y string) int { + if x == y { + return 0 + } + if len(x) < len(y) { + return -1 + } + if len(x) > len(y) { + return +1 + } + if x < y { + return -1 + } else { + return +1 + } +} + +func comparePrerelease(x, y string) int { + // "When major, minor, and patch are equal, a pre-release version has + // lower precedence than a normal version. + // Example: 1.0.0-alpha < 1.0.0. + // Precedence for two pre-release versions with the same major, minor, + // and patch version MUST be determined by comparing each dot separated + // identifier from left to right until a difference is found as follows: + // identifiers consisting of only digits are compared numerically and + // identifiers with letters or hyphens are compared lexically in ASCII + // sort order. Numeric identifiers always have lower precedence than + // non-numeric identifiers. A larger set of pre-release fields has a + // higher precedence than a smaller set, if all of the preceding + // identifiers are equal. + // Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta < + // 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0." + if x == y { + return 0 + } + if x == "" { + return +1 + } + if y == "" { + return -1 + } + for x != "" && y != "" { + x = x[1:] // skip - or . + y = y[1:] // skip - or . + var dx, dy string + dx, x = nextIdent(x) + dy, y = nextIdent(y) + if dx != dy { + ix := isNum(dx) + iy := isNum(dy) + if ix != iy { + if ix { + return -1 + } else { + return +1 + } + } + if ix { + if len(dx) < len(dy) { + return -1 + } + if len(dx) > len(dy) { + return +1 + } + } + if dx < dy { + return -1 + } else { + return +1 + } + } + } + if x == "" { + return -1 + } else { + return +1 + } +} + +func nextIdent(x string) (dx, rest string) { + i := 0 + for i < len(x) && x[i] != '.' { + i++ + } + return x[:i], x[i:] +} diff --git a/vendor/golang.org/x/net/internal/socks/client.go b/vendor/golang.org/x/net/internal/socks/client.go new file mode 100644 index 0000000000..3d6f516a59 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socks/client.go @@ -0,0 +1,168 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package socks + +import ( + "context" + "errors" + "io" + "net" + "strconv" + "time" +) + +var ( + noDeadline = time.Time{} + aLongTimeAgo = time.Unix(1, 0) +) + +func (d *Dialer) connect(ctx context.Context, c net.Conn, address string) (_ net.Addr, ctxErr error) { + host, port, err := splitHostPort(address) + if err != nil { + return nil, err + } + if deadline, ok := ctx.Deadline(); ok && !deadline.IsZero() { + c.SetDeadline(deadline) + defer c.SetDeadline(noDeadline) + } + if ctx != context.Background() { + errCh := make(chan error, 1) + done := make(chan struct{}) + defer func() { + close(done) + if ctxErr == nil { + ctxErr = <-errCh + } + }() + go func() { + select { + case <-ctx.Done(): + c.SetDeadline(aLongTimeAgo) + errCh <- ctx.Err() + case <-done: + errCh <- nil + } + }() + } + + b := make([]byte, 0, 6+len(host)) // the size here is just an estimate + b = append(b, Version5) + if len(d.AuthMethods) == 0 || d.Authenticate == nil { + b = append(b, 1, byte(AuthMethodNotRequired)) + } else { + ams := d.AuthMethods + if len(ams) > 255 { + return nil, errors.New("too many authentication methods") + } + b = append(b, byte(len(ams))) + for _, am := range ams { + b = append(b, byte(am)) + } + } + if _, ctxErr = c.Write(b); ctxErr != nil { + return + } + + if _, ctxErr = io.ReadFull(c, b[:2]); ctxErr != nil { + return + } + if b[0] != Version5 { + return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0]))) + } + am := AuthMethod(b[1]) + if am == AuthMethodNoAcceptableMethods { + return nil, errors.New("no acceptable authentication methods") + } + if d.Authenticate != nil { + if ctxErr = d.Authenticate(ctx, c, am); ctxErr != nil { + return + } + } + + b = b[:0] + b = append(b, Version5, byte(d.cmd), 0) + if ip := net.ParseIP(host); ip != nil { + if ip4 := ip.To4(); ip4 != nil { + b = append(b, AddrTypeIPv4) + b = append(b, ip4...) + } else if ip6 := ip.To16(); ip6 != nil { + b = append(b, AddrTypeIPv6) + b = append(b, ip6...) + } else { + return nil, errors.New("unknown address type") + } + } else { + if len(host) > 255 { + return nil, errors.New("FQDN too long") + } + b = append(b, AddrTypeFQDN) + b = append(b, byte(len(host))) + b = append(b, host...) 
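+		// The request assembled so far follows the RFC 1928 layout:
+		// VER CMD RSV ATYP DST.ADDR; the two-byte DST.PORT is appended
+		// right after this branch.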
+ } + b = append(b, byte(port>>8), byte(port)) + if _, ctxErr = c.Write(b); ctxErr != nil { + return + } + + if _, ctxErr = io.ReadFull(c, b[:4]); ctxErr != nil { + return + } + if b[0] != Version5 { + return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0]))) + } + if cmdErr := Reply(b[1]); cmdErr != StatusSucceeded { + return nil, errors.New("unknown error " + cmdErr.String()) + } + if b[2] != 0 { + return nil, errors.New("non-zero reserved field") + } + l := 2 + var a Addr + switch b[3] { + case AddrTypeIPv4: + l += net.IPv4len + a.IP = make(net.IP, net.IPv4len) + case AddrTypeIPv6: + l += net.IPv6len + a.IP = make(net.IP, net.IPv6len) + case AddrTypeFQDN: + if _, err := io.ReadFull(c, b[:1]); err != nil { + return nil, err + } + l += int(b[0]) + default: + return nil, errors.New("unknown address type " + strconv.Itoa(int(b[3]))) + } + if cap(b) < l { + b = make([]byte, l) + } else { + b = b[:l] + } + if _, ctxErr = io.ReadFull(c, b); ctxErr != nil { + return + } + if a.IP != nil { + copy(a.IP, b) + } else { + a.Name = string(b[:len(b)-2]) + } + a.Port = int(b[len(b)-2])<<8 | int(b[len(b)-1]) + return &a, nil +} + +func splitHostPort(address string) (string, int, error) { + host, port, err := net.SplitHostPort(address) + if err != nil { + return "", 0, err + } + portnum, err := strconv.Atoi(port) + if err != nil { + return "", 0, err + } + if 1 > portnum || portnum > 0xffff { + return "", 0, errors.New("port number out of range " + port) + } + return host, portnum, nil +} diff --git a/vendor/golang.org/x/net/internal/socks/socks.go b/vendor/golang.org/x/net/internal/socks/socks.go new file mode 100644 index 0000000000..84fcc32b63 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socks/socks.go @@ -0,0 +1,317 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package socks provides a SOCKS version 5 client implementation. +// +// SOCKS protocol version 5 is defined in RFC 1928. +// Username/Password authentication for SOCKS version 5 is defined in +// RFC 1929. +package socks + +import ( + "context" + "errors" + "io" + "net" + "strconv" +) + +// A Command represents a SOCKS command. +type Command int + +func (cmd Command) String() string { + switch cmd { + case CmdConnect: + return "socks connect" + case cmdBind: + return "socks bind" + default: + return "socks " + strconv.Itoa(int(cmd)) + } +} + +// An AuthMethod represents a SOCKS authentication method. +type AuthMethod int + +// A Reply represents a SOCKS command reply code. +type Reply int + +func (code Reply) String() string { + switch code { + case StatusSucceeded: + return "succeeded" + case 0x01: + return "general SOCKS server failure" + case 0x02: + return "connection not allowed by ruleset" + case 0x03: + return "network unreachable" + case 0x04: + return "host unreachable" + case 0x05: + return "connection refused" + case 0x06: + return "TTL expired" + case 0x07: + return "command not supported" + case 0x08: + return "address type not supported" + default: + return "unknown code: " + strconv.Itoa(int(code)) + } +} + +// Wire protocol constants. 
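+//
+// The values below come from RFC 1928 (SOCKS Protocol Version 5) and
+// RFC 1929 (username/password authentication for SOCKS V5).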
+const (
+	Version5 = 0x05
+
+	AddrTypeIPv4 = 0x01
+	AddrTypeFQDN = 0x03
+	AddrTypeIPv6 = 0x04
+
+	CmdConnect Command = 0x01 // establishes an active-open forward proxy connection
+	cmdBind    Command = 0x02 // establishes a passive-open forward proxy connection
+
+	AuthMethodNotRequired         AuthMethod = 0x00 // no authentication required
+	AuthMethodUsernamePassword    AuthMethod = 0x02 // use username/password
+	AuthMethodNoAcceptableMethods AuthMethod = 0xff // no acceptable authentication methods
+
+	StatusSucceeded Reply = 0x00
+)
+
+// An Addr represents a SOCKS-specific address.
+// Either Name or IP is used exclusively.
+type Addr struct {
+	Name string // fully-qualified domain name
+	IP   net.IP
+	Port int
+}
+
+func (a *Addr) Network() string { return "socks" }
+
+func (a *Addr) String() string {
+	if a == nil {
+		return "<nil>"
+	}
+	port := strconv.Itoa(a.Port)
+	if a.IP == nil {
+		return net.JoinHostPort(a.Name, port)
+	}
+	return net.JoinHostPort(a.IP.String(), port)
+}
+
+// A Conn represents a forward proxy connection.
+type Conn struct {
+	net.Conn
+
+	boundAddr net.Addr
+}
+
+// BoundAddr returns the address assigned by the proxy server for
+// connecting to the command target address from the proxy server.
+func (c *Conn) BoundAddr() net.Addr {
+	if c == nil {
+		return nil
+	}
+	return c.boundAddr
+}
+
+// A Dialer holds SOCKS-specific options.
+type Dialer struct {
+	cmd          Command // either CmdConnect or cmdBind
+	proxyNetwork string  // network between a proxy server and a client
+	proxyAddress string  // proxy server address
+
+	// ProxyDial specifies the optional dial function for
+	// establishing the transport connection.
+	ProxyDial func(context.Context, string, string) (net.Conn, error)
+
+	// AuthMethods specifies the list of request authentication
+	// methods.
+	// If empty, SOCKS client requests only AuthMethodNotRequired.
+	AuthMethods []AuthMethod
+
+	// Authenticate specifies the optional authentication
+	// function. It must be non-nil when AuthMethods is not empty.
+	// It must return an error when authentication fails.
+	Authenticate func(context.Context, io.ReadWriter, AuthMethod) error
+}
+
+// DialContext connects to the provided address on the provided
+// network.
+//
+// The returned error value may be a net.OpError. When the Op field of
+// net.OpError contains "socks", the Source field contains a proxy
+// server address and the Addr field contains a command target
+// address.
+//
+// See func Dial of the net package of the standard library for a
+// description of the network and address parameters.
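+//
+// A minimal usage sketch (hypothetical addresses, assuming a proxy that
+// requires no authentication):
+//
+//	d := NewDialer("tcp", "127.0.0.1:1080")
+//	conn, err := d.DialContext(context.Background(), "tcp", "example.com:80")
+//	if err != nil {
+//		// handle the error
+//	}
+//	defer conn.Close()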
+func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
+	if err := d.validateTarget(network, address); err != nil {
+		proxy, dst, _ := d.pathAddrs(address)
+		return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
+	}
+	if ctx == nil {
+		proxy, dst, _ := d.pathAddrs(address)
+		return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")}
+	}
+	var err error
+	var c net.Conn
+	if d.ProxyDial != nil {
+		c, err = d.ProxyDial(ctx, d.proxyNetwork, d.proxyAddress)
+	} else {
+		var dd net.Dialer
+		c, err = dd.DialContext(ctx, d.proxyNetwork, d.proxyAddress)
+	}
+	if err != nil {
+		proxy, dst, _ := d.pathAddrs(address)
+		return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
+	}
+	a, err := d.connect(ctx, c, address)
+	if err != nil {
+		c.Close()
+		proxy, dst, _ := d.pathAddrs(address)
+		return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
+	}
+	return &Conn{Conn: c, boundAddr: a}, nil
+}
+
+// DialWithConn initiates a connection from the SOCKS server to the target
+// network and address using the connection c that is already
+// connected to the SOCKS server.
+//
+// It returns the connection's local address assigned by the SOCKS
+// server.
+func (d *Dialer) DialWithConn(ctx context.Context, c net.Conn, network, address string) (net.Addr, error) {
+	if err := d.validateTarget(network, address); err != nil {
+		proxy, dst, _ := d.pathAddrs(address)
+		return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
+	}
+	if ctx == nil {
+		proxy, dst, _ := d.pathAddrs(address)
+		return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")}
+	}
+	a, err := d.connect(ctx, c, address)
+	if err != nil {
+		proxy, dst, _ := d.pathAddrs(address)
+		return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
+	}
+	return a, nil
+}
+
+// Dial connects to the provided address on the provided network.
+//
+// Unlike DialContext, it returns a raw transport connection instead
+// of a forward proxy connection.
+//
+// Deprecated: Use DialContext or DialWithConn instead.
+func (d *Dialer) Dial(network, address string) (net.Conn, error) { + if err := d.validateTarget(network, address); err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + var err error + var c net.Conn + if d.ProxyDial != nil { + c, err = d.ProxyDial(context.Background(), d.proxyNetwork, d.proxyAddress) + } else { + c, err = net.Dial(d.proxyNetwork, d.proxyAddress) + } + if err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + if _, err := d.DialWithConn(context.Background(), c, network, address); err != nil { + c.Close() + return nil, err + } + return c, nil +} + +func (d *Dialer) validateTarget(network, address string) error { + switch network { + case "tcp", "tcp6", "tcp4": + default: + return errors.New("network not implemented") + } + switch d.cmd { + case CmdConnect, cmdBind: + default: + return errors.New("command not implemented") + } + return nil +} + +func (d *Dialer) pathAddrs(address string) (proxy, dst net.Addr, err error) { + for i, s := range []string{d.proxyAddress, address} { + host, port, err := splitHostPort(s) + if err != nil { + return nil, nil, err + } + a := &Addr{Port: port} + a.IP = net.ParseIP(host) + if a.IP == nil { + a.Name = host + } + if i == 0 { + proxy = a + } else { + dst = a + } + } + return +} + +// NewDialer returns a new Dialer that dials through the provided +// proxy server's network and address. +func NewDialer(network, address string) *Dialer { + return &Dialer{proxyNetwork: network, proxyAddress: address, cmd: CmdConnect} +} + +const ( + authUsernamePasswordVersion = 0x01 + authStatusSucceeded = 0x00 +) + +// UsernamePassword are the credentials for the username/password +// authentication method. +type UsernamePassword struct { + Username string + Password string +} + +// Authenticate authenticates a pair of username and password with the +// proxy server. +func (up *UsernamePassword) Authenticate(ctx context.Context, rw io.ReadWriter, auth AuthMethod) error { + switch auth { + case AuthMethodNotRequired: + return nil + case AuthMethodUsernamePassword: + if len(up.Username) == 0 || len(up.Username) > 255 || len(up.Password) > 255 { + return errors.New("invalid username/password") + } + b := []byte{authUsernamePasswordVersion} + b = append(b, byte(len(up.Username))) + b = append(b, up.Username...) + b = append(b, byte(len(up.Password))) + b = append(b, up.Password...) + // TODO(mikio): handle IO deadlines and cancelation if + // necessary + if _, err := rw.Write(b); err != nil { + return err + } + if _, err := io.ReadFull(rw, b[:2]); err != nil { + return err + } + if b[0] != authUsernamePasswordVersion { + return errors.New("invalid username/password version") + } + if b[1] != authStatusSucceeded { + return errors.New("username/password authentication failed") + } + return nil + } + return errors.New("unsupported authentication method " + strconv.Itoa(int(auth))) +} diff --git a/vendor/golang.org/x/net/proxy/dial.go b/vendor/golang.org/x/net/proxy/dial.go new file mode 100644 index 0000000000..811c2e4e96 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/dial.go @@ -0,0 +1,54 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "context" + "net" +) + +// A ContextDialer dials using a context. 
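+// Both *net.Dialer from the standard library and the SOCKS Dialer returned
+// by SOCKS5 in this package satisfy it.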
+type ContextDialer interface { + DialContext(ctx context.Context, network, address string) (net.Conn, error) +} + +// Dial works like DialContext on net.Dialer but using a dialer returned by FromEnvironment. +// +// The passed ctx is only used for returning the Conn, not the lifetime of the Conn. +// +// Custom dialers (registered via RegisterDialerType) that do not implement ContextDialer +// can leak a goroutine for as long as it takes the underlying Dialer implementation to timeout. +// +// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed. +func Dial(ctx context.Context, network, address string) (net.Conn, error) { + d := FromEnvironment() + if xd, ok := d.(ContextDialer); ok { + return xd.DialContext(ctx, network, address) + } + return dialContext(ctx, d, network, address) +} + +// WARNING: this can leak a goroutine for as long as the underlying Dialer implementation takes to timeout +// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed. +func dialContext(ctx context.Context, d Dialer, network, address string) (net.Conn, error) { + var ( + conn net.Conn + done = make(chan struct{}, 1) + err error + ) + go func() { + conn, err = d.Dial(network, address) + close(done) + if conn != nil && ctx.Err() != nil { + conn.Close() + } + }() + select { + case <-ctx.Done(): + err = ctx.Err() + case <-done: + } + return conn, err +} diff --git a/vendor/golang.org/x/net/proxy/direct.go b/vendor/golang.org/x/net/proxy/direct.go new file mode 100644 index 0000000000..3d66bdef9d --- /dev/null +++ b/vendor/golang.org/x/net/proxy/direct.go @@ -0,0 +1,31 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "context" + "net" +) + +type direct struct{} + +// Direct implements Dialer by making network connections directly using net.Dial or net.DialContext. +var Direct = direct{} + +var ( + _ Dialer = Direct + _ ContextDialer = Direct +) + +// Dial directly invokes net.Dial with the supplied parameters. +func (direct) Dial(network, addr string) (net.Conn, error) { + return net.Dial(network, addr) +} + +// DialContext instantiates a net.Dialer and invokes its DialContext receiver with the supplied parameters. +func (direct) DialContext(ctx context.Context, network, addr string) (net.Conn, error) { + var d net.Dialer + return d.DialContext(ctx, network, addr) +} diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go new file mode 100644 index 0000000000..573fe79e86 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/per_host.go @@ -0,0 +1,155 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "context" + "net" + "strings" +) + +// A PerHost directs connections to a default Dialer unless the host name +// requested matches one of a number of exceptions. +type PerHost struct { + def, bypass Dialer + + bypassNetworks []*net.IPNet + bypassIPs []net.IP + bypassZones []string + bypassHosts []string +} + +// NewPerHost returns a PerHost Dialer that directs connections to either +// defaultDialer or bypass, depending on whether the connection matches one of +// the configured rules. 
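+//
+// A hypothetical sketch of wiring it up (placeholder addresses; socksDialer
+// stands in for a Dialer obtained from SOCKS5 or FromURL):
+//
+//	perHost := NewPerHost(socksDialer, Direct)
+//	perHost.AddFromString("localhost,10.0.0.0/8,*.internal.example")
+//	conn, err := perHost.Dial("tcp", "db.internal.example:5432")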
+func NewPerHost(defaultDialer, bypass Dialer) *PerHost { + return &PerHost{ + def: defaultDialer, + bypass: bypass, + } +} + +// Dial connects to the address addr on the given network through either +// defaultDialer or bypass. +func (p *PerHost) Dial(network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + + return p.dialerForRequest(host).Dial(network, addr) +} + +// DialContext connects to the address addr on the given network through either +// defaultDialer or bypass. +func (p *PerHost) DialContext(ctx context.Context, network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + d := p.dialerForRequest(host) + if x, ok := d.(ContextDialer); ok { + return x.DialContext(ctx, network, addr) + } + return dialContext(ctx, d, network, addr) +} + +func (p *PerHost) dialerForRequest(host string) Dialer { + if ip := net.ParseIP(host); ip != nil { + for _, net := range p.bypassNetworks { + if net.Contains(ip) { + return p.bypass + } + } + for _, bypassIP := range p.bypassIPs { + if bypassIP.Equal(ip) { + return p.bypass + } + } + return p.def + } + + for _, zone := range p.bypassZones { + if strings.HasSuffix(host, zone) { + return p.bypass + } + if host == zone[1:] { + // For a zone ".example.com", we match "example.com" + // too. + return p.bypass + } + } + for _, bypassHost := range p.bypassHosts { + if bypassHost == host { + return p.bypass + } + } + return p.def +} + +// AddFromString parses a string that contains comma-separated values +// specifying hosts that should use the bypass proxy. Each value is either an +// IP address, a CIDR range, a zone (*.example.com) or a host name +// (localhost). A best effort is made to parse the string and errors are +// ignored. +func (p *PerHost) AddFromString(s string) { + hosts := strings.Split(s, ",") + for _, host := range hosts { + host = strings.TrimSpace(host) + if len(host) == 0 { + continue + } + if strings.Contains(host, "/") { + // We assume that it's a CIDR address like 127.0.0.0/8 + if _, net, err := net.ParseCIDR(host); err == nil { + p.AddNetwork(net) + } + continue + } + if ip := net.ParseIP(host); ip != nil { + p.AddIP(ip) + continue + } + if strings.HasPrefix(host, "*.") { + p.AddZone(host[1:]) + continue + } + p.AddHost(host) + } +} + +// AddIP specifies an IP address that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match an IP. +func (p *PerHost) AddIP(ip net.IP) { + p.bypassIPs = append(p.bypassIPs, ip) +} + +// AddNetwork specifies an IP range that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match. +func (p *PerHost) AddNetwork(net *net.IPNet) { + p.bypassNetworks = append(p.bypassNetworks, net) +} + +// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of +// "example.com" matches "example.com" and all of its subdomains. +func (p *PerHost) AddZone(zone string) { + if strings.HasSuffix(zone, ".") { + zone = zone[:len(zone)-1] + } + if !strings.HasPrefix(zone, ".") { + zone = "." + zone + } + p.bypassZones = append(p.bypassZones, zone) +} + +// AddHost specifies a host name that will use the bypass proxy. 
+func (p *PerHost) AddHost(host string) {
+	if strings.HasSuffix(host, ".") {
+		host = host[:len(host)-1]
+	}
+	p.bypassHosts = append(p.bypassHosts, host)
+}
diff --git a/vendor/golang.org/x/net/proxy/proxy.go b/vendor/golang.org/x/net/proxy/proxy.go
new file mode 100644
index 0000000000..9ff4b9a776
--- /dev/null
+++ b/vendor/golang.org/x/net/proxy/proxy.go
@@ -0,0 +1,149 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package proxy provides support for a variety of protocols to proxy network
+// data.
+package proxy // import "golang.org/x/net/proxy"
+
+import (
+	"errors"
+	"net"
+	"net/url"
+	"os"
+	"sync"
+)
+
+// A Dialer is a means to establish a connection.
+// Custom dialers should also implement ContextDialer.
+type Dialer interface {
+	// Dial connects to the given address via the proxy.
+	Dial(network, addr string) (c net.Conn, err error)
+}
+
+// Auth contains authentication parameters that specific Dialers may require.
+type Auth struct {
+	User, Password string
+}
+
+// FromEnvironment returns the dialer specified by the proxy-related
+// variables in the environment and makes underlying connections
+// directly.
+func FromEnvironment() Dialer {
+	return FromEnvironmentUsing(Direct)
+}
+
+// FromEnvironmentUsing returns the dialer specified by the proxy-related
+// variables in the environment and makes underlying connections
+// using the provided forwarding Dialer (for instance, a *net.Dialer
+// with desired configuration).
+func FromEnvironmentUsing(forward Dialer) Dialer {
+	allProxy := allProxyEnv.Get()
+	if len(allProxy) == 0 {
+		return forward
+	}
+
+	proxyURL, err := url.Parse(allProxy)
+	if err != nil {
+		return forward
+	}
+	proxy, err := FromURL(proxyURL, forward)
+	if err != nil {
+		return forward
+	}
+
+	noProxy := noProxyEnv.Get()
+	if len(noProxy) == 0 {
+		return proxy
+	}
+
+	perHost := NewPerHost(proxy, forward)
+	perHost.AddFromString(noProxy)
+	return perHost
+}
+
+// proxySchemes is a map from URL schemes to a function that creates a Dialer
+// from a URL with such a scheme.
+var proxySchemes map[string]func(*url.URL, Dialer) (Dialer, error)
+
+// RegisterDialerType takes a URL scheme and a function to generate Dialers from
+// a URL with that scheme and a forwarding Dialer. Registered schemes are used
+// by FromURL.
+func RegisterDialerType(scheme string, f func(*url.URL, Dialer) (Dialer, error)) {
+	if proxySchemes == nil {
+		proxySchemes = make(map[string]func(*url.URL, Dialer) (Dialer, error))
+	}
+	proxySchemes[scheme] = f
+}
+
+// FromURL returns a Dialer given a URL specification and an underlying
+// Dialer for it to make network requests.
+func FromURL(u *url.URL, forward Dialer) (Dialer, error) {
+	var auth *Auth
+	if u.User != nil {
+		auth = new(Auth)
+		auth.User = u.User.Username()
+		if p, ok := u.User.Password(); ok {
+			auth.Password = p
+		}
+	}
+
+	switch u.Scheme {
+	case "socks5", "socks5h":
+		addr := u.Hostname()
+		port := u.Port()
+		if port == "" {
+			port = "1080"
+		}
+		return SOCKS5("tcp", net.JoinHostPort(addr, port), auth, forward)
+	}
+
+	// If the scheme doesn't match any of the built-in schemes, see if it
+	// was registered by another package.
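+	// (A package would typically register its scheme from an init function,
+	// e.g. RegisterDialerType("myscheme", newMySchemeDialer), where the
+	// scheme and constructor names here are purely hypothetical.)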
+ if proxySchemes != nil { + if f, ok := proxySchemes[u.Scheme]; ok { + return f(u, forward) + } + } + + return nil, errors.New("proxy: unknown scheme: " + u.Scheme) +} + +var ( + allProxyEnv = &envOnce{ + names: []string{"ALL_PROXY", "all_proxy"}, + } + noProxyEnv = &envOnce{ + names: []string{"NO_PROXY", "no_proxy"}, + } +) + +// envOnce looks up an environment variable (optionally by multiple +// names) once. It mitigates expensive lookups on some platforms +// (e.g. Windows). +// (Borrowed from net/http/transport.go) +type envOnce struct { + names []string + once sync.Once + val string +} + +func (e *envOnce) Get() string { + e.once.Do(e.init) + return e.val +} + +func (e *envOnce) init() { + for _, n := range e.names { + e.val = os.Getenv(n) + if e.val != "" { + return + } + } +} + +// reset is used by tests +func (e *envOnce) reset() { + e.once = sync.Once{} + e.val = "" +} diff --git a/vendor/golang.org/x/net/proxy/socks5.go b/vendor/golang.org/x/net/proxy/socks5.go new file mode 100644 index 0000000000..c91651f96d --- /dev/null +++ b/vendor/golang.org/x/net/proxy/socks5.go @@ -0,0 +1,42 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "context" + "net" + + "golang.org/x/net/internal/socks" +) + +// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given +// address with an optional username and password. +// See RFC 1928 and RFC 1929. +func SOCKS5(network, address string, auth *Auth, forward Dialer) (Dialer, error) { + d := socks.NewDialer(network, address) + if forward != nil { + if f, ok := forward.(ContextDialer); ok { + d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) { + return f.DialContext(ctx, network, address) + } + } else { + d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) { + return dialContext(ctx, forward, network, address) + } + } + } + if auth != nil { + up := socks.UsernamePassword{ + Username: auth.User, + Password: auth.Password, + } + d.AuthMethods = []socks.AuthMethod{ + socks.AuthMethodNotRequired, + socks.AuthMethodUsernamePassword, + } + d.Authenticate = up.Authenticate + } + return d, nil +} diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go new file mode 100644 index 0000000000..b18efb743f --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -0,0 +1,132 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package errgroup provides synchronization, error propagation, and Context +// cancelation for groups of goroutines working on subtasks of a common task. +package errgroup + +import ( + "context" + "fmt" + "sync" +) + +type token struct{} + +// A Group is a collection of goroutines working on subtasks that are part of +// the same overall task. +// +// A zero Group is valid, has no limit on the number of active goroutines, +// and does not cancel on error. +type Group struct { + cancel func(error) + + wg sync.WaitGroup + + sem chan token + + errOnce sync.Once + err error +} + +func (g *Group) done() { + if g.sem != nil { + <-g.sem + } + g.wg.Done() +} + +// WithContext returns a new Group and an associated Context derived from ctx. 
+// +// The derived Context is canceled the first time a function passed to Go +// returns a non-nil error or the first time Wait returns, whichever occurs +// first. +func WithContext(ctx context.Context) (*Group, context.Context) { + ctx, cancel := withCancelCause(ctx) + return &Group{cancel: cancel}, ctx +} + +// Wait blocks until all function calls from the Go method have returned, then +// returns the first non-nil error (if any) from them. +func (g *Group) Wait() error { + g.wg.Wait() + if g.cancel != nil { + g.cancel(g.err) + } + return g.err +} + +// Go calls the given function in a new goroutine. +// It blocks until the new goroutine can be added without the number of +// active goroutines in the group exceeding the configured limit. +// +// The first call to return a non-nil error cancels the group's context, if the +// group was created by calling WithContext. The error will be returned by Wait. +func (g *Group) Go(f func() error) { + if g.sem != nil { + g.sem <- token{} + } + + g.wg.Add(1) + go func() { + defer g.done() + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel(g.err) + } + }) + } + }() +} + +// TryGo calls the given function in a new goroutine only if the number of +// active goroutines in the group is currently below the configured limit. +// +// The return value reports whether the goroutine was started. +func (g *Group) TryGo(f func() error) bool { + if g.sem != nil { + select { + case g.sem <- token{}: + // Note: this allows barging iff channels in general allow barging. + default: + return false + } + } + + g.wg.Add(1) + go func() { + defer g.done() + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel(g.err) + } + }) + } + }() + return true +} + +// SetLimit limits the number of active goroutines in this group to at most n. +// A negative value indicates no limit. +// +// Any subsequent call to the Go method will block until it can add an active +// goroutine without exceeding the configured limit. +// +// The limit must not be modified while any goroutines in the group are active. +func (g *Group) SetLimit(n int) { + if n < 0 { + g.sem = nil + return + } + if len(g.sem) != 0 { + panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", len(g.sem))) + } + g.sem = make(chan token, n) +} diff --git a/vendor/golang.org/x/sync/errgroup/go120.go b/vendor/golang.org/x/sync/errgroup/go120.go new file mode 100644 index 0000000000..f93c740b63 --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/go120.go @@ -0,0 +1,13 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.20 + +package errgroup + +import "context" + +func withCancelCause(parent context.Context) (context.Context, func(error)) { + return context.WithCancelCause(parent) +} diff --git a/vendor/golang.org/x/sync/errgroup/pre_go120.go b/vendor/golang.org/x/sync/errgroup/pre_go120.go new file mode 100644 index 0000000000..88ce33434e --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/pre_go120.go @@ -0,0 +1,14 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !go1.20 + +package errgroup + +import "context" + +func withCancelCause(parent context.Context) (context.Context, func(error)) { + ctx, cancel := context.WithCancel(parent) + return ctx, func(error) { cancel() } +} diff --git a/vendor/golang.org/x/sys/execabs/execabs.go b/vendor/golang.org/x/sys/execabs/execabs.go new file mode 100644 index 0000000000..3bf40fdfec --- /dev/null +++ b/vendor/golang.org/x/sys/execabs/execabs.go @@ -0,0 +1,102 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package execabs is a drop-in replacement for os/exec +// that requires PATH lookups to find absolute paths. +// That is, execabs.Command("cmd") runs the same PATH lookup +// as exec.Command("cmd"), but if the result is a path +// which is relative, the Run and Start methods will report +// an error instead of running the executable. +// +// See https://blog.golang.org/path-security for more information +// about when it may be necessary or appropriate to use this package. +package execabs + +import ( + "context" + "fmt" + "os/exec" + "path/filepath" + "reflect" + "unsafe" +) + +// ErrNotFound is the error resulting if a path search failed to find an executable file. +// It is an alias for exec.ErrNotFound. +var ErrNotFound = exec.ErrNotFound + +// Cmd represents an external command being prepared or run. +// It is an alias for exec.Cmd. +type Cmd = exec.Cmd + +// Error is returned by LookPath when it fails to classify a file as an executable. +// It is an alias for exec.Error. +type Error = exec.Error + +// An ExitError reports an unsuccessful exit by a command. +// It is an alias for exec.ExitError. +type ExitError = exec.ExitError + +func relError(file, path string) error { + return fmt.Errorf("%s resolves to executable in current directory (.%c%s)", file, filepath.Separator, path) +} + +// LookPath searches for an executable named file in the directories +// named by the PATH environment variable. If file contains a slash, +// it is tried directly and the PATH is not consulted. The result will be +// an absolute path. +// +// LookPath differs from exec.LookPath in its handling of PATH lookups, +// which are used for file names without slashes. If exec.LookPath's +// PATH lookup would have returned an executable from the current directory, +// LookPath instead returns an error. +func LookPath(file string) (string, error) { + path, err := exec.LookPath(file) + if err != nil && !isGo119ErrDot(err) { + return "", err + } + if filepath.Base(file) == file && !filepath.IsAbs(path) { + return "", relError(file, path) + } + return path, nil +} + +func fixCmd(name string, cmd *exec.Cmd) { + if filepath.Base(name) == name && !filepath.IsAbs(cmd.Path) && !isGo119ErrFieldSet(cmd) { + // exec.Command was called with a bare binary name and + // exec.LookPath returned a path which is not absolute. + // Set cmd.lookPathErr and clear cmd.Path so that it + // cannot be run. + lookPathErr := (*error)(unsafe.Pointer(reflect.ValueOf(cmd).Elem().FieldByName("lookPathErr").Addr().Pointer())) + if *lookPathErr == nil { + *lookPathErr = relError(name, cmd.Path) + } + cmd.Path = "" + } +} + +// CommandContext is like Command but includes a context. +// +// The provided context is used to kill the process (by calling os.Process.Kill) +// if the context becomes done before the command completes on its own. 
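+//
+// A minimal usage sketch (hypothetical command and timeout):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	out, err := execabs.CommandContext(ctx, "git", "rev-parse", "HEAD").Output()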
+func CommandContext(ctx context.Context, name string, arg ...string) *exec.Cmd {
+	cmd := exec.CommandContext(ctx, name, arg...)
+	fixCmd(name, cmd)
+	return cmd
+}
+
+// Command returns the Cmd struct to execute the named program with the given arguments.
+// See exec.Command for most details.
+//
+// Command differs from exec.Command in its handling of PATH lookups,
+// which are used when the program name contains no slashes.
+// If exec.Command would have returned an exec.Cmd configured to run an
+// executable from the current directory, Command instead
+// returns an exec.Cmd that will return an error from Start or Run.
+func Command(name string, arg ...string) *exec.Cmd {
+	cmd := exec.Command(name, arg...)
+	fixCmd(name, cmd)
+	return cmd
+}
diff --git a/vendor/golang.org/x/sys/execabs/execabs_go118.go b/vendor/golang.org/x/sys/execabs/execabs_go118.go
new file mode 100644
index 0000000000..5627d70e39
--- /dev/null
+++ b/vendor/golang.org/x/sys/execabs/execabs_go118.go
@@ -0,0 +1,17 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.19
+
+package execabs
+
+import "os/exec"
+
+func isGo119ErrDot(err error) bool {
+	return false
+}
+
+func isGo119ErrFieldSet(cmd *exec.Cmd) bool {
+	return false
+}
diff --git a/vendor/golang.org/x/sys/execabs/execabs_go119.go b/vendor/golang.org/x/sys/execabs/execabs_go119.go
new file mode 100644
index 0000000000..d60ab1b419
--- /dev/null
+++ b/vendor/golang.org/x/sys/execabs/execabs_go119.go
@@ -0,0 +1,20 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.19
+
+package execabs
+
+import (
+	"errors"
+	"os/exec"
+)
+
+func isGo119ErrDot(err error) bool {
+	return errors.Is(err, exec.ErrDot)
+}
+
+func isGo119ErrFieldSet(cmd *exec.Cmd) bool {
+	return cmd.Err != nil
+}
diff --git a/vendor/golang.org/x/sys/windows/registry/key.go b/vendor/golang.org/x/sys/windows/registry/key.go
new file mode 100644
index 0000000000..fd8632444e
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/registry/key.go
@@ -0,0 +1,205 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+
+// Package registry provides access to the Windows registry.
+//
+// Here is a simple example, opening a registry key and reading a string value from it.
+//
+//	k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer k.Close()
+//
+//	s, _, err := k.GetStringValue("SystemRoot")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("Windows system root is %q\n", s)
package registry
+
+import (
+	"io"
+	"runtime"
+	"syscall"
+	"time"
+)
+
+const (
+	// Registry key security and access rights.
+	// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms724878.aspx
+	// for details.
+	ALL_ACCESS         = 0xf003f
+	CREATE_LINK        = 0x00020
+	CREATE_SUB_KEY     = 0x00004
+	ENUMERATE_SUB_KEYS = 0x00008
+	EXECUTE            = 0x20019
+	NOTIFY             = 0x00010
+	QUERY_VALUE        = 0x00001
+	READ               = 0x20019
+	SET_VALUE          = 0x00002
+	WOW64_32KEY        = 0x00200
+	WOW64_64KEY        = 0x00100
+	WRITE              = 0x20006
+)
+
+// Key is a handle to an open Windows registry key.
+// Keys can be obtained by calling OpenKey; there are
+// also some predefined root keys such as CURRENT_USER.
+// Keys can be used directly in the Windows API.
+type Key syscall.Handle
+
+const (
+	// Windows defines some predefined root keys that are always open.
+	// An application can use these keys as entry points to the registry.
+	// Normally these keys are used in OpenKey to open new keys,
+	// but they can also be used anywhere a Key is required.
+	CLASSES_ROOT     = Key(syscall.HKEY_CLASSES_ROOT)
+	CURRENT_USER     = Key(syscall.HKEY_CURRENT_USER)
+	LOCAL_MACHINE    = Key(syscall.HKEY_LOCAL_MACHINE)
+	USERS            = Key(syscall.HKEY_USERS)
+	CURRENT_CONFIG   = Key(syscall.HKEY_CURRENT_CONFIG)
+	PERFORMANCE_DATA = Key(syscall.HKEY_PERFORMANCE_DATA)
+)
+
+// Close closes open key k.
+func (k Key) Close() error {
+	return syscall.RegCloseKey(syscall.Handle(k))
+}
+
+// OpenKey opens a new key with path name relative to key k.
+// It accepts any open key, including CURRENT_USER and others,
+// and returns the new key and an error.
+// The access parameter specifies desired access rights to the
+// key to be opened.
+func OpenKey(k Key, path string, access uint32) (Key, error) {
+	p, err := syscall.UTF16PtrFromString(path)
+	if err != nil {
+		return 0, err
+	}
+	var subkey syscall.Handle
+	err = syscall.RegOpenKeyEx(syscall.Handle(k), p, 0, access, &subkey)
+	if err != nil {
+		return 0, err
+	}
+	return Key(subkey), nil
+}
+
+// OpenRemoteKey opens a predefined registry key on another
+// computer pcname. The key to be opened is specified by k, but
+// can only be one of LOCAL_MACHINE, PERFORMANCE_DATA or USERS.
+// If pcname is "", OpenRemoteKey returns the local computer key.
+func OpenRemoteKey(pcname string, k Key) (Key, error) {
+	var err error
+	var p *uint16
+	if pcname != "" {
+		p, err = syscall.UTF16PtrFromString(`\\` + pcname)
+		if err != nil {
+			return 0, err
+		}
+	}
+	var remoteKey syscall.Handle
+	err = regConnectRegistry(p, syscall.Handle(k), &remoteKey)
+	if err != nil {
+		return 0, err
+	}
+	return Key(remoteKey), nil
+}
+
+// ReadSubKeyNames returns the names of subkeys of key k.
+// The parameter n controls the number of returned names,
+// analogous to the way os.File.Readdirnames works.
+func (k Key) ReadSubKeyNames(n int) ([]string, error) {
+	// RegEnumKeyEx must be called repeatedly and to completion.
+	// During this time, this goroutine cannot migrate away from
+	// its current thread. See https://golang.org/issue/49320 and
+	// https://golang.org/issue/49466.
+	runtime.LockOSThread()
+	defer runtime.UnlockOSThread()
+
+	names := make([]string, 0)
+	// The registry key name length is limited to 255 characters, as
+	// described here:
+	// https://msdn.microsoft.com/library/windows/desktop/ms724872.aspx
+	buf := make([]uint16, 256) // plus extra room for the terminating zero byte
+loopItems:
+	for i := uint32(0); ; i++ {
+		if n > 0 {
+			if len(names) == n {
+				return names, nil
+			}
+		}
+		l := uint32(len(buf))
+		for {
+			err := syscall.RegEnumKeyEx(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil)
+			if err == nil {
+				break
+			}
+			if err == syscall.ERROR_MORE_DATA {
+				// Double buffer size and try again.
+				l = uint32(2 * len(buf))
+				buf = make([]uint16, l)
+				continue
+			}
+			if err == _ERROR_NO_MORE_ITEMS {
+				break loopItems
+			}
+			return names, err
+		}
+		names = append(names, syscall.UTF16ToString(buf[:l]))
+	}
+	if n > len(names) {
+		return names, io.EOF
+	}
+	return names, nil
+}
+
+// CreateKey creates a key named path under open key k.
+// CreateKey returns the new key and a boolean flag that reports +// whether the key already existed. +// The access parameter specifies the access rights for the key +// to be created. +func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool, err error) { + var h syscall.Handle + var d uint32 + err = regCreateKeyEx(syscall.Handle(k), syscall.StringToUTF16Ptr(path), + 0, nil, _REG_OPTION_NON_VOLATILE, access, nil, &h, &d) + if err != nil { + return 0, false, err + } + return Key(h), d == _REG_OPENED_EXISTING_KEY, nil +} + +// DeleteKey deletes the subkey path of key k and its values. +func DeleteKey(k Key, path string) error { + return regDeleteKey(syscall.Handle(k), syscall.StringToUTF16Ptr(path)) +} + +// A KeyInfo describes the statistics of a key. It is returned by Stat. +type KeyInfo struct { + SubKeyCount uint32 + MaxSubKeyLen uint32 // size of the key's subkey with the longest name, in Unicode characters, not including the terminating zero byte + ValueCount uint32 + MaxValueNameLen uint32 // size of the key's longest value name, in Unicode characters, not including the terminating zero byte + MaxValueLen uint32 // longest data component among the key's values, in bytes + lastWriteTime syscall.Filetime +} + +// ModTime returns the key's last write time. +func (ki *KeyInfo) ModTime() time.Time { + return time.Unix(0, ki.lastWriteTime.Nanoseconds()) +} + +// Stat retrieves information about the open key k. +func (k Key) Stat() (*KeyInfo, error) { + var ki KeyInfo + err := syscall.RegQueryInfoKey(syscall.Handle(k), nil, nil, nil, + &ki.SubKeyCount, &ki.MaxSubKeyLen, nil, &ki.ValueCount, + &ki.MaxValueNameLen, &ki.MaxValueLen, nil, &ki.lastWriteTime) + if err != nil { + return nil, err + } + return &ki, nil +} diff --git a/vendor/golang.org/x/sys/windows/registry/mksyscall.go b/vendor/golang.org/x/sys/windows/registry/mksyscall.go new file mode 100644 index 0000000000..bbf86ccf0c --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/mksyscall.go @@ -0,0 +1,9 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build generate + +package registry + +//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go syscall.go diff --git a/vendor/golang.org/x/sys/windows/registry/syscall.go b/vendor/golang.org/x/sys/windows/registry/syscall.go new file mode 100644 index 0000000000..f533091c19 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/syscall.go @@ -0,0 +1,32 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+//go:build windows
+
+package registry
+
+import "syscall"
+
+const (
+	_REG_OPTION_NON_VOLATILE = 0
+
+	_REG_CREATED_NEW_KEY     = 1
+	_REG_OPENED_EXISTING_KEY = 2
+
+	_ERROR_NO_MORE_ITEMS syscall.Errno = 259
+)
+
+func LoadRegLoadMUIString() error {
+	return procRegLoadMUIStringW.Find()
+}
+
+//sys	regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW
+//sys	regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) = advapi32.RegDeleteKeyW
+//sys	regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) = advapi32.RegSetValueExW
+//sys	regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegEnumValueW
+//sys	regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) = advapi32.RegDeleteValueW
+//sys	regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) = advapi32.RegLoadMUIStringW
+//sys	regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) = advapi32.RegConnectRegistryW
+
+//sys	expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) = kernel32.ExpandEnvironmentStringsW
diff --git a/vendor/golang.org/x/sys/windows/registry/value.go b/vendor/golang.org/x/sys/windows/registry/value.go
new file mode 100644
index 0000000000..74db26b94d
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/registry/value.go
@@ -0,0 +1,386 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+
+package registry
+
+import (
+	"errors"
+	"io"
+	"syscall"
+	"unicode/utf16"
+	"unsafe"
+)
+
+const (
+	// Registry value types.
+	NONE                       = 0
+	SZ                         = 1
+	EXPAND_SZ                  = 2
+	BINARY                     = 3
+	DWORD                      = 4
+	DWORD_BIG_ENDIAN           = 5
+	LINK                       = 6
+	MULTI_SZ                   = 7
+	RESOURCE_LIST              = 8
+	FULL_RESOURCE_DESCRIPTOR   = 9
+	RESOURCE_REQUIREMENTS_LIST = 10
+	QWORD                      = 11
+)
+
+var (
+	// ErrShortBuffer is returned when the buffer was too short for the operation.
+	ErrShortBuffer = syscall.ERROR_MORE_DATA
+
+	// ErrNotExist is returned when a registry key or value does not exist.
+	ErrNotExist = syscall.ERROR_FILE_NOT_FOUND
+
+	// ErrUnexpectedType is returned by Get*Value when the value's type was unexpected.
+	ErrUnexpectedType = errors.New("unexpected key value type")
+)
+
+// GetValue retrieves the type and data for the specified value associated
+// with an open key k. It fills up buffer buf and returns the retrieved
+// byte count n. If buf is too small to fit the stored value it returns
+// ErrShortBuffer error along with the required buffer size n.
+// If no buffer is provided, GetValue returns the value's type and the
+// required buffer size n only.
+// If the value does not exist, the error returned is ErrNotExist.
+//
+// GetValue is a low level function. If value's type is known, use the appropriate
+// Get*Value function instead.
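+//
+// A sketch of the resulting two-call pattern against an open key k (the
+// value name here is only illustrative):
+//
+//	n, valtype, err := k.GetValue("Version", nil) // nil buffer: type and size only
+//	if err == nil {
+//		buf := make([]byte, n)
+//		n, valtype, err = k.GetValue("Version", buf)
+//		// buf[:n] now holds the raw data, valtype its registry type
+//	}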
+func (k Key) GetValue(name string, buf []byte) (n int, valtype uint32, err error) { + pname, err := syscall.UTF16PtrFromString(name) + if err != nil { + return 0, 0, err + } + var pbuf *byte + if len(buf) > 0 { + pbuf = (*byte)(unsafe.Pointer(&buf[0])) + } + l := uint32(len(buf)) + err = syscall.RegQueryValueEx(syscall.Handle(k), pname, nil, &valtype, pbuf, &l) + if err != nil { + return int(l), valtype, err + } + return int(l), valtype, nil +} + +func (k Key) getValue(name string, buf []byte) (data []byte, valtype uint32, err error) { + p, err := syscall.UTF16PtrFromString(name) + if err != nil { + return nil, 0, err + } + var t uint32 + n := uint32(len(buf)) + for { + err = syscall.RegQueryValueEx(syscall.Handle(k), p, nil, &t, (*byte)(unsafe.Pointer(&buf[0])), &n) + if err == nil { + return buf[:n], t, nil + } + if err != syscall.ERROR_MORE_DATA { + return nil, 0, err + } + if n <= uint32(len(buf)) { + return nil, 0, err + } + buf = make([]byte, n) + } +} + +// GetStringValue retrieves the string value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetStringValue returns ErrNotExist. +// If value is not SZ or EXPAND_SZ, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetStringValue(name string) (val string, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return "", typ, err2 + } + switch typ { + case SZ, EXPAND_SZ: + default: + return "", typ, ErrUnexpectedType + } + if len(data) == 0 { + return "", typ, nil + } + u := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2] + return syscall.UTF16ToString(u), typ, nil +} + +// GetMUIStringValue retrieves the localized string value for +// the specified value name associated with an open key k. +// If the value name doesn't exist or the localized string value +// can't be resolved, GetMUIStringValue returns ErrNotExist. +// GetMUIStringValue panics if the system doesn't support +// regLoadMUIString; use LoadRegLoadMUIString to check if +// regLoadMUIString is supported before calling this function. +func (k Key) GetMUIStringValue(name string) (string, error) { + pname, err := syscall.UTF16PtrFromString(name) + if err != nil { + return "", err + } + + buf := make([]uint16, 1024) + var buflen uint32 + var pdir *uint16 + + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + if err == syscall.ERROR_FILE_NOT_FOUND { // Try fallback path + + // Try to resolve the string value using the system directory as + // a DLL search path; this assumes the string value is of the form + // @[path]\dllname,-strID but with no path given, e.g. @tzres.dll,-320. + + // This approach works with tzres.dll but may have to be revised + // in the future to allow callers to provide custom search paths. 
+ + var s string + s, err = ExpandString("%SystemRoot%\\system32\\") + if err != nil { + return "", err + } + pdir, err = syscall.UTF16PtrFromString(s) + if err != nil { + return "", err + } + + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + } + + for err == syscall.ERROR_MORE_DATA { // Grow buffer if needed + if buflen <= uint32(len(buf)) { + break // Buffer not growing, assume race; break + } + buf = make([]uint16, buflen) + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + } + + if err != nil { + return "", err + } + + return syscall.UTF16ToString(buf), nil +} + +// ExpandString expands environment-variable strings and replaces +// them with the values defined for the current user. +// Use ExpandString to expand EXPAND_SZ strings. +func ExpandString(value string) (string, error) { + if value == "" { + return "", nil + } + p, err := syscall.UTF16PtrFromString(value) + if err != nil { + return "", err + } + r := make([]uint16, 100) + for { + n, err := expandEnvironmentStrings(p, &r[0], uint32(len(r))) + if err != nil { + return "", err + } + if n <= uint32(len(r)) { + return syscall.UTF16ToString(r[:n]), nil + } + r = make([]uint16, n) + } +} + +// GetStringsValue retrieves the []string value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetStringsValue returns ErrNotExist. +// If value is not MULTI_SZ, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetStringsValue(name string) (val []string, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return nil, typ, err2 + } + if typ != MULTI_SZ { + return nil, typ, ErrUnexpectedType + } + if len(data) == 0 { + return nil, typ, nil + } + p := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2] + if len(p) == 0 { + return nil, typ, nil + } + if p[len(p)-1] == 0 { + p = p[:len(p)-1] // remove terminating null + } + val = make([]string, 0, 5) + from := 0 + for i, c := range p { + if c == 0 { + val = append(val, string(utf16.Decode(p[from:i]))) + from = i + 1 + } + } + return val, typ, nil +} + +// GetIntegerValue retrieves the integer value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetIntegerValue returns ErrNotExist. +// If value is not DWORD or QWORD, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetIntegerValue(name string) (val uint64, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 8)) + if err2 != nil { + return 0, typ, err2 + } + switch typ { + case DWORD: + if len(data) != 4 { + return 0, typ, errors.New("DWORD value is not 4 bytes long") + } + var val32 uint32 + copy((*[4]byte)(unsafe.Pointer(&val32))[:], data) + return uint64(val32), DWORD, nil + case QWORD: + if len(data) != 8 { + return 0, typ, errors.New("QWORD value is not 8 bytes long") + } + copy((*[8]byte)(unsafe.Pointer(&val))[:], data) + return val, QWORD, nil + default: + return 0, typ, ErrUnexpectedType + } +} + +// GetBinaryValue retrieves the binary value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetBinaryValue returns ErrNotExist. +// If value is not BINARY, it will return the correct value +// type and ErrUnexpectedType. 
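+//
+// As a sketch, the typed getters read like this against an open key k
+// (the value names are illustrative only):
+//
+//	timeout, _, err := k.GetIntegerValue("Timeout")  // DWORD or QWORD
+//	paths, _, err := k.GetStringsValue("SearchPath") // MULTI_SZ
+//	blob, _, err := k.GetBinaryValue("Signature")    // BINARY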
+func (k Key) GetBinaryValue(name string) (val []byte, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return nil, typ, err2 + } + if typ != BINARY { + return nil, typ, ErrUnexpectedType + } + return data, typ, nil +} + +func (k Key) setValue(name string, valtype uint32, data []byte) error { + p, err := syscall.UTF16PtrFromString(name) + if err != nil { + return err + } + if len(data) == 0 { + return regSetValueEx(syscall.Handle(k), p, 0, valtype, nil, 0) + } + return regSetValueEx(syscall.Handle(k), p, 0, valtype, &data[0], uint32(len(data))) +} + +// SetDWordValue sets the data and type of a name value +// under key k to value and DWORD. +func (k Key) SetDWordValue(name string, value uint32) error { + return k.setValue(name, DWORD, (*[4]byte)(unsafe.Pointer(&value))[:]) +} + +// SetQWordValue sets the data and type of a name value +// under key k to value and QWORD. +func (k Key) SetQWordValue(name string, value uint64) error { + return k.setValue(name, QWORD, (*[8]byte)(unsafe.Pointer(&value))[:]) +} + +func (k Key) setStringValue(name string, valtype uint32, value string) error { + v, err := syscall.UTF16FromString(value) + if err != nil { + return err + } + buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2] + return k.setValue(name, valtype, buf) +} + +// SetStringValue sets the data and type of a name value +// under key k to value and SZ. The value must not contain a zero byte. +func (k Key) SetStringValue(name, value string) error { + return k.setStringValue(name, SZ, value) +} + +// SetExpandStringValue sets the data and type of a name value +// under key k to value and EXPAND_SZ. The value must not contain a zero byte. +func (k Key) SetExpandStringValue(name, value string) error { + return k.setStringValue(name, EXPAND_SZ, value) +} + +// SetStringsValue sets the data and type of a name value +// under key k to value and MULTI_SZ. The value strings +// must not contain a zero byte. +func (k Key) SetStringsValue(name string, value []string) error { + ss := "" + for _, s := range value { + for i := 0; i < len(s); i++ { + if s[i] == 0 { + return errors.New("string cannot have 0 inside") + } + } + ss += s + "\x00" + } + v := utf16.Encode([]rune(ss + "\x00")) + buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2] + return k.setValue(name, MULTI_SZ, buf) +} + +// SetBinaryValue sets the data and type of a name value +// under key k to value and BINARY. +func (k Key) SetBinaryValue(name string, value []byte) error { + return k.setValue(name, BINARY, value) +} + +// DeleteValue removes a named value from the key k. +func (k Key) DeleteValue(name string) error { + return regDeleteValue(syscall.Handle(k), syscall.StringToUTF16Ptr(name)) +} + +// ReadValueNames returns the value names of key k. +// The parameter n controls the number of returned names, +// analogous to the way os.File.Readdirnames works. +func (k Key) ReadValueNames(n int) ([]string, error) { + ki, err := k.Stat() + if err != nil { + return nil, err + } + names := make([]string, 0, ki.ValueCount) + buf := make([]uint16, ki.MaxValueNameLen+1) // extra room for terminating null character +loopItems: + for i := uint32(0); ; i++ { + if n > 0 { + if len(names) == n { + return names, nil + } + } + l := uint32(len(buf)) + for { + err := regEnumValue(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil) + if err == nil { + break + } + if err == syscall.ERROR_MORE_DATA { + // Double buffer size and try again. 
+ l = uint32(2 * len(buf)) + buf = make([]uint16, l) + continue + } + if err == _ERROR_NO_MORE_ITEMS { + break loopItems + } + return names, err + } + names = append(names, syscall.UTF16ToString(buf[:l])) + } + if n > len(names) { + return names, io.EOF + } + return names, nil +} diff --git a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go new file mode 100644 index 0000000000..fc1835d8a2 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go @@ -0,0 +1,117 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package registry + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procRegConnectRegistryW = modadvapi32.NewProc("RegConnectRegistryW") + procRegCreateKeyExW = modadvapi32.NewProc("RegCreateKeyExW") + procRegDeleteKeyW = modadvapi32.NewProc("RegDeleteKeyW") + procRegDeleteValueW = modadvapi32.NewProc("RegDeleteValueW") + procRegEnumValueW = modadvapi32.NewProc("RegEnumValueW") + procRegLoadMUIStringW = modadvapi32.NewProc("RegLoadMUIStringW") + procRegSetValueExW = modadvapi32.NewProc("RegSetValueExW") + procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") +) + +func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegConnectRegistryW.Addr(), 3, uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegDeleteKeyW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(subkey)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegDeleteValueW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(name)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) 
(regerrno error) { + r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegLoadMUIStringW.Addr(), 7, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir)), 0, 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) { + r0, _, _ := syscall.Syscall6(procRegSetValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize)) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/tools/LICENSE b/vendor/golang.org/x/tools/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/vendor/golang.org/x/tools/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/golang.org/x/tools/PATENTS b/vendor/golang.org/x/tools/PATENTS new file mode 100644 index 0000000000..733099041f --- /dev/null +++ b/vendor/golang.org/x/tools/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/tools/cmd/stringer/stringer.go b/vendor/golang.org/x/tools/cmd/stringer/stringer.go new file mode 100644 index 0000000000..2b19c93e8e --- /dev/null +++ b/vendor/golang.org/x/tools/cmd/stringer/stringer.go @@ -0,0 +1,660 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Stringer is a tool to automate the creation of methods that satisfy the fmt.Stringer +// interface. Given the name of a (signed or unsigned) integer type T that has constants +// defined, stringer will create a new self-contained Go source file implementing +// +// func (t T) String() string +// +// The file is created in the same package and directory as the package that defines T. +// It has helpful defaults designed for use with go generate. +// +// Stringer works best with constants that are consecutive values such as created using iota, +// but creates good code regardless. In the future it might also provide custom support for +// constant sets that are bit patterns. +// +// For example, given this snippet, +// +// package painkiller +// +// type Pill int +// +// const ( +// Placebo Pill = iota +// Aspirin +// Ibuprofen +// Paracetamol +// Acetaminophen = Paracetamol +// ) +// +// running this command +// +// stringer -type=Pill +// +// in the same directory will create the file pill_string.go, in package painkiller, +// containing a definition of +// +// func (Pill) String() string +// +// That method will translate the value of a Pill constant to the string representation +// of the respective constant name, so that the call fmt.Print(painkiller.Aspirin) will +// print the string "Aspirin". +// +// Typically this process would be run using go generate, like this: +// +// //go:generate stringer -type=Pill +// +// If multiple constants have the same value, the lexically first matching name will +// be used (in the example, Acetaminophen will print as "Paracetamol"). 
+//
+// With no arguments, it processes the package in the current directory.
+// Otherwise, the arguments must name a single directory holding a Go package
+// or a set of Go source files that represent a single Go package.
+//
+// The -type flag accepts a comma-separated list of types so a single run can
+// generate methods for multiple types. The default output file is t_string.go,
+// where t is the lower-cased name of the first type listed. It can be overridden
+// with the -output flag.
+//
+// The -linecomment flag tells stringer to generate the text of any line comment, trimmed
+// of leading spaces, instead of the constant name. For instance, if the constants above had a
+// Pill prefix, one could write
+//
+//	PillAspirin // Aspirin
+//
+// to suppress it in the output.
+package main // import "golang.org/x/tools/cmd/stringer"
+
+import (
+	"bytes"
+	"flag"
+	"fmt"
+	"go/ast"
+	"go/constant"
+	"go/format"
+	"go/token"
+	"go/types"
+	"log"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+
+	"golang.org/x/tools/go/packages"
+)
+
+var (
+	typeNames   = flag.String("type", "", "comma-separated list of type names; must be set")
+	output      = flag.String("output", "", "output file name; default srcdir/<type>_string.go")
+	trimprefix  = flag.String("trimprefix", "", "trim the `prefix` from the generated constant names")
+	linecomment = flag.Bool("linecomment", false, "use line comment text as printed text when present")
+	buildTags   = flag.String("tags", "", "comma-separated list of build tags to apply")
+)
+
+// Usage is a replacement usage function for the flags package.
+func Usage() {
+	fmt.Fprintf(os.Stderr, "Usage of stringer:\n")
+	fmt.Fprintf(os.Stderr, "\tstringer [flags] -type T [directory]\n")
+	fmt.Fprintf(os.Stderr, "\tstringer [flags] -type T files... # Must be a single package\n")
+	fmt.Fprintf(os.Stderr, "For more information, see:\n")
+	fmt.Fprintf(os.Stderr, "\thttps://pkg.go.dev/golang.org/x/tools/cmd/stringer\n")
+	fmt.Fprintf(os.Stderr, "Flags:\n")
+	flag.PrintDefaults()
+}
+
+func main() {
+	log.SetFlags(0)
+	log.SetPrefix("stringer: ")
+	flag.Usage = Usage
+	flag.Parse()
+	if len(*typeNames) == 0 {
+		flag.Usage()
+		os.Exit(2)
+	}
+	types := strings.Split(*typeNames, ",")
+	var tags []string
+	if len(*buildTags) > 0 {
+		tags = strings.Split(*buildTags, ",")
+	}
+
+	// We accept either one directory or a list of files. Which do we have?
+	args := flag.Args()
+	if len(args) == 0 {
+		// Default: process whole package in current directory.
+		args = []string{"."}
+	}
+
+	// Parse the package once.
+	var dir string
+	g := Generator{
+		trimPrefix:  *trimprefix,
+		lineComment: *linecomment,
+	}
+	// TODO(suzmue): accept other patterns for packages (directories, list of files, import paths, etc).
+	if len(args) == 1 && isDirectory(args[0]) {
+		dir = args[0]
+	} else {
+		if len(tags) != 0 {
+			log.Fatal("-tags option applies only to directories, not when files are specified")
+		}
+		dir = filepath.Dir(args[0])
+	}
+
+	g.parsePackage(args, tags)
+
+	// Print the header and package clause.
+	g.Printf("// Code generated by \"stringer %s\"; DO NOT EDIT.\n", strings.Join(os.Args[1:], " "))
+	g.Printf("\n")
+	g.Printf("package %s", g.pkg.name)
+	g.Printf("\n")
+	g.Printf("import \"strconv\"\n") // Used by all methods.
+
+	// Run generate for each type.
+	for _, typeName := range types {
+		g.generate(typeName)
+	}
+
+	// Format the output.
+	src := g.format()
+
+	// Write to file.
+ outputName := *output + if outputName == "" { + baseName := fmt.Sprintf("%s_string.go", types[0]) + outputName = filepath.Join(dir, strings.ToLower(baseName)) + } + err := os.WriteFile(outputName, src, 0644) + if err != nil { + log.Fatalf("writing output: %s", err) + } +} + +// isDirectory reports whether the named file is a directory. +func isDirectory(name string) bool { + info, err := os.Stat(name) + if err != nil { + log.Fatal(err) + } + return info.IsDir() +} + +// Generator holds the state of the analysis. Primarily used to buffer +// the output for format.Source. +type Generator struct { + buf bytes.Buffer // Accumulated output. + pkg *Package // Package we are scanning. + + trimPrefix string + lineComment bool + + logf func(format string, args ...interface{}) // test logging hook; nil when not testing +} + +func (g *Generator) Printf(format string, args ...interface{}) { + fmt.Fprintf(&g.buf, format, args...) +} + +// File holds a single parsed file and associated data. +type File struct { + pkg *Package // Package to which this file belongs. + file *ast.File // Parsed AST. + // These fields are reset for each type being generated. + typeName string // Name of the constant type. + values []Value // Accumulator for constant values of that type. + + trimPrefix string + lineComment bool +} + +type Package struct { + name string + defs map[*ast.Ident]types.Object + files []*File +} + +// parsePackage analyzes the single package constructed from the patterns and tags. +// parsePackage exits if there is an error. +func (g *Generator) parsePackage(patterns []string, tags []string) { + cfg := &packages.Config{ + Mode: packages.NeedName | packages.NeedTypes | packages.NeedTypesInfo | packages.NeedSyntax, + // TODO: Need to think about constants in test files. Maybe write type_string_test.go + // in a separate pass? For later. + Tests: false, + BuildFlags: []string{fmt.Sprintf("-tags=%s", strings.Join(tags, " "))}, + Logf: g.logf, + } + pkgs, err := packages.Load(cfg, patterns...) + if err != nil { + log.Fatal(err) + } + if len(pkgs) != 1 { + log.Fatalf("error: %d packages matching %v", len(pkgs), strings.Join(patterns, " ")) + } + g.addPackage(pkgs[0]) +} + +// addPackage adds a type checked Package and its syntax files to the generator. +func (g *Generator) addPackage(pkg *packages.Package) { + g.pkg = &Package{ + name: pkg.Name, + defs: pkg.TypesInfo.Defs, + files: make([]*File, len(pkg.Syntax)), + } + + for i, file := range pkg.Syntax { + g.pkg.files[i] = &File{ + file: file, + pkg: g.pkg, + trimPrefix: g.trimPrefix, + lineComment: g.lineComment, + } + } +} + +// generate produces the String method for the named type. +func (g *Generator) generate(typeName string) { + values := make([]Value, 0, 100) + for _, file := range g.pkg.files { + // Set the state for this run of the walker. + file.typeName = typeName + file.values = nil + if file.file != nil { + ast.Inspect(file.file, file.genDecl) + values = append(values, file.values...) + } + } + + if len(values) == 0 { + log.Fatalf("no values defined for type %s", typeName) + } + // Generate code that will fail if the constants change value. 
+ g.Printf("func _() {\n") + g.Printf("\t// An \"invalid array index\" compiler error signifies that the constant values have changed.\n") + g.Printf("\t// Re-run the stringer command to generate them again.\n") + g.Printf("\tvar x [1]struct{}\n") + for _, v := range values { + g.Printf("\t_ = x[%s - %s]\n", v.originalName, v.str) + } + g.Printf("}\n") + runs := splitIntoRuns(values) + // The decision of which pattern to use depends on the number of + // runs in the numbers. If there's only one, it's easy. For more than + // one, there's a tradeoff between complexity and size of the data + // and code vs. the simplicity of a map. A map takes more space, + // but so does the code. The decision here (crossover at 10) is + // arbitrary, but considers that for large numbers of runs the cost + // of the linear scan in the switch might become important, and + // rather than use yet another algorithm such as binary search, + // we punt and use a map. In any case, the likelihood of a map + // being necessary for any realistic example other than bitmasks + // is very low. And bitmasks probably deserve their own analysis, + // to be done some other day. + switch { + case len(runs) == 1: + g.buildOneRun(runs, typeName) + case len(runs) <= 10: + g.buildMultipleRuns(runs, typeName) + default: + g.buildMap(runs, typeName) + } +} + +// splitIntoRuns breaks the values into runs of contiguous sequences. +// For example, given 1,2,3,5,6,7 it returns {1,2,3},{5,6,7}. +// The input slice is known to be non-empty. +func splitIntoRuns(values []Value) [][]Value { + // We use stable sort so the lexically first name is chosen for equal elements. + sort.Stable(byValue(values)) + // Remove duplicates. Stable sort has put the one we want to print first, + // so use that one. The String method won't care about which named constant + // was the argument, so the first name for the given value is the only one to keep. + // We need to do this because identical values would cause the switch or map + // to fail to compile. + j := 1 + for i := 1; i < len(values); i++ { + if values[i].value != values[i-1].value { + values[j] = values[i] + j++ + } + } + values = values[:j] + runs := make([][]Value, 0, 10) + for len(values) > 0 { + // One contiguous sequence per outer loop. + i := 1 + for i < len(values) && values[i].value == values[i-1].value+1 { + i++ + } + runs = append(runs, values[:i]) + values = values[i:] + } + return runs +} + +// format returns the gofmt-ed contents of the Generator's buffer. +func (g *Generator) format() []byte { + src, err := format.Source(g.buf.Bytes()) + if err != nil { + // Should never happen, but can arise when developing this code. + // The user can compile the output to see the error. + log.Printf("warning: internal error: invalid Go generated: %s", err) + log.Printf("warning: compile the package to analyze the error") + return g.buf.Bytes() + } + return src +} + +// Value represents a declared constant. +type Value struct { + originalName string // The name of the constant. + name string // The name with trimmed prefix. + // The value is stored as a bit pattern alone. The boolean tells us + // whether to interpret it as an int64 or a uint64; the only place + // this matters is when sorting. + // Much of the time the str field is all we need; it is printed + // by Value.String. + value uint64 // Will be converted to int64 when needed. + signed bool // Whether the constant is a signed type. + str string // The string representation given by the "go/constant" package. 
+} + +func (v *Value) String() string { + return v.str +} + +// byValue lets us sort the constants into increasing order. +// We take care in the Less method to sort in signed or unsigned order, +// as appropriate. +type byValue []Value + +func (b byValue) Len() int { return len(b) } +func (b byValue) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byValue) Less(i, j int) bool { + if b[i].signed { + return int64(b[i].value) < int64(b[j].value) + } + return b[i].value < b[j].value +} + +// genDecl processes one declaration clause. +func (f *File) genDecl(node ast.Node) bool { + decl, ok := node.(*ast.GenDecl) + if !ok || decl.Tok != token.CONST { + // We only care about const declarations. + return true + } + // The name of the type of the constants we are declaring. + // Can change if this is a multi-element declaration. + typ := "" + // Loop over the elements of the declaration. Each element is a ValueSpec: + // a list of names possibly followed by a type, possibly followed by values. + // If the type and value are both missing, we carry down the type (and value, + // but the "go/types" package takes care of that). + for _, spec := range decl.Specs { + vspec := spec.(*ast.ValueSpec) // Guaranteed to succeed as this is CONST. + if vspec.Type == nil && len(vspec.Values) > 0 { + // "X = 1". With no type but a value. If the constant is untyped, + // skip this vspec and reset the remembered type. + typ = "" + + // If this is a simple type conversion, remember the type. + // We don't mind if this is actually a call; a qualified call won't + // be matched (that will be SelectorExpr, not Ident), and only unusual + // situations will result in a function call that appears to be + // a type conversion. + ce, ok := vspec.Values[0].(*ast.CallExpr) + if !ok { + continue + } + id, ok := ce.Fun.(*ast.Ident) + if !ok { + continue + } + typ = id.Name + } + if vspec.Type != nil { + // "X T". We have a type. Remember it. + ident, ok := vspec.Type.(*ast.Ident) + if !ok { + continue + } + typ = ident.Name + } + if typ != f.typeName { + // This is not the type we're looking for. + continue + } + // We now have a list of names (from one line of source code) all being + // declared with the desired type. + // Grab their names and actual values and store them in f.values. + for _, name := range vspec.Names { + if name.Name == "_" { + continue + } + // This dance lets the type checker find the values for us. It's a + // bit tricky: look up the object declared by the name, find its + // types.Const, and extract its value. + obj, ok := f.pkg.defs[name] + if !ok { + log.Fatalf("no value for constant %s", name) + } + info := obj.Type().Underlying().(*types.Basic).Info() + if info&types.IsInteger == 0 { + log.Fatalf("can't handle non-integer constant type %s", typ) + } + value := obj.(*types.Const).Val() // Guaranteed to succeed as this is CONST. 
+ if value.Kind() != constant.Int { + log.Fatalf("can't happen: constant is not an integer %s", name) + } + i64, isInt := constant.Int64Val(value) + u64, isUint := constant.Uint64Val(value) + if !isInt && !isUint { + log.Fatalf("internal error: value of %s is not an integer: %s", name, value.String()) + } + if !isInt { + u64 = uint64(i64) + } + v := Value{ + originalName: name.Name, + value: u64, + signed: info&types.IsUnsigned == 0, + str: value.String(), + } + if c := vspec.Comment; f.lineComment && c != nil && len(c.List) == 1 { + v.name = strings.TrimSpace(c.Text()) + } else { + v.name = strings.TrimPrefix(v.originalName, f.trimPrefix) + } + f.values = append(f.values, v) + } + } + return false +} + +// Helpers + +// usize returns the number of bits of the smallest unsigned integer +// type that will hold n. Used to create the smallest possible slice of +// integers to use as indexes into the concatenated strings. +func usize(n int) int { + switch { + case n < 1<<8: + return 8 + case n < 1<<16: + return 16 + default: + // 2^32 is enough constants for anyone. + return 32 + } +} + +// declareIndexAndNameVars declares the index slices and concatenated names +// strings representing the runs of values. +func (g *Generator) declareIndexAndNameVars(runs [][]Value, typeName string) { + var indexes, names []string + for i, run := range runs { + index, name := g.createIndexAndNameDecl(run, typeName, fmt.Sprintf("_%d", i)) + if len(run) != 1 { + indexes = append(indexes, index) + } + names = append(names, name) + } + g.Printf("const (\n") + for _, name := range names { + g.Printf("\t%s\n", name) + } + g.Printf(")\n\n") + + if len(indexes) > 0 { + g.Printf("var (") + for _, index := range indexes { + g.Printf("\t%s\n", index) + } + g.Printf(")\n\n") + } +} + +// declareIndexAndNameVar is the single-run version of declareIndexAndNameVars +func (g *Generator) declareIndexAndNameVar(run []Value, typeName string) { + index, name := g.createIndexAndNameDecl(run, typeName, "") + g.Printf("const %s\n", name) + g.Printf("var %s\n", index) +} + +// createIndexAndNameDecl returns the pair of declarations for the run. The caller will add "const" and "var". +func (g *Generator) createIndexAndNameDecl(run []Value, typeName string, suffix string) (string, string) { + b := new(bytes.Buffer) + indexes := make([]int, len(run)) + for i := range run { + b.WriteString(run[i].name) + indexes[i] = b.Len() + } + nameConst := fmt.Sprintf("_%s_name%s = %q", typeName, suffix, b.String()) + nameLen := b.Len() + b.Reset() + fmt.Fprintf(b, "_%s_index%s = [...]uint%d{0, ", typeName, suffix, usize(nameLen)) + for i, v := range indexes { + if i > 0 { + fmt.Fprintf(b, ", ") + } + fmt.Fprintf(b, "%d", v) + } + fmt.Fprintf(b, "}") + return b.String(), nameConst +} + +// declareNameVars declares the concatenated names string representing all the values in the runs. +func (g *Generator) declareNameVars(runs [][]Value, typeName string, suffix string) { + g.Printf("const _%s_name%s = \"", typeName, suffix) + for _, run := range runs { + for i := range run { + g.Printf("%s", run[i].name) + } + } + g.Printf("\"\n") +} + +// buildOneRun generates the variables and String method for a single run of contiguous values. +func (g *Generator) buildOneRun(runs [][]Value, typeName string) { + values := runs[0] + g.Printf("\n") + g.declareIndexAndNameVar(values, typeName) + // The generated code is simple enough to write as a Printf format. 
+ lessThanZero := "" + if values[0].signed { + lessThanZero = "i < 0 || " + } + if values[0].value == 0 { // Signed or unsigned, 0 is still 0. + g.Printf(stringOneRun, typeName, usize(len(values)), lessThanZero) + } else { + g.Printf(stringOneRunWithOffset, typeName, values[0].String(), usize(len(values)), lessThanZero) + } +} + +// Arguments to format are: +// +// [1]: type name +// [2]: size of index element (8 for uint8 etc.) +// [3]: less than zero check (for signed types) +const stringOneRun = `func (i %[1]s) String() string { + if %[3]si >= %[1]s(len(_%[1]s_index)-1) { + return "%[1]s(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _%[1]s_name[_%[1]s_index[i]:_%[1]s_index[i+1]] +} +` + +// Arguments to format are: +// [1]: type name +// [2]: lowest defined value for type, as a string +// [3]: size of index element (8 for uint8 etc.) +// [4]: less than zero check (for signed types) +/* + */ +const stringOneRunWithOffset = `func (i %[1]s) String() string { + i -= %[2]s + if %[4]si >= %[1]s(len(_%[1]s_index)-1) { + return "%[1]s(" + strconv.FormatInt(int64(i + %[2]s), 10) + ")" + } + return _%[1]s_name[_%[1]s_index[i] : _%[1]s_index[i+1]] +} +` + +// buildMultipleRuns generates the variables and String method for multiple runs of contiguous values. +// For this pattern, a single Printf format won't do. +func (g *Generator) buildMultipleRuns(runs [][]Value, typeName string) { + g.Printf("\n") + g.declareIndexAndNameVars(runs, typeName) + g.Printf("func (i %s) String() string {\n", typeName) + g.Printf("\tswitch {\n") + for i, values := range runs { + if len(values) == 1 { + g.Printf("\tcase i == %s:\n", &values[0]) + g.Printf("\t\treturn _%s_name_%d\n", typeName, i) + continue + } + if values[0].value == 0 && !values[0].signed { + // For an unsigned lower bound of 0, "0 <= i" would be redundant. + g.Printf("\tcase i <= %s:\n", &values[len(values)-1]) + } else { + g.Printf("\tcase %s <= i && i <= %s:\n", &values[0], &values[len(values)-1]) + } + if values[0].value != 0 { + g.Printf("\t\ti -= %s\n", &values[0]) + } + g.Printf("\t\treturn _%s_name_%d[_%s_index_%d[i]:_%s_index_%d[i+1]]\n", + typeName, i, typeName, i, typeName, i) + } + g.Printf("\tdefault:\n") + g.Printf("\t\treturn \"%s(\" + strconv.FormatInt(int64(i), 10) + \")\"\n", typeName) + g.Printf("\t}\n") + g.Printf("}\n") +} + +// buildMap handles the case where the space is so sparse a map is a reasonable fallback. +// It's a rare situation but has simple code. +func (g *Generator) buildMap(runs [][]Value, typeName string) { + g.Printf("\n") + g.declareNameVars(runs, typeName, "") + g.Printf("\nvar _%s_map = map[%s]string{\n", typeName, typeName) + n := 0 + for _, values := range runs { + for _, value := range values { + g.Printf("\t%s: _%s_name[%d:%d],\n", &value, typeName, n, n+len(value.name)) + n += len(value.name) + } + } + g.Printf("}\n\n") + g.Printf(stringMap, typeName) +} + +// Argument to format is the type name. +const stringMap = `func (i %[1]s) String() string { + if str, ok := _%[1]s_map[i]; ok { + return str + } + return "%[1]s(" + strconv.FormatInt(int64(i), 10) + ")" +} +` diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go new file mode 100644 index 0000000000..03543bd4bb --- /dev/null +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -0,0 +1,186 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package gcexportdata provides functions for locating, reading, and +// writing export data files containing type information produced by the +// gc compiler. This package supports go1.7 export data format and all +// later versions. +// +// Although it might seem convenient for this package to live alongside +// go/types in the standard library, this would cause version skew +// problems for developer tools that use it, since they must be able to +// consume the outputs of the gc compiler both before and after a Go +// update such as from Go 1.7 to Go 1.8. Because this package lives in +// golang.org/x/tools, sites can update their version of this repo some +// time before the Go 1.8 release and rebuild and redeploy their +// developer tools, which will then be able to consume both Go 1.7 and +// Go 1.8 export data files, so they will work before and after the +// Go update. (See discussion at https://golang.org/issue/15651.) +package gcexportdata // import "golang.org/x/tools/go/gcexportdata" + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "go/token" + "go/types" + "io" + "os/exec" + + "golang.org/x/tools/internal/gcimporter" +) + +// Find returns the name of an object (.o) or archive (.a) file +// containing type information for the specified import path, +// using the go command. +// If no file was found, an empty filename is returned. +// +// A relative srcDir is interpreted relative to the current working directory. +// +// Find also returns the package's resolved (canonical) import path, +// reflecting the effects of srcDir and vendoring on importPath. +// +// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages, +// which is more efficient. +func Find(importPath, srcDir string) (filename, path string) { + cmd := exec.Command("go", "list", "-json", "-export", "--", importPath) + cmd.Dir = srcDir + out, err := cmd.CombinedOutput() + if err != nil { + return "", "" + } + var data struct { + ImportPath string + Export string + } + json.Unmarshal(out, &data) + return data.Export, data.ImportPath +} + +// NewReader returns a reader for the export data section of an object +// (.o) or archive (.a) file read from r. The new reader may provide +// additional trailing data beyond the end of the export data. +func NewReader(r io.Reader) (io.Reader, error) { + buf := bufio.NewReader(r) + _, size, err := gcimporter.FindExportData(buf) + if err != nil { + return nil, err + } + + if size >= 0 { + // We were given an archive and found the __.PKGDEF in it. + // This tells us the size of the export data, and we don't + // need to return the entire file. + return &io.LimitedReader{ + R: buf, + N: size, + }, nil + } else { + // We were given an object file. As such, we don't know how large + // the export data is and must return the entire file. + return buf, nil + } +} + +// readAll works the same way as io.ReadAll, but avoids allocations and copies +// by preallocating a byte slice of the necessary size if the size is known up +// front. This is always possible when the input is an archive. In that case, +// NewReader will return the known size using an io.LimitedReader. +func readAll(r io.Reader) ([]byte, error) { + if lr, ok := r.(*io.LimitedReader); ok { + data := make([]byte, lr.N) + _, err := io.ReadFull(lr, data) + return data, err + } + return io.ReadAll(r) +} + +// Read reads export data from in, decodes it, and returns type +// information for the package. 
+// +// The package path (effectively its linker symbol prefix) is +// specified by path, since unlike the package name, this information +// may not be recorded in the export data. +// +// File position information is added to fset. +// +// Read may inspect and add to the imports map to ensure that references +// within the export data to other packages are consistent. The caller +// must ensure that imports[path] does not exist, or exists but is +// incomplete (see types.Package.Complete), and Read inserts the +// resulting package into this map entry. +// +// On return, the state of the reader is undefined. +func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) { + data, err := readAll(in) + if err != nil { + return nil, fmt.Errorf("reading export data for %q: %v", path, err) + } + + if bytes.HasPrefix(data, []byte("!")) { + return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path) + } + + // The indexed export format starts with an 'i'; the older + // binary export format starts with a 'c', 'd', or 'v' + // (from "version"). Select appropriate importer. + if len(data) > 0 { + switch data[0] { + case 'v', 'c', 'd': // binary, till go1.10 + return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) + + case 'i': // indexed, till go1.19 + _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) + return pkg, err + + case 'u': // unified, from go1.20 + _, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path) + return pkg, err + + default: + l := len(data) + if l > 10 { + l = 10 + } + return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), path) + } + } + return nil, fmt.Errorf("empty export data for %s", path) +} + +// Write writes encoded type information for the specified package to out. +// The FileSet provides file position information for named objects. +func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error { + if _, err := io.WriteString(out, "i"); err != nil { + return err + } + return gcimporter.IExportData(out, fset, pkg) +} + +// ReadBundle reads an export bundle from in, decodes it, and returns type +// information for the packages. +// File position information is added to fset. +// +// ReadBundle may inspect and add to the imports map to ensure that references +// within the export bundle to other packages are consistent. +// +// On return, the state of the reader is undefined. +// +// Experimental: This API is experimental and may change in the future. +func ReadBundle(in io.Reader, fset *token.FileSet, imports map[string]*types.Package) ([]*types.Package, error) { + data, err := readAll(in) + if err != nil { + return nil, fmt.Errorf("reading export bundle: %v", err) + } + return gcimporter.IImportBundle(fset, imports, data) +} + +// WriteBundle writes encoded type information for the specified packages to out. +// The FileSet provides file position information for named objects. +// +// Experimental: This API is experimental and may change in the future. 
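+//
+// For orientation, a hedged sketch of the typical read path through this
+// package, where filename and path would come from Find or the build
+// system:
+//
+//	f, err := os.Open(filename)
+//	if err != nil { ... }
+//	defer f.Close()
+//	r, err := gcexportdata.NewReader(f)
+//	if err != nil { ... }
+//	pkg, err := gcexportdata.Read(r, token.NewFileSet(), make(map[string]*types.Package), path)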
+func WriteBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error { + return gcimporter.IExportBundle(out, fset, pkgs) +} diff --git a/vendor/golang.org/x/tools/go/gcexportdata/importer.go b/vendor/golang.org/x/tools/go/gcexportdata/importer.go new file mode 100644 index 0000000000..37a7247e26 --- /dev/null +++ b/vendor/golang.org/x/tools/go/gcexportdata/importer.go @@ -0,0 +1,75 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gcexportdata + +import ( + "fmt" + "go/token" + "go/types" + "os" +) + +// NewImporter returns a new instance of the types.Importer interface +// that reads type information from export data files written by gc. +// The Importer also satisfies types.ImporterFrom. +// +// Export data files are located using "go build" workspace conventions +// and the build.Default context. +// +// Use this importer instead of go/importer.For("gc", ...) to avoid the +// version-skew problems described in the documentation of this package, +// or to control the FileSet or access the imports map populated during +// package loading. +// +// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages, +// which is more efficient. +func NewImporter(fset *token.FileSet, imports map[string]*types.Package) types.ImporterFrom { + return importer{fset, imports} +} + +type importer struct { + fset *token.FileSet + imports map[string]*types.Package +} + +func (imp importer) Import(importPath string) (*types.Package, error) { + return imp.ImportFrom(importPath, "", 0) +} + +func (imp importer) ImportFrom(importPath, srcDir string, mode types.ImportMode) (_ *types.Package, err error) { + filename, path := Find(importPath, srcDir) + if filename == "" { + if importPath == "unsafe" { + // Even for unsafe, call Find first in case + // the package was vendored. + return types.Unsafe, nil + } + return nil, fmt.Errorf("can't find import: %s", importPath) + } + + if pkg, ok := imp.imports[path]; ok && pkg.Complete() { + return pkg, nil // cache hit + } + + // open file + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer func() { + f.Close() + if err != nil { + // add file name to error + err = fmt.Errorf("reading export data: %s: %v", filename, err) + } + }() + + r, err := NewReader(f) + if err != nil { + return nil, err + } + + return Read(r, imp.fset, imp.imports, path) +} diff --git a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go new file mode 100644 index 0000000000..0454cdd78e --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go @@ -0,0 +1,48 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package packagesdriver fetches type sizes for go/packages and go/analysis. 
+package packagesdriver
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"golang.org/x/tools/internal/gocommand"
+)
+
+var debug = false
+
+func GetSizesForArgsGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (string, string, error) {
+	inv.Verb = "list"
+	inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"}
+	stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv)
+	var goarch, compiler string
+	if rawErr != nil {
+		if rawErrMsg := rawErr.Error(); strings.Contains(rawErrMsg, "cannot find main module") || strings.Contains(rawErrMsg, "go.mod file not found") {
+			// User's running outside of a module. All bets are off. Get GOARCH and guess compiler is gc.
+			// TODO(matloob): Is this a problem in practice?
+			inv.Verb = "env"
+			inv.Args = []string{"GOARCH"}
+			envout, enverr := gocmdRunner.Run(ctx, inv)
+			if enverr != nil {
+				return "", "", enverr
+			}
+			goarch = strings.TrimSpace(envout.String())
+			compiler = "gc"
+		} else {
+			return "", "", friendlyErr
+		}
+	} else {
+		fields := strings.Fields(stdout.String())
+		if len(fields) < 2 {
+			return "", "", fmt.Errorf("could not parse GOARCH and Go compiler in format \"<GOARCH> <compiler>\":\nstdout: <<%s>>\nstderr: <<%s>>",
+				stdout.String(), stderr.String())
+		}
+		goarch = fields[0]
+		compiler = fields[1]
+	}
+	return compiler, goarch, nil
+}
diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go
new file mode 100644
index 0000000000..a7a8f73e3d
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/doc.go
@@ -0,0 +1,220 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package packages loads Go packages for inspection and analysis.
+
+The Load function takes as input a list of patterns and returns a list of Package
+structs describing individual packages matched by those patterns.
+The LoadMode controls the amount of detail in the loaded packages.
+
+Load passes most patterns directly to the underlying build tool,
+but all patterns with the prefix "query=", where query is a
+non-empty string of letters from [a-z], are reserved and may be
+interpreted as query operators.
+
+Two query operators are currently supported: "file" and "pattern".
+
+The query "file=path/to/file.go" matches the package or packages enclosing
+the Go source file path/to/file.go. For example "file=~/go/src/fmt/print.go"
+might return the packages "fmt" and "fmt [fmt.test]".
+
+The query "pattern=string" causes "string" to be passed directly to
+the underlying build tool. In most cases this is unnecessary,
+but an application can use Load("pattern=" + x) as an escaping mechanism
+to ensure that x is not interpreted as a query operator if it contains '='.
+
+All other query operators are reserved for future use and currently
+cause Load to report an error.
+
+The Package struct provides basic information about the package, including
+
+  - ID, a unique identifier for the package in the returned set;
+  - GoFiles, the names of the package's Go source files;
+  - Imports, a map from source import strings to the Packages they name;
+  - Types, the type information for the package's exported symbols;
+  - Syntax, the parsed syntax trees for the package's source code; and
+  - TypesInfo, the result of a complete type-check of the package syntax trees.
+
+(See the documentation for type Package for the complete list of fields
+and more detailed descriptions.)
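+
+A hedged sketch of a typical call and traversal (the mode flags chosen
+here are illustrative):
+
+	cfg := &packages.Config{Mode: packages.NeedName | packages.NeedFiles | packages.NeedImports}
+	pkgs, err := packages.Load(cfg, "fmt", "encoding/...")
+	if err != nil {
+		log.Fatal(err)
+	}
+	for _, pkg := range pkgs {
+		fmt.Println(pkg.ID, pkg.GoFiles)
+	}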
+ +For example, + + Load(nil, "bytes", "unicode...") + +returns four Package structs describing the standard library packages +bytes, unicode, unicode/utf16, and unicode/utf8. Note that one pattern +can match multiple packages and that a package might be matched by +multiple patterns: in general it is not possible to determine which +packages correspond to which patterns. + +Note that the list returned by Load contains only the packages matched +by the patterns. Their dependencies can be found by walking the import +graph using the Imports fields. + +The Load function can be configured by passing a pointer to a Config as +the first argument. A nil Config is equivalent to the zero Config, which +causes Load to run in LoadFiles mode, collecting minimal information. +See the documentation for type Config for details. + +As noted earlier, the Config.Mode controls the amount of detail +reported about the loaded packages. See the documentation for type LoadMode +for details. + +Most tools should pass their command-line arguments (after any flags) +uninterpreted to the loader, so that the loader can interpret them +according to the conventions of the underlying build system. +See the Example function for typical usage. +*/ +package packages // import "golang.org/x/tools/go/packages" + +/* + +Motivation and design considerations + +The new package's design solves problems addressed by two existing +packages: go/build, which locates and describes packages, and +golang.org/x/tools/go/loader, which loads, parses and type-checks them. +The go/build.Package structure encodes too much of the 'go build' way +of organizing projects, leaving us in need of a data type that describes a +package of Go source code independent of the underlying build system. +We wanted something that works equally well with go build and vgo, and +also other build systems such as Bazel and Blaze, making it possible to +construct analysis tools that work in all these environments. +Tools such as errcheck and staticcheck were essentially unavailable to +the Go community at Google, and some of Google's internal tools for Go +are unavailable externally. +This new package provides a uniform way to obtain package metadata by +querying each of these build systems, optionally supporting their +preferred command-line notations for packages, so that tools integrate +neatly with users' build environments. The Metadata query function +executes an external query tool appropriate to the current workspace. + +Loading packages always returns the complete import graph "all the way down", +even if all you want is information about a single package, because the query +mechanisms of all the build systems we currently support ({go,vgo} list, and +blaze/bazel aspect-based query) cannot provide detailed information +about one package without visiting all its dependencies too, so there is +no additional asymptotic cost to providing transitive information. +(This property might not be true of a hypothetical 5th build system.) + +In calls to TypeCheck, all initial packages, and any package that +transitively depends on one of them, must be loaded from source. +Consider A->B->C->D->E: if A,C are initial, A,B,C must be loaded from +source; D may be loaded from export data, and E may not be loaded at all +(though it's possible that D's export data mentions it, so a +types.Package may be created for it and exposed.) 
+
+The old loader had a feature to suppress type-checking of function
+bodies on a per-package basis, primarily intended to reduce the work of
+obtaining type information for imported packages. Now that imports are
+satisfied by export data, the optimization no longer seems necessary.
+
+Despite some early attempts, the old loader did not exploit export data,
+instead always using the equivalent of WholeProgram mode. This was due
+to the complexity of mixing source and export data packages (now
+resolved by the upward traversal mentioned above), and because export data
+files were nearly always missing or stale. Now that 'go build' supports
+caching, all the underlying build systems can guarantee to produce
+export data in a reasonable (amortized) time.
+
+Test "main" packages synthesized by the build system are now reported as
+first-class packages, avoiding the need for clients (such as go/ssa) to
+reinvent this generation logic.
+
+One way in which go/packages is simpler than the old loader is in its
+treatment of in-package tests. In-package tests are packages that
+consist of all the files of the library under test, plus the test files.
+The old loader constructed in-package tests by a two-phase process of
+mutation called "augmentation": first it would construct and type check
+all the ordinary library packages and type-check the packages that
+depend on them; then it would add more (test) files to the package and
+type-check again. This two-phase approach had four major problems:
+1) in processing the tests, the loader modified the library package,
+   leaving no way for a client application to see both the test
+   package and the library package; one would mutate into the other.
+2) because test files can declare additional methods on types defined in
+   the library portion of the package, the dispatch of method calls in
+   the library portion was affected by the presence of the test files.
+   This should have been a clue that the packages were logically
+   different.
+3) this model of "augmentation" assumed at most one in-package test
+   per library package, which is true of projects using 'go build',
+   but not other build systems.
+4) because of the two-phase nature of test processing, all packages that
+   import the library package had to be processed before augmentation,
+   forcing a "one-shot" API and preventing the client from calling Load
+   several times in sequence as is now possible in WholeProgram mode.
+   (TypeCheck mode has a similar one-shot restriction for a different reason.)
+
+Early drafts of this package supported "multi-shot" operation.
+Although it allowed clients to make a sequence of calls (or concurrent
+calls) to Load, building up the graph of Packages incrementally,
+it was of marginal value: it complicated the API
+(since it allowed some options to vary across calls but not others),
+it complicated the implementation,
+it cannot be made to work in Types mode, as explained above,
+and it was less efficient than making one combined call (when this is possible).
+Among the clients we have inspected, none made multiple calls to load
+but could not be easily and satisfactorily modified to make only a single call.
+However, application changes may be required.
+For example, the ssadump command loads the user-specified packages
+and in addition the runtime package. It is tempting to simply append
+"runtime" to the user-provided list, but that does not work if the user
+specified an ad-hoc package such as [a.go b.go].
+Instead, ssadump no longer requests the runtime package, +but seeks it among the dependencies of the user-specified packages, +and emits an error if it is not found. + +Overlays: The Overlay field in the Config allows providing alternate contents +for Go source files, by providing a mapping from file path to contents. +go/packages will pull in new imports added in overlay files when go/packages +is run in LoadImports mode or greater. +Overlay support for the go list driver isn't complete yet: if the file doesn't +exist on disk, it will only be recognized in an overlay if it is a non-test file +and the package would be reported even without the overlay. + +Questions & Tasks + +- Add GOARCH/GOOS? + They are not portable concepts, but could be made portable. + Our goal has been to allow users to express themselves using the conventions + of the underlying build system: if the build system honors GOARCH + during a build and during a metadata query, then so should + applications built atop that query mechanism. + Conversely, if the target architecture of the build is determined by + command-line flags, the application can pass the relevant + flags through to the build system using a command such as: + myapp -query_flag="--cpu=amd64" -query_flag="--os=darwin" + However, this approach is low-level, unwieldy, and non-portable. + GOOS and GOARCH seem important enough to warrant a dedicated option. + +- How should we handle partial failures such as a mixture of good and + malformed patterns, existing and non-existent packages, successful and + failed builds, import failures, import cycles, and so on, in a call to + Load? + +- Support bazel, blaze, and go1.10 list, not just go1.11 list. + +- Handle (and test) various partial success cases, e.g. + a mixture of good packages and: + invalid patterns + nonexistent packages + empty packages + packages with malformed package or import declarations + unreadable files + import cycles + other parse errors + type errors + Make sure we record errors at the correct place in the graph. + +- Missing packages among initial arguments are not reported. + Return bogus packages for them, like golist does. + +- "undeclared name" errors (for example) are reported out of source file + order. I suspect this is due to the breadth-first resolution now used + by go/types. Is that a bug? Discuss with gri. + +*/ diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go new file mode 100644 index 0000000000..7242a0a7d2 --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -0,0 +1,101 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file enables an external tool to intercept package requests. +// If the tool is present then its results are used in preference to +// the go list command. + +package packages + +import ( + "bytes" + "encoding/json" + "fmt" + exec "golang.org/x/sys/execabs" + "os" + "strings" +) + +// The Driver Protocol +// +// The driver, given the inputs to a call to Load, returns metadata about the packages specified. +// This allows for different build systems to support go/packages by telling go/packages how the +// packages' source is organized. +// The driver is a binary, either specified by the GOPACKAGESDRIVER environment variable or in +// the path as gopackagesdriver. It's given the inputs to load in its argv. 
See the package
+// documentation in doc.go for the full description of the patterns that need to be supported.
+// A driver receives a JSON-serialized driverRequest struct on standard input and
+// produces a JSON-serialized driverResponse (see definition in packages.go) on its standard output.
+
+// driverRequest is used to provide the portion of Load's Config that is needed by a driver.
+type driverRequest struct {
+	Mode LoadMode `json:"mode"`
+	// Env specifies the environment the underlying build system should be run in.
+	Env []string `json:"env"`
+	// BuildFlags are flags that should be passed to the underlying build system.
+	BuildFlags []string `json:"build_flags"`
+	// Tests specifies whether the patterns should also return test packages.
+	Tests bool `json:"tests"`
+	// Overlay maps file paths (relative to the driver's working directory) to the byte contents
+	// of overlay files.
+	Overlay map[string][]byte `json:"overlay"`
+}
+
+// findExternalDriver returns the file path of a tool that supplies
+// the build system package structure, or "" if not found.
+// If GOPACKAGESDRIVER is set in the environment findExternalDriver returns its
+// value, otherwise it searches for a binary named gopackagesdriver on the PATH.
+func findExternalDriver(cfg *Config) driver {
+	const toolPrefix = "GOPACKAGESDRIVER="
+	tool := ""
+	for _, env := range cfg.Env {
+		if val := strings.TrimPrefix(env, toolPrefix); val != env {
+			tool = val
+		}
+	}
+	if tool != "" && tool == "off" {
+		return nil
+	}
+	if tool == "" {
+		var err error
+		tool, err = exec.LookPath("gopackagesdriver")
+		if err != nil {
+			return nil
+		}
+	}
+	return func(cfg *Config, words ...string) (*driverResponse, error) {
+		req, err := json.Marshal(driverRequest{
+			Mode:       cfg.Mode,
+			Env:        cfg.Env,
+			BuildFlags: cfg.BuildFlags,
+			Tests:      cfg.Tests,
+			Overlay:    cfg.Overlay,
+		})
+		if err != nil {
+			return nil, fmt.Errorf("failed to encode message to driver tool: %v", err)
+		}
+
+		buf := new(bytes.Buffer)
+		stderr := new(bytes.Buffer)
+		cmd := exec.CommandContext(cfg.Context, tool, words...)
+		cmd.Dir = cfg.Dir
+		cmd.Env = cfg.Env
+		cmd.Stdin = bytes.NewReader(req)
+		cmd.Stdout = buf
+		cmd.Stderr = stderr
+
+		if err := cmd.Run(); err != nil {
+			return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr)
+		}
+		if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTDRIVERERRORS") != "" {
+			fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd), stderr)
+		}
+
+		var response driverResponse
+		if err := json.Unmarshal(buf.Bytes(), &response); err != nil {
+			return nil, err
+		}
+		return &response, nil
+	}
+}
diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go
new file mode 100644
index 0000000000..1f1eade0ac
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/golist.go
@@ -0,0 +1,1181 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packages
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"log"
+	"os"
+	"path"
+	"path/filepath"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"unicode"
+
+	exec "golang.org/x/sys/execabs"
+	"golang.org/x/tools/go/internal/packagesdriver"
+	"golang.org/x/tools/internal/gocommand"
+	"golang.org/x/tools/internal/packagesinternal"
+)
+
+// debug controls verbose logging.
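+// It is enabled by setting the GOPACKAGESDEBUG environment variable to a
+// value accepted by strconv.ParseBool (for example "1" or "true").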
+var debug, _ = strconv.ParseBool(os.Getenv("GOPACKAGESDEBUG")) + +// A goTooOldError reports that the go command +// found by exec.LookPath is too old to use the new go list behavior. +type goTooOldError struct { + error +} + +// responseDeduper wraps a driverResponse, deduplicating its contents. +type responseDeduper struct { + seenRoots map[string]bool + seenPackages map[string]*Package + dr *driverResponse +} + +func newDeduper() *responseDeduper { + return &responseDeduper{ + dr: &driverResponse{}, + seenRoots: map[string]bool{}, + seenPackages: map[string]*Package{}, + } +} + +// addAll fills in r with a driverResponse. +func (r *responseDeduper) addAll(dr *driverResponse) { + for _, pkg := range dr.Packages { + r.addPackage(pkg) + } + for _, root := range dr.Roots { + r.addRoot(root) + } + r.dr.GoVersion = dr.GoVersion +} + +func (r *responseDeduper) addPackage(p *Package) { + if r.seenPackages[p.ID] != nil { + return + } + r.seenPackages[p.ID] = p + r.dr.Packages = append(r.dr.Packages, p) +} + +func (r *responseDeduper) addRoot(id string) { + if r.seenRoots[id] { + return + } + r.seenRoots[id] = true + r.dr.Roots = append(r.dr.Roots, id) +} + +type golistState struct { + cfg *Config + ctx context.Context + + envOnce sync.Once + goEnvError error + goEnv map[string]string + + rootsOnce sync.Once + rootDirsError error + rootDirs map[string]string + + goVersionOnce sync.Once + goVersionError error + goVersion int // The X in Go 1.X. + + // vendorDirs caches the (non)existence of vendor directories. + vendorDirs map[string]bool +} + +// getEnv returns Go environment variables. Only specific variables are +// populated -- computing all of them is slow. +func (state *golistState) getEnv() (map[string]string, error) { + state.envOnce.Do(func() { + var b *bytes.Buffer + b, state.goEnvError = state.invokeGo("env", "-json", "GOMOD", "GOPATH") + if state.goEnvError != nil { + return + } + + state.goEnv = make(map[string]string) + decoder := json.NewDecoder(b) + if state.goEnvError = decoder.Decode(&state.goEnv); state.goEnvError != nil { + return + } + }) + return state.goEnv, state.goEnvError +} + +// mustGetEnv is a convenience function that can be used if getEnv has already succeeded. +func (state *golistState) mustGetEnv() map[string]string { + env, err := state.getEnv() + if err != nil { + panic(fmt.Sprintf("mustGetEnv: %v", err)) + } + return env +} + +// goListDriver uses the go list command to interpret the patterns and produce +// the build system package structure. +// See driver for more details. +func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { + // Make sure that any asynchronous go commands are killed when we return. + parentCtx := cfg.Context + if parentCtx == nil { + parentCtx = context.Background() + } + ctx, cancel := context.WithCancel(parentCtx) + defer cancel() + + response := newDeduper() + + state := &golistState{ + cfg: cfg, + ctx: ctx, + vendorDirs: map[string]bool{}, + } + + // Fill in response.Sizes asynchronously if necessary. 
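+	// The sizes query needs a separate invocation of the go command, so it
+	// runs in its own goroutine, concurrently with the go list call(s) below,
+	// and is joined via sizeswg before the response is returned.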
+ var sizeserr error + var sizeswg sync.WaitGroup + if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { + sizeswg.Add(1) + go func() { + compiler, arch, err := packagesdriver.GetSizesForArgsGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner) + sizeserr = err + response.dr.Compiler = compiler + response.dr.Arch = arch + sizeswg.Done() + }() + } + + // Determine files requested in contains patterns + var containFiles []string + restPatterns := make([]string, 0, len(patterns)) + // Extract file= and other [querytype]= patterns. Report an error if querytype + // doesn't exist. +extractQueries: + for _, pattern := range patterns { + eqidx := strings.Index(pattern, "=") + if eqidx < 0 { + restPatterns = append(restPatterns, pattern) + } else { + query, value := pattern[:eqidx], pattern[eqidx+len("="):] + switch query { + case "file": + containFiles = append(containFiles, value) + case "pattern": + restPatterns = append(restPatterns, value) + case "": // not a reserved query + restPatterns = append(restPatterns, pattern) + default: + for _, rune := range query { + if rune < 'a' || rune > 'z' { // not a reserved query + restPatterns = append(restPatterns, pattern) + continue extractQueries + } + } + // Reject all other patterns containing "=" + return nil, fmt.Errorf("invalid query type %q in query pattern %q", query, pattern) + } + } + } + + // See if we have any patterns to pass through to go list. Zero initial + // patterns also requires a go list call, since it's the equivalent of + // ".". + if len(restPatterns) > 0 || len(patterns) == 0 { + dr, err := state.createDriverResponse(restPatterns...) + if err != nil { + return nil, err + } + response.addAll(dr) + } + + if len(containFiles) != 0 { + if err := state.runContainsQueries(response, containFiles); err != nil { + return nil, err + } + } + + // Only use go/packages' overlay processing if we're using a Go version + // below 1.16. Otherwise, go list handles it. + if goVersion, err := state.getGoVersion(); err == nil && goVersion < 16 { + modifiedPkgs, needPkgs, err := state.processGolistOverlay(response) + if err != nil { + return nil, err + } + + var containsCandidates []string + if len(containFiles) > 0 { + containsCandidates = append(containsCandidates, modifiedPkgs...) + containsCandidates = append(containsCandidates, needPkgs...) + } + if err := state.addNeededOverlayPackages(response, needPkgs); err != nil { + return nil, err + } + // Check candidate packages for containFiles. + if len(containFiles) > 0 { + for _, id := range containsCandidates { + pkg, ok := response.seenPackages[id] + if !ok { + response.addPackage(&Package{ + ID: id, + Errors: []Error{{ + Kind: ListError, + Msg: fmt.Sprintf("package %s expected but not seen", id), + }}, + }) + continue + } + for _, f := range containFiles { + for _, g := range pkg.GoFiles { + if sameFile(f, g) { + response.addRoot(id) + } + } + } + } + } + // Add root for any package that matches a pattern. This applies only to + // packages that are modified by overlays, since they are not added as + // roots automatically. 
+ for _, pattern := range restPatterns { + match := matchPattern(pattern) + for _, pkgID := range modifiedPkgs { + pkg, ok := response.seenPackages[pkgID] + if !ok { + continue + } + if match(pkg.PkgPath) { + response.addRoot(pkg.ID) + } + } + } + } + + sizeswg.Wait() + if sizeserr != nil { + return nil, sizeserr + } + return response.dr, nil +} + +func (state *golistState) addNeededOverlayPackages(response *responseDeduper, pkgs []string) error { + if len(pkgs) == 0 { + return nil + } + dr, err := state.createDriverResponse(pkgs...) + if err != nil { + return err + } + for _, pkg := range dr.Packages { + response.addPackage(pkg) + } + _, needPkgs, err := state.processGolistOverlay(response) + if err != nil { + return err + } + return state.addNeededOverlayPackages(response, needPkgs) +} + +func (state *golistState) runContainsQueries(response *responseDeduper, queries []string) error { + for _, query := range queries { + // TODO(matloob): Do only one query per directory. + fdir := filepath.Dir(query) + // Pass absolute path of directory to go list so that it knows to treat it as a directory, + // not a package path. + pattern, err := filepath.Abs(fdir) + if err != nil { + return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err) + } + dirResponse, err := state.createDriverResponse(pattern) + + // If there was an error loading the package, or no packages are returned, + // or the package is returned with errors, try to load the file as an + // ad-hoc package. + // Usually the error will appear in a returned package, but may not if we're + // in module mode and the ad-hoc is located outside a module. + if err != nil || len(dirResponse.Packages) == 0 || len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].GoFiles) == 0 && + len(dirResponse.Packages[0].Errors) == 1 { + var queryErr error + if dirResponse, queryErr = state.adhocPackage(pattern, query); queryErr != nil { + return err // return the original error + } + } + isRoot := make(map[string]bool, len(dirResponse.Roots)) + for _, root := range dirResponse.Roots { + isRoot[root] = true + } + for _, pkg := range dirResponse.Packages { + // Add any new packages to the main set + // We don't bother to filter packages that will be dropped by the changes of roots, + // that will happen anyway during graph construction outside this function. + // Over-reporting packages is not a problem. + response.addPackage(pkg) + // if the package was not a root one, it cannot have the file + if !isRoot[pkg.ID] { + continue + } + for _, pkgFile := range pkg.GoFiles { + if filepath.Base(query) == filepath.Base(pkgFile) { + response.addRoot(pkg.ID) + break + } + } + } + } + return nil +} + +// adhocPackage attempts to load or construct an ad-hoc package for a given +// query, if the original call to the driver produced inadequate results. +func (state *golistState) adhocPackage(pattern, query string) (*driverResponse, error) { + response, err := state.createDriverResponse(query) + if err != nil { + return nil, err + } + // If we get nothing back from `go list`, + // try to make this file into its own ad-hoc package. + // TODO(rstambler): Should this check against the original response? 
+ if len(response.Packages) == 0 { + response.Packages = append(response.Packages, &Package{ + ID: "command-line-arguments", + PkgPath: query, + GoFiles: []string{query}, + CompiledGoFiles: []string{query}, + Imports: make(map[string]*Package), + }) + response.Roots = append(response.Roots, "command-line-arguments") + } + // Handle special cases. + if len(response.Packages) == 1 { + // golang/go#33482: If this is a file= query for ad-hoc packages where + // the file only exists on an overlay, and exists outside of a module, + // add the file to the package and remove the errors. + if response.Packages[0].ID == "command-line-arguments" || + filepath.ToSlash(response.Packages[0].PkgPath) == filepath.ToSlash(query) { + if len(response.Packages[0].GoFiles) == 0 { + filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath + // TODO(matloob): check if the file is outside of a root dir? + for path := range state.cfg.Overlay { + if path == filename { + response.Packages[0].Errors = nil + response.Packages[0].GoFiles = []string{path} + response.Packages[0].CompiledGoFiles = []string{path} + } + } + } + } + } + return response, nil +} + +// Fields must match go list; +// see $GOROOT/src/cmd/go/internal/load/pkg.go. +type jsonPackage struct { + ImportPath string + Dir string + Name string + Export string + GoFiles []string + CompiledGoFiles []string + IgnoredGoFiles []string + IgnoredOtherFiles []string + EmbedPatterns []string + EmbedFiles []string + CFiles []string + CgoFiles []string + CXXFiles []string + MFiles []string + HFiles []string + FFiles []string + SFiles []string + SwigFiles []string + SwigCXXFiles []string + SysoFiles []string + Imports []string + ImportMap map[string]string + Deps []string + Module *Module + TestGoFiles []string + TestImports []string + XTestGoFiles []string + XTestImports []string + ForTest string // q in a "p [q.test]" package, else "" + DepOnly bool + + Error *packagesinternal.PackageError + DepsErrors []*packagesinternal.PackageError +} + +type jsonPackageError struct { + ImportStack []string + Pos string + Err string +} + +func otherFiles(p *jsonPackage) [][]string { + return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles} +} + +// createDriverResponse uses the "go list" command to expand the pattern +// words and return a response for the specified packages. +func (state *golistState) createDriverResponse(words ...string) (*driverResponse, error) { + // go list uses the following identifiers in ImportPath and Imports: + // + // "p" -- importable package or main (command) + // "q.test" -- q's test executable + // "p [q.test]" -- variant of p as built for q's test executable + // "q_test [q.test]" -- q's external test package + // + // The packages p that are built differently for a test q.test + // are q itself, plus any helpers used by the external test q_test, + // typically including "testing" and all its dependencies. + + // Run "go list" for complete + // information on the specified packages. + goVersion, err := state.getGoVersion() + if err != nil { + return nil, err + } + buf, err := state.invokeGo("list", golistargs(state.cfg, words, goVersion)...) + if err != nil { + return nil, err + } + + seen := make(map[string]*jsonPackage) + pkgs := make(map[string]*Package) + additionalErrors := make(map[string][]Error) + // Decode the JSON and convert it to Package form. 
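+	// go list -json emits a stream of JSON objects, one per package, so
+	// they are decoded one at a time until the stream is exhausted.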
+ response := &driverResponse{ + GoVersion: goVersion, + } + for dec := json.NewDecoder(buf); dec.More(); { + p := new(jsonPackage) + if err := dec.Decode(p); err != nil { + return nil, fmt.Errorf("JSON decoding failed: %v", err) + } + + if p.ImportPath == "" { + // The documentation for go list says that “[e]rroneous packages will have + // a non-empty ImportPath”. If for some reason it comes back empty, we + // prefer to error out rather than silently discarding data or handing + // back a package without any way to refer to it. + if p.Error != nil { + return nil, Error{ + Pos: p.Error.Pos, + Msg: p.Error.Err, + } + } + return nil, fmt.Errorf("package missing import path: %+v", p) + } + + // Work around https://golang.org/issue/33157: + // go list -e, when given an absolute path, will find the package contained at + // that directory. But when no package exists there, it will return a fake package + // with an error and the ImportPath set to the absolute path provided to go list. + // Try to convert that absolute path to what its package path would be if it's + // contained in a known module or GOPATH entry. This will allow the package to be + // properly "reclaimed" when overlays are processed. + if filepath.IsAbs(p.ImportPath) && p.Error != nil { + pkgPath, ok, err := state.getPkgPath(p.ImportPath) + if err != nil { + return nil, err + } + if ok { + p.ImportPath = pkgPath + } + } + + if old, found := seen[p.ImportPath]; found { + // If one version of the package has an error, and the other doesn't, assume + // that this is a case where go list is reporting a fake dependency variant + // of the imported package: When a package tries to invalidly import another + // package, go list emits a variant of the imported package (with the same + // import path, but with an error on it, and the package will have a + // DepError set on it). An example of when this can happen is for imports of + // main packages: main packages can not be imported, but they may be + // separately matched and listed by another pattern. + // See golang.org/issue/36188 for more details. + + // The plan is that eventually, hopefully in Go 1.15, the error will be + // reported on the importing package rather than the duplicate "fake" + // version of the imported package. Once all supported versions of Go + // have the new behavior this logic can be deleted. + // TODO(matloob): delete the workaround logic once all supported versions of + // Go return the errors on the proper package. + + // There should be exactly one version of a package that doesn't have an + // error. + if old.Error == nil && p.Error == nil { + if !reflect.DeepEqual(p, old) { + return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath) + } + continue + } + + // Determine if this package's error needs to be bubbled up. + // This is a hack, and we expect for go list to eventually set the error + // on the package. 
+ if old.Error != nil { + var errkind string + if strings.Contains(old.Error.Err, "not an importable package") { + errkind = "not an importable package" + } else if strings.Contains(old.Error.Err, "use of internal package") && strings.Contains(old.Error.Err, "not allowed") { + errkind = "use of internal package not allowed" + } + if errkind != "" { + if len(old.Error.ImportStack) < 1 { + return nil, fmt.Errorf(`internal error: go list gave a %q error with empty import stack`, errkind) + } + importingPkg := old.Error.ImportStack[len(old.Error.ImportStack)-1] + if importingPkg == old.ImportPath { + // Using an older version of Go which put this package itself on top of import + // stack, instead of the importer. Look for importer in second from top + // position. + if len(old.Error.ImportStack) < 2 { + return nil, fmt.Errorf(`internal error: go list gave a %q error with an import stack without importing package`, errkind) + } + importingPkg = old.Error.ImportStack[len(old.Error.ImportStack)-2] + } + additionalErrors[importingPkg] = append(additionalErrors[importingPkg], Error{ + Pos: old.Error.Pos, + Msg: old.Error.Err, + Kind: ListError, + }) + } + } + + // Make sure that if there's a version of the package without an error, + // that's the one reported to the user. + if old.Error == nil { + continue + } + + // This package will replace the old one at the end of the loop. + } + seen[p.ImportPath] = p + + pkg := &Package{ + Name: p.Name, + ID: p.ImportPath, + GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), + CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), + OtherFiles: absJoin(p.Dir, otherFiles(p)...), + EmbedFiles: absJoin(p.Dir, p.EmbedFiles), + EmbedPatterns: absJoin(p.Dir, p.EmbedPatterns), + IgnoredFiles: absJoin(p.Dir, p.IgnoredGoFiles, p.IgnoredOtherFiles), + forTest: p.ForTest, + depsErrors: p.DepsErrors, + Module: p.Module, + } + + if (state.cfg.Mode&typecheckCgo) != 0 && len(p.CgoFiles) != 0 { + if len(p.CompiledGoFiles) > len(p.GoFiles) { + // We need the cgo definitions, which are in the first + // CompiledGoFile after the non-cgo ones. This is a hack but there + // isn't currently a better way to find it. We also need the pure + // Go files and unprocessed cgo files, all of which are already + // in pkg.GoFiles. + cgoTypes := p.CompiledGoFiles[len(p.GoFiles)] + pkg.CompiledGoFiles = append([]string{cgoTypes}, pkg.GoFiles...) + } else { + // golang/go#38990: go list silently fails to do cgo processing + pkg.CompiledGoFiles = nil + pkg.Errors = append(pkg.Errors, Error{ + Msg: "go list failed to return CompiledGoFiles. This may indicate failure to perform cgo processing; try building at the command line. See https://golang.org/issue/38990.", + Kind: ListError, + }) + } + } + + // Work around https://golang.org/issue/28749: + // cmd/go puts assembly, C, and C++ files in CompiledGoFiles. + // Remove files from CompiledGoFiles that are non-go files + // (or are not files that look like they are from the cache). + if len(pkg.CompiledGoFiles) > 0 { + out := pkg.CompiledGoFiles[:0] + for _, f := range pkg.CompiledGoFiles { + if ext := filepath.Ext(f); ext != ".go" && ext != "" { // ext == "" means the file is from the cache, so probably cgo-processed file + continue + } + out = append(out, f) + } + pkg.CompiledGoFiles = out + } + + // Extract the PkgPath from the package's ID. 
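+		// IDs of test variants have the form "p [q.test]", so everything
+		// before the first space is the import path.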
+		if i := strings.IndexByte(pkg.ID, ' '); i >= 0 {
+			pkg.PkgPath = pkg.ID[:i]
+		} else {
+			pkg.PkgPath = pkg.ID
+		}
+
+		if pkg.PkgPath == "unsafe" {
+			pkg.CompiledGoFiles = nil // ignore fake unsafe.go file (#59929)
+		} else if len(pkg.CompiledGoFiles) == 0 {
+			// Work around for pre-go1.11 versions of go list.
+			// TODO(matloob): they should be handled by the fallback.
+			// Can we delete this?
+			pkg.CompiledGoFiles = pkg.GoFiles
+		}
+
+		// Assume go list emits only absolute paths for Dir.
+		if p.Dir != "" && !filepath.IsAbs(p.Dir) {
+			log.Fatalf("internal error: go list returned non-absolute Package.Dir: %s", p.Dir)
+		}
+
+		if p.Export != "" && !filepath.IsAbs(p.Export) {
+			pkg.ExportFile = filepath.Join(p.Dir, p.Export)
+		} else {
+			pkg.ExportFile = p.Export
+		}
+
+		// imports
+		//
+		// Imports contains the IDs of all imported packages.
+		// ImportsMap records (path, ID) only where they differ.
+		ids := make(map[string]bool)
+		for _, id := range p.Imports {
+			ids[id] = true
+		}
+		pkg.Imports = make(map[string]*Package)
+		for path, id := range p.ImportMap {
+			pkg.Imports[path] = &Package{ID: id} // non-identity import
+			delete(ids, id)
+		}
+		for id := range ids {
+			if id == "C" {
+				continue
+			}
+
+			pkg.Imports[id] = &Package{ID: id} // identity import
+		}
+		if !p.DepOnly {
+			response.Roots = append(response.Roots, pkg.ID)
+		}
+
+		// Temporary work-around for golang/go#39986. Parse filenames out of
+		// error messages. This happens if there are unrecoverable syntax
+		// errors in the source, so we can't match on a specific error message.
+		//
+		// TODO(rfindley): remove this heuristic, in favor of considering
+		// InvalidGoFiles from the list driver.
+		if err := p.Error; err != nil && state.shouldAddFilenameFromError(p) {
+			addFilenameFromPos := func(pos string) bool {
+				split := strings.Split(pos, ":")
+				if len(split) < 1 {
+					return false
+				}
+				filename := strings.TrimSpace(split[0])
+				if filename == "" {
+					return false
+				}
+				if !filepath.IsAbs(filename) {
+					filename = filepath.Join(state.cfg.Dir, filename)
+				}
+				info, _ := os.Stat(filename)
+				if info == nil {
+					return false
+				}
+				pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, filename)
+				pkg.GoFiles = append(pkg.GoFiles, filename)
+				return true
+			}
+			found := addFilenameFromPos(err.Pos)
+			// In some cases, go list only reports the error position in the
+			// error text (Err), not in the Pos field. One such case is when
+			// the file's package name is a keyword (see golang.org/issue/39763).
+			if !found {
+				addFilenameFromPos(err.Err)
+			}
+		}
+
+		if p.Error != nil {
+			msg := strings.TrimSpace(p.Error.Err) // Trim to work around golang.org/issue/32363.
+			// Address golang.org/issue/35964 by appending import stack to error message.
+			if msg == "import cycle not allowed" && len(p.Error.ImportStack) != 0 {
+				msg += fmt.Sprintf(": import stack: %v", p.Error.ImportStack)
+			}
+			pkg.Errors = append(pkg.Errors, Error{
+				Pos:  p.Error.Pos,
+				Msg:  msg,
+				Kind: ListError,
+			})
+		}
+
+		pkgs[pkg.ID] = pkg
+	}
+
+	for id, errs := range additionalErrors {
+		if p, ok := pkgs[id]; ok {
+			p.Errors = append(p.Errors, errs...)
+		}
+	}
+	for _, pkg := range pkgs {
+		response.Packages = append(response.Packages, pkg)
+	}
+	sort.Slice(response.Packages, func(i, j int) bool { return response.Packages[i].ID < response.Packages[j].ID })
+
+	return response, nil
+}
+
+func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool {
+	if len(p.GoFiles) > 0 || len(p.CompiledGoFiles) > 0 {
+		return false
+	}
+
+	goV, err := state.getGoVersion()
+	if err != nil {
+		return false
+	}
+
+	// On Go 1.14 and earlier, only add filenames from errors if the import stack is empty.
+	// The import stack behaves differently for these versions than newer Go versions.
+	if goV < 15 {
+		return len(p.Error.ImportStack) == 0
+	}
+
+	// On Go 1.15 and later, only parse filenames out of error if there's no import stack,
+	// or the current package is at the top of the import stack. This is not guaranteed
+	// to work perfectly, but should avoid some cases where files in errors don't belong to this
+	// package.
+	return len(p.Error.ImportStack) == 0 || p.Error.ImportStack[len(p.Error.ImportStack)-1] == p.ImportPath
+}
+
+// getGoVersion returns the effective minor version of the go command.
+func (state *golistState) getGoVersion() (int, error) {
+	state.goVersionOnce.Do(func() {
+		state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.cfg.gocmdRunner)
+	})
+	return state.goVersion, state.goVersionError
+}
+
+// getPkgPath finds the package path of a directory if it's relative to a root
+// directory.
+func (state *golistState) getPkgPath(dir string) (string, bool, error) {
+	absDir, err := filepath.Abs(dir)
+	if err != nil {
+		return "", false, err
+	}
+	roots, err := state.determineRootDirs()
+	if err != nil {
+		return "", false, err
+	}
+
+	for rdir, rpath := range roots {
+		// Make sure that the directory is in the module,
+		// to avoid creating a path relative to another module.
+		if !strings.HasPrefix(absDir, rdir) {
+			continue
+		}
+		// TODO(matloob): This doesn't properly handle symlinks.
+		r, err := filepath.Rel(rdir, dir)
+		if err != nil {
+			continue
+		}
+		if rpath != "" {
+			// We choose only one root even though the directory can belong to
+			// multiple modules or GOPATH entries. This is okay because we only
+			// need to work with absolute dirs when a file is missing from disk,
+			// for instance when gopls calls go/packages in an overlay.
+			// Once the file is saved, gopls (or the next invocation of the tool)
+			// will get the correct result straight from go list.
+			// TODO(matloob): Implement module tiebreaking?
+			return path.Join(rpath, filepath.ToSlash(r)), true, nil
+		}
+		return filepath.ToSlash(r), true, nil
+	}
+	return "", false, nil
+}
+
+// absJoin absolutizes and flattens the lists of files.
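+// For example, absJoin("/dir", []string{"a.go"}, []string{"/abs/b.go"})
+// returns ["/dir/a.go", "/abs/b.go"].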
+func absJoin(dir string, fileses ...[]string) (res []string) { + for _, files := range fileses { + for _, file := range files { + if !filepath.IsAbs(file) { + file = filepath.Join(dir, file) + } + res = append(res, file) + } + } + return res +} + +func jsonFlag(cfg *Config, goVersion int) string { + if goVersion < 19 { + return "-json" + } + var fields []string + added := make(map[string]bool) + addFields := func(fs ...string) { + for _, f := range fs { + if !added[f] { + added[f] = true + fields = append(fields, f) + } + } + } + addFields("Name", "ImportPath", "Error") // These fields are always needed + if cfg.Mode&NeedFiles != 0 || cfg.Mode&NeedTypes != 0 { + addFields("Dir", "GoFiles", "IgnoredGoFiles", "IgnoredOtherFiles", "CFiles", + "CgoFiles", "CXXFiles", "MFiles", "HFiles", "FFiles", "SFiles", + "SwigFiles", "SwigCXXFiles", "SysoFiles") + if cfg.Tests { + addFields("TestGoFiles", "XTestGoFiles") + } + } + if cfg.Mode&NeedTypes != 0 { + // CompiledGoFiles seems to be required for the test case TestCgoNoSyntax, + // even when -compiled isn't passed in. + // TODO(#52435): Should we make the test ask for -compiled, or automatically + // request CompiledGoFiles in certain circumstances? + addFields("Dir", "CompiledGoFiles") + } + if cfg.Mode&NeedCompiledGoFiles != 0 { + addFields("Dir", "CompiledGoFiles", "Export") + } + if cfg.Mode&NeedImports != 0 { + // When imports are requested, DepOnly is used to distinguish between packages + // explicitly requested and transitive imports of those packages. + addFields("DepOnly", "Imports", "ImportMap") + if cfg.Tests { + addFields("TestImports", "XTestImports") + } + } + if cfg.Mode&NeedDeps != 0 { + addFields("DepOnly") + } + if usesExportData(cfg) { + // Request Dir in the unlikely case Export is not absolute. + addFields("Dir", "Export") + } + if cfg.Mode&needInternalForTest != 0 { + addFields("ForTest") + } + if cfg.Mode&needInternalDepsErrors != 0 { + addFields("DepsErrors") + } + if cfg.Mode&NeedModule != 0 { + addFields("Module") + } + if cfg.Mode&NeedEmbedFiles != 0 { + addFields("EmbedFiles") + } + if cfg.Mode&NeedEmbedPatterns != 0 { + addFields("EmbedPatterns") + } + return "-json=" + strings.Join(fields, ",") +} + +func golistargs(cfg *Config, words []string, goVersion int) []string { + const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo + fullargs := []string{ + "-e", jsonFlag(cfg, goVersion), + fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypes|NeedTypesInfo|NeedTypesSizes) != 0), + fmt.Sprintf("-test=%t", cfg.Tests), + fmt.Sprintf("-export=%t", usesExportData(cfg)), + fmt.Sprintf("-deps=%t", cfg.Mode&NeedImports != 0), + // go list doesn't let you pass -test and -find together, + // probably because you'd just get the TestMain. + fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0 && !usesExportData(cfg)), + } + + // golang/go#60456: with go1.21 and later, go list serves pgo variants, which + // can be costly to compute and may result in redundant processing for the + // caller. Disable these variants. If someone wants to add e.g. a NeedPGO + // mode flag, that should be a separate proposal. + if goVersion >= 21 { + fullargs = append(fullargs, "-pgo=off") + } + + fullargs = append(fullargs, cfg.BuildFlags...) + fullargs = append(fullargs, "--") + fullargs = append(fullargs, words...) + return fullargs +} + +// cfgInvocation returns an Invocation that reflects cfg's settings. 
+func (state *golistState) cfgInvocation() gocommand.Invocation { + cfg := state.cfg + return gocommand.Invocation{ + BuildFlags: cfg.BuildFlags, + ModFile: cfg.modFile, + ModFlag: cfg.modFlag, + CleanEnv: cfg.Env != nil, + Env: cfg.Env, + Logf: cfg.Logf, + WorkingDir: cfg.Dir, + } +} + +// invokeGo returns the stdout of a go command invocation. +func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, error) { + cfg := state.cfg + + inv := state.cfgInvocation() + + // For Go versions 1.16 and above, `go list` accepts overlays directly via + // the -overlay flag. Set it, if it's available. + // + // The check for "list" is not necessarily required, but we should avoid + // getting the go version if possible. + if verb == "list" { + goVersion, err := state.getGoVersion() + if err != nil { + return nil, err + } + if goVersion >= 16 { + filename, cleanup, err := state.writeOverlays() + if err != nil { + return nil, err + } + defer cleanup() + inv.Overlay = filename + } + } + inv.Verb = verb + inv.Args = args + gocmdRunner := cfg.gocmdRunner + if gocmdRunner == nil { + gocmdRunner = &gocommand.Runner{} + } + stdout, stderr, friendlyErr, err := gocmdRunner.RunRaw(cfg.Context, inv) + if err != nil { + // Check for 'go' executable not being found. + if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound { + return nil, fmt.Errorf("'go list' driver requires 'go', but %s", exec.ErrNotFound) + } + + exitErr, ok := err.(*exec.ExitError) + if !ok { + // Catastrophic error: + // - context cancellation + return nil, fmt.Errorf("couldn't run 'go': %w", err) + } + + // Old go version? + if strings.Contains(stderr.String(), "flag provided but not defined") { + return nil, goTooOldError{fmt.Errorf("unsupported version of go: %s: %s", exitErr, stderr)} + } + + // Related to #24854 + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "unexpected directory layout") { + return nil, friendlyErr + } + + // Is there an error running the C compiler in cgo? This will be reported in the "Error" field + // and should be suppressed by go list -e. + // + // This condition is not perfect yet because the error message can include other error messages than runtime/cgo. + isPkgPathRune := func(r rune) bool { + // From https://golang.org/ref/spec#Import_declarations: + // Implementation restriction: A compiler may restrict ImportPaths to non-empty strings + // using only characters belonging to Unicode's L, M, N, P, and S general categories + // (the Graphic characters without spaces) and may also exclude the + // characters !"#$%&'()*,:;<=>?[\]^`{|} and the Unicode replacement character U+FFFD. + return unicode.IsOneOf([]*unicode.RangeTable{unicode.L, unicode.M, unicode.N, unicode.P, unicode.S}, r) && + !strings.ContainsRune("!\"#$%&'()*,:;<=>?[\\]^`{|}\uFFFD", r) + } + // golang/go#36770: Handle case where cmd/go prints module download messages before the error. + msg := stderr.String() + for strings.HasPrefix(msg, "go: downloading") { + msg = msg[strings.IndexRune(msg, '\n')+1:] + } + if len(stderr.String()) > 0 && strings.HasPrefix(stderr.String(), "# ") { + msg := msg[len("# "):] + if strings.HasPrefix(strings.TrimLeftFunc(msg, isPkgPathRune), "\n") { + return stdout, nil + } + // Treat pkg-config errors as a special case (golang.org/issue/36770). + if strings.HasPrefix(msg, "pkg-config") { + return stdout, nil + } + } + + // This error only appears in stderr. 
See golang.org/cl/166398 for a fix in go list to show
+		// the error in the Err section of stdout in case -e option is provided.
+		// This fix is provided for backwards compatibility.
+		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must be .go files") {
+			output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+				strings.Trim(stderr.String(), "\n"))
+			return bytes.NewBufferString(output), nil
+		}
+
+		// Similar to the previous error, but currently lacks a fix in Go.
+		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must all be in one directory") {
+			output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+				strings.Trim(stderr.String(), "\n"))
+			return bytes.NewBufferString(output), nil
+		}
+
+		// Backwards compatibility for Go 1.11 because 1.12 and 1.13 put the directory in the ImportPath.
+		// If the package doesn't exist, put the absolute path of the directory into the error message,
+		// as Go 1.13 list does.
+		const noSuchDirectory = "no such directory"
+		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), noSuchDirectory) {
+			errstr := stderr.String()
+			abspath := strings.TrimSpace(errstr[strings.Index(errstr, noSuchDirectory)+len(noSuchDirectory):])
+			output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+				abspath, strings.Trim(stderr.String(), "\n"))
+			return bytes.NewBufferString(output), nil
+		}
+
+		// Workaround for #29280: go list -e has incorrect behavior when an ad-hoc package doesn't exist.
+		// Note that the error message we look for in this case is different from the one looked for above.
+		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no such file or directory") {
+			output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+				strings.Trim(stderr.String(), "\n"))
+			return bytes.NewBufferString(output), nil
+		}
+
+		// Workaround for #34273. go list -e with GO111MODULE=on has incorrect behavior when listing a
+		// directory outside any module.
+		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside available modules") {
+			output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+				// TODO(matloob): command-line-arguments isn't correct here.
+				"command-line-arguments", strings.Trim(stderr.String(), "\n"))
+			return bytes.NewBufferString(output), nil
+		}
+
+		// Another variation of the previous error.
+		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside module root") {
+			output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+				// TODO(matloob): command-line-arguments isn't correct here.
+				"command-line-arguments", strings.Trim(stderr.String(), "\n"))
+			return bytes.NewBufferString(output), nil
+		}
+
+		// Workaround for an instance of golang.org/issue/26755: go list -e will return a non-zero exit
+		// status if there's a dependency on a package that doesn't exist. But it should return
+		// a zero exit status and set an error on that package.
+		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no Go files in") {
+			// Don't clobber stdout if `go list` actually returned something.
+ if len(stdout.String()) > 0 { + return stdout, nil + } + // try to extract package name from string + stderrStr := stderr.String() + var importPath string + colon := strings.Index(stderrStr, ":") + if colon > 0 && strings.HasPrefix(stderrStr, "go build ") { + importPath = stderrStr[len("go build "):colon] + } + output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + importPath, strings.Trim(stderrStr, "\n")) + return bytes.NewBufferString(output), nil + } + + // Export mode entails a build. + // If that build fails, errors appear on stderr + // (despite the -e flag) and the Export field is blank. + // Do not fail in that case. + // The same is true if an ad-hoc package given to go list doesn't exist. + // TODO(matloob): Remove these once we can depend on go list to exit with a zero status with -e even when + // packages don't exist or a build fails. + if !usesExportData(cfg) && !containsGoFile(args) { + return nil, friendlyErr + } + } + return stdout, nil +} + +// OverlayJSON is the format overlay files are expected to be in. +// The Replace map maps from overlaid paths to replacement paths: +// the Go command will forward all reads trying to open +// each overlaid path to its replacement path, or consider the overlaid +// path not to exist if the replacement path is empty. +// +// From golang/go#39958. +type OverlayJSON struct { + Replace map[string]string `json:"replace,omitempty"` +} + +// writeOverlays writes out files for go list's -overlay flag, as described +// above. +func (state *golistState) writeOverlays() (filename string, cleanup func(), err error) { + // Do nothing if there are no overlays in the config. + if len(state.cfg.Overlay) == 0 { + return "", func() {}, nil + } + dir, err := os.MkdirTemp("", "gopackages-*") + if err != nil { + return "", nil, err + } + // The caller must clean up this directory, unless this function returns an + // error. + cleanup = func() { + os.RemoveAll(dir) + } + defer func() { + if err != nil { + cleanup() + } + }() + overlays := map[string]string{} + for k, v := range state.cfg.Overlay { + // Create a unique filename for the overlaid files, to avoid + // creating nested directories. + noSeparator := strings.Join(strings.Split(filepath.ToSlash(k), "/"), "") + f, err := os.CreateTemp(dir, fmt.Sprintf("*-%s", noSeparator)) + if err != nil { + return "", func() {}, err + } + if _, err := f.Write(v); err != nil { + return "", func() {}, err + } + if err := f.Close(); err != nil { + return "", func() {}, err + } + overlays[k] = f.Name() + } + b, err := json.Marshal(OverlayJSON{Replace: overlays}) + if err != nil { + return "", func() {}, err + } + // Write out the overlay file that contains the filepath mappings. 
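+	// The marshaled JSON maps each original path to its replacement, e.g.
+	// {"replace": {"/src/a.go": "/tmp/gopackages-xxxxx/overlay-a.go"}},
+	// matching the OverlayJSON type above.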
+ filename = filepath.Join(dir, "overlay.json") + if err := os.WriteFile(filename, b, 0665); err != nil { + return "", func() {}, err + } + return filename, cleanup, nil +} + +func containsGoFile(s []string) bool { + for _, f := range s { + if strings.HasSuffix(f, ".go") { + return true + } + } + return false +} + +func cmdDebugStr(cmd *exec.Cmd) string { + env := make(map[string]string) + for _, kv := range cmd.Env { + split := strings.SplitN(kv, "=", 2) + k, v := split[0], split[1] + env[k] = v + } + + var args []string + for _, arg := range cmd.Args { + quoted := strconv.Quote(arg) + if quoted[1:len(quoted)-1] != arg || strings.Contains(arg, " ") { + args = append(args, quoted) + } else { + args = append(args, arg) + } + } + return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " ")) +} diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go new file mode 100644 index 0000000000..9576b472f9 --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/golist_overlay.go @@ -0,0 +1,575 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "encoding/json" + "fmt" + "go/parser" + "go/token" + "os" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + + "golang.org/x/tools/internal/gocommand" +) + +// processGolistOverlay provides rudimentary support for adding +// files that don't exist on disk to an overlay. The results can be +// sometimes incorrect. +// TODO(matloob): Handle unsupported cases, including the following: +// - determining the correct package to add given a new import path +func (state *golistState) processGolistOverlay(response *responseDeduper) (modifiedPkgs, needPkgs []string, err error) { + havePkgs := make(map[string]string) // importPath -> non-test package ID + needPkgsSet := make(map[string]bool) + modifiedPkgsSet := make(map[string]bool) + + pkgOfDir := make(map[string][]*Package) + for _, pkg := range response.dr.Packages { + // This is an approximation of import path to id. This can be + // wrong for tests, vendored packages, and a number of other cases. + havePkgs[pkg.PkgPath] = pkg.ID + dir, err := commonDir(pkg.GoFiles) + if err != nil { + return nil, nil, err + } + if dir != "" { + pkgOfDir[dir] = append(pkgOfDir[dir], pkg) + } + } + + // If no new imports are added, it is safe to avoid loading any needPkgs. + // Otherwise, it's hard to tell which package is actually being loaded + // (due to vendoring) and whether any modified package will show up + // in the transitive set of dependencies (because new imports are added, + // potentially modifying the transitive set of dependencies). + var overlayAddsImports bool + + // If both a package and its test package are created by the overlay, we + // need the real package first. Process all non-test files before test + // files, and make the whole process deterministic while we're at it. + var overlayFiles []string + for opath := range state.cfg.Overlay { + overlayFiles = append(overlayFiles, opath) + } + sort.Slice(overlayFiles, func(i, j int) bool { + iTest := strings.HasSuffix(overlayFiles[i], "_test.go") + jTest := strings.HasSuffix(overlayFiles[j], "_test.go") + if iTest != jTest { + return !iTest // non-tests are before tests. 
+		}
+		return overlayFiles[i] < overlayFiles[j]
+	})
+	for _, opath := range overlayFiles {
+		contents := state.cfg.Overlay[opath]
+		base := filepath.Base(opath)
+		dir := filepath.Dir(opath)
+		var pkg *Package           // if opath belongs to both a package and its test variant, this will be the test variant
+		var testVariantOf *Package // if opath is a test file, this is the package it is testing
+		var fileExists bool
+		isTestFile := strings.HasSuffix(opath, "_test.go")
+		pkgName, ok := extractPackageName(opath, contents)
+		if !ok {
+			// Don't bother adding a file that doesn't even have a parsable package statement
+			// to the overlay.
+			continue
+		}
+		// If all the overlay files belong to a different package, change the
+		// package name to that package.
+		maybeFixPackageName(pkgName, isTestFile, pkgOfDir[dir])
+	nextPackage:
+		for _, p := range response.dr.Packages {
+			if pkgName != p.Name && p.ID != "command-line-arguments" {
+				continue
+			}
+			for _, f := range p.GoFiles {
+				if !sameFile(filepath.Dir(f), dir) {
+					continue
+				}
+				// Make sure to capture information on the package's test variant, if needed.
+				if isTestFile && !hasTestFiles(p) {
+					// TODO(matloob): Are there packages other than the 'production' variant
+					// of a package that this can match? This shouldn't match the test main package
+					// because the file is generated in another directory.
+					testVariantOf = p
+					continue nextPackage
+				} else if !isTestFile && hasTestFiles(p) {
+					// We're examining a test variant, but the overlaid file is
+					// a non-test file. Because the overlay implementation
+					// (currently) only adds a file to one package, skip this
+					// package, so that we can add the file to the production
+					// variant of the package. (https://golang.org/issue/36857
+					// tracks handling overlays on both the production and test
+					// variant of a package).
+					continue nextPackage
+				}
+				if pkg != nil && p != pkg && pkg.PkgPath == p.PkgPath {
+					// We have already seen the production version of the package
+					// for which p is a test variant.
+					if hasTestFiles(p) {
+						testVariantOf = pkg
+					}
+				}
+				pkg = p
+				if filepath.Base(f) == base {
+					fileExists = true
+				}
+			}
+		}
+		// The overlay could have included an entirely new package or an
+		// ad-hoc package. An ad-hoc package is one that we have manually
+		// constructed from inadequate `go list` results for a file= query.
+		// It will have the ID command-line-arguments.
+		if pkg == nil || pkg.ID == "command-line-arguments" {
+			// Try to find the module or gopath dir the file is contained in.
+			// Then for modules, add the module path to the beginning.
+			pkgPath, ok, err := state.getPkgPath(dir)
+			if err != nil {
+				return nil, nil, err
+			}
+			if !ok {
+				break
+			}
+			var forTest string // only set for x tests
+			isXTest := strings.HasSuffix(pkgName, "_test")
+			if isXTest {
+				forTest = pkgPath
+				pkgPath += "_test"
+			}
+			id := pkgPath
+			if isTestFile {
+				if isXTest {
+					id = fmt.Sprintf("%s [%s.test]", pkgPath, forTest)
+				} else {
+					id = fmt.Sprintf("%s [%s.test]", pkgPath, pkgPath)
+				}
+			}
+			if pkg != nil {
+				// TODO(rstambler): We should change the package's path and ID
+				// here. The only issue is that this messes with the roots.
+			} else {
+				// Try to reclaim a package with the same ID, if it exists in the response.
+				for _, p := range response.dr.Packages {
+					if reclaimPackage(p, id, opath, contents) {
+						pkg = p
+						break
+					}
+				}
+				// Otherwise, create a new package.
+ if pkg == nil { + pkg = &Package{ + PkgPath: pkgPath, + ID: id, + Name: pkgName, + Imports: make(map[string]*Package), + } + response.addPackage(pkg) + havePkgs[pkg.PkgPath] = id + // Add the production package's sources for a test variant. + if isTestFile && !isXTest && testVariantOf != nil { + pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...) + pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...) + // Add the package under test and its imports to the test variant. + pkg.forTest = testVariantOf.PkgPath + for k, v := range testVariantOf.Imports { + pkg.Imports[k] = &Package{ID: v.ID} + } + } + if isXTest { + pkg.forTest = forTest + } + } + } + } + if !fileExists { + pkg.GoFiles = append(pkg.GoFiles, opath) + // TODO(matloob): Adding the file to CompiledGoFiles can exhibit the wrong behavior + // if the file will be ignored due to its build tags. + pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, opath) + modifiedPkgsSet[pkg.ID] = true + } + imports, err := extractImports(opath, contents) + if err != nil { + // Let the parser or type checker report errors later. + continue + } + for _, imp := range imports { + // TODO(rstambler): If the package is an x test and the import has + // a test variant, make sure to replace it. + if _, found := pkg.Imports[imp]; found { + continue + } + overlayAddsImports = true + id, ok := havePkgs[imp] + if !ok { + var err error + id, err = state.resolveImport(dir, imp) + if err != nil { + return nil, nil, err + } + } + pkg.Imports[imp] = &Package{ID: id} + // Add dependencies to the non-test variant version of this package as well. + if testVariantOf != nil { + testVariantOf.Imports[imp] = &Package{ID: id} + } + } + } + + // toPkgPath guesses the package path given the id. + toPkgPath := func(sourceDir, id string) (string, error) { + if i := strings.IndexByte(id, ' '); i >= 0 { + return state.resolveImport(sourceDir, id[:i]) + } + return state.resolveImport(sourceDir, id) + } + + // Now that new packages have been created, do another pass to determine + // the new set of missing packages. + for _, pkg := range response.dr.Packages { + for _, imp := range pkg.Imports { + if len(pkg.GoFiles) == 0 { + return nil, nil, fmt.Errorf("cannot resolve imports for package %q with no Go files", pkg.PkgPath) + } + pkgPath, err := toPkgPath(filepath.Dir(pkg.GoFiles[0]), imp.ID) + if err != nil { + return nil, nil, err + } + if _, ok := havePkgs[pkgPath]; !ok { + needPkgsSet[pkgPath] = true + } + } + } + + if overlayAddsImports { + needPkgs = make([]string, 0, len(needPkgsSet)) + for pkg := range needPkgsSet { + needPkgs = append(needPkgs, pkg) + } + } + modifiedPkgs = make([]string, 0, len(modifiedPkgsSet)) + for pkg := range modifiedPkgsSet { + modifiedPkgs = append(modifiedPkgs, pkg) + } + return modifiedPkgs, needPkgs, err +} + +// resolveImport finds the ID of a package given its import path. +// In particular, it will find the right vendored copy when in GOPATH mode. 
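+// For example, given sourceDir $GOPATH/src/a/b and importPath "x/y", it may
+// resolve to "a/vendor/x/y" if that vendor directory exists on disk.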
+func (state *golistState) resolveImport(sourceDir, importPath string) (string, error) { + env, err := state.getEnv() + if err != nil { + return "", err + } + if env["GOMOD"] != "" { + return importPath, nil + } + + searchDir := sourceDir + for { + vendorDir := filepath.Join(searchDir, "vendor") + exists, ok := state.vendorDirs[vendorDir] + if !ok { + info, err := os.Stat(vendorDir) + exists = err == nil && info.IsDir() + state.vendorDirs[vendorDir] = exists + } + + if exists { + vendoredPath := filepath.Join(vendorDir, importPath) + if info, err := os.Stat(vendoredPath); err == nil && info.IsDir() { + // We should probably check for .go files here, but shame on anyone who fools us. + path, ok, err := state.getPkgPath(vendoredPath) + if err != nil { + return "", err + } + if ok { + return path, nil + } + } + } + + // We know we've hit the top of the filesystem when we Dir / and get /, + // or C:\ and get C:\, etc. + next := filepath.Dir(searchDir) + if next == searchDir { + break + } + searchDir = next + } + return importPath, nil +} + +func hasTestFiles(p *Package) bool { + for _, f := range p.GoFiles { + if strings.HasSuffix(f, "_test.go") { + return true + } + } + return false +} + +// determineRootDirs returns a mapping from absolute directories that could +// contain code to their corresponding import path prefixes. +func (state *golistState) determineRootDirs() (map[string]string, error) { + env, err := state.getEnv() + if err != nil { + return nil, err + } + if env["GOMOD"] != "" { + state.rootsOnce.Do(func() { + state.rootDirs, state.rootDirsError = state.determineRootDirsModules() + }) + } else { + state.rootsOnce.Do(func() { + state.rootDirs, state.rootDirsError = state.determineRootDirsGOPATH() + }) + } + return state.rootDirs, state.rootDirsError +} + +func (state *golistState) determineRootDirsModules() (map[string]string, error) { + // List all of the modules--the first will be the directory for the main + // module. Any replaced modules will also need to be treated as roots. + // Editing files in the module cache isn't a great idea, so we don't + // plan to ever support that. + out, err := state.invokeGo("list", "-m", "-json", "all") + if err != nil { + // 'go list all' will fail if we're outside of a module and + // GO111MODULE=on. Try falling back without 'all'. + var innerErr error + out, innerErr = state.invokeGo("list", "-m", "-json") + if innerErr != nil { + return nil, err + } + } + roots := map[string]string{} + modules := map[string]string{} + var i int + for dec := json.NewDecoder(out); dec.More(); { + mod := new(gocommand.ModuleJSON) + if err := dec.Decode(mod); err != nil { + return nil, err + } + if mod.Dir != "" && mod.Path != "" { + // This is a valid module; add it to the map. + absDir, err := filepath.Abs(mod.Dir) + if err != nil { + return nil, err + } + modules[absDir] = mod.Path + // The first result is the main module. 
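+			// For context, a hedged sketch of one decoded entry (field names
+			// from gocommand.ModuleJSON, values made up):
+			//
+			//	{"Path": "example.com/m", "Dir": "/home/user/m",
+			//	 "Replace": {"Path": "../m2", "Dir": "/home/user/m2"}}
+			//
+			// Both the main module (i == 0) and replaced modules become roots.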
+			if i == 0 || mod.Replace != nil && mod.Replace.Path != "" {
+				roots[absDir] = mod.Path
+			}
+		}
+		i++
+	}
+	return roots, nil
+}
+
+func (state *golistState) determineRootDirsGOPATH() (map[string]string, error) {
+	m := map[string]string{}
+	for _, dir := range filepath.SplitList(state.mustGetEnv()["GOPATH"]) {
+		absDir, err := filepath.Abs(dir)
+		if err != nil {
+			return nil, err
+		}
+		m[filepath.Join(absDir, "src")] = ""
+	}
+	return m, nil
+}
+
+func extractImports(filename string, contents []byte) ([]string, error) {
+	f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.ImportsOnly) // TODO(matloob): reuse fileset?
+	if err != nil {
+		return nil, err
+	}
+	var res []string
+	for _, imp := range f.Imports {
+		quotedPath := imp.Path.Value
+		path, err := strconv.Unquote(quotedPath)
+		if err != nil {
+			return nil, err
+		}
+		res = append(res, path)
+	}
+	return res, nil
+}
+
+// reclaimPackage attempts to reuse a package that failed to load in an overlay.
+//
+// If the package has errors and has no Name, GoFiles, or Imports,
+// then it's possible that it doesn't yet exist on disk.
+func reclaimPackage(pkg *Package, id string, filename string, contents []byte) bool {
+	// TODO(rstambler): Check the message of the actual error?
+	// It differs between $GOPATH and module mode.
+	if pkg.ID != id {
+		return false
+	}
+	if len(pkg.Errors) != 1 {
+		return false
+	}
+	if pkg.Name != "" || pkg.ExportFile != "" {
+		return false
+	}
+	if len(pkg.GoFiles) > 0 || len(pkg.CompiledGoFiles) > 0 || len(pkg.OtherFiles) > 0 {
+		return false
+	}
+	if len(pkg.Imports) > 0 {
+		return false
+	}
+	pkgName, ok := extractPackageName(filename, contents)
+	if !ok {
+		return false
+	}
+	pkg.Name = pkgName
+	pkg.Errors = nil
+	return true
+}
+
+func extractPackageName(filename string, contents []byte) (string, bool) {
+	// TODO(rstambler): Check the message of the actual error?
+	// It differs between $GOPATH and module mode.
+	f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.PackageClauseOnly) // TODO(matloob): reuse fileset?
+	if err != nil {
+		return "", false
+	}
+	return f.Name.Name, true
+}
+
+// commonDir returns the directory that all files are in, "" if files is empty,
+// or an error if they aren't in the same directory.
+func commonDir(files []string) (string, error) {
+	seen := make(map[string]bool)
+	for _, f := range files {
+		seen[filepath.Dir(f)] = true
+	}
+	if len(seen) > 1 {
+		return "", fmt.Errorf("files (%v) are in more than one directory: %v", files, seen)
+	}
+	for k := range seen {
+		// seen has only one element; return it.
+		return k, nil
+	}
+	return "", nil // no files
+}
+
+// It is possible that the files in the disk directory dir have a different package
+// name from newName, which is deduced from the overlays. If the files on disk all
+// share a single package name and that name differs from newName, the packages in
+// pkgsOfDir are renamed to newName in place (subject to the x-test heuristic below).
+func maybeFixPackageName(newName string, isTestFile bool, pkgsOfDir []*Package) {
+	names := make(map[string]int)
+	for _, p := range pkgsOfDir {
+		names[p.Name]++
+	}
+	if len(names) != 1 {
+		// some files are in different packages
+		return
+	}
+	var oldName string
+	for k := range names {
+		oldName = k
+	}
+	if newName == oldName {
+		return
+	}
+	// We might have a case where all of the package names in the directory are
+	// the same, but the overlay file is for an x test, which belongs to its
+	// own package. If the x test does not yet exist on disk, we may not yet
+	// have its package name on disk, but we should not rename the packages.
+	//
+	// We use a heuristic to determine if this file belongs to an x test:
+	// the test file's package name should either have a _test suffix or look
+	// like "oldName_test".
+	maybeXTest := strings.HasPrefix(oldName+"_test", newName) || strings.HasSuffix(newName, "_test")
+	if isTestFile && maybeXTest {
+		return
+	}
+	for _, p := range pkgsOfDir {
+		p.Name = newName
+	}
+}
+
+// This function is copy-pasted from
+// https://github.com/golang/go/blob/9706f510a5e2754595d716bd64be8375997311fb/src/cmd/go/internal/search/search.go#L360.
+// It should be deleted when we remove support for overlays from go/packages.
+//
+// NOTE: This does not handle any ./... or ./ style queries, as this function
+// doesn't know the working directory.
+//
+// matchPattern(pattern)(name) reports whether
+// name matches pattern. Pattern is a limited glob
+// pattern in which '...' means 'any string' and there
+// is no other special syntax.
+// Unfortunately, there are two special cases. Quoting "go help packages":
+//
+// First, /... at the end of the pattern can match an empty string,
+// so that net/... matches both net and packages in its subdirectories, like net/http.
+// Second, any slash-separated pattern element containing a wildcard never
+// participates in a match of the "vendor" element in the path of a vendored
+// package, so that ./... does not match packages in subdirectories of
+// ./vendor or ./mycode/vendor, but ./vendor/... and ./mycode/vendor/... do.
+// Note, however, that a directory named vendor that itself contains code
+// is not a vendored package: cmd/vendor would be a command named vendor,
+// and the pattern cmd/... matches it.
+func matchPattern(pattern string) func(name string) bool {
+	// Convert pattern to regular expression.
+	// The strategy for the trailing /... is to nest it in an explicit ? expression.
+	// The strategy for the vendor exclusion is to change the unmatchable
+	// vendor strings to a disallowed code point (vendorChar) and to use
+	// "(anything but that codepoint)*" as the implementation of the ... wildcard.
+	// This is a bit complicated but the obvious alternative,
+	// namely a hand-written search like in most shell glob matchers,
+	// is too easy to make accidentally exponential.
+	// Using package regexp guarantees linear-time matching.
+
+	const vendorChar = "\x00"
+
+	if strings.Contains(pattern, vendorChar) {
+		return func(name string) bool { return false }
+	}
+
+	re := regexp.QuoteMeta(pattern)
+	re = replaceVendor(re, vendorChar)
+	switch {
+	case strings.HasSuffix(re, `/`+vendorChar+`/\.\.\.`):
+		re = strings.TrimSuffix(re, `/`+vendorChar+`/\.\.\.`) + `(/vendor|/` + vendorChar + `/\.\.\.)`
+	case re == vendorChar+`/\.\.\.`:
+		re = `(/vendor|/` + vendorChar + `/\.\.\.)`
+	case strings.HasSuffix(re, `/\.\.\.`):
+		re = strings.TrimSuffix(re, `/\.\.\.`) + `(/\.\.\.)?`
+	}
+	re = strings.ReplaceAll(re, `\.\.\.`, `[^`+vendorChar+`]*`)
+
+	reg := regexp.MustCompile(`^` + re + `$`)
+
+	return func(name string) bool {
+		if strings.Contains(name, vendorChar) {
+			return false
+		}
+		return reg.MatchString(replaceVendor(name, vendorChar))
+	}
+}
+
+// replaceVendor returns the result of replacing
+// non-trailing vendor path elements in x with repl.
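+// For example (illustrative): replaceVendor("a/vendor/p", "\x00") yields
+// "a/\x00/p", while a trailing element is left alone:
+// replaceVendor("a/vendor", "\x00") yields "a/vendor".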
+func replaceVendor(x, repl string) string { + if !strings.Contains(x, "vendor") { + return x + } + elem := strings.Split(x, "/") + for i := 0; i < len(elem)-1; i++ { + if elem[i] == "vendor" { + elem[i] = repl + } + } + return strings.Join(elem, "/") +} diff --git a/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/vendor/golang.org/x/tools/go/packages/loadmode_string.go new file mode 100644 index 0000000000..5c080d21b5 --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/loadmode_string.go @@ -0,0 +1,57 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "fmt" + "strings" +) + +var allModes = []LoadMode{ + NeedName, + NeedFiles, + NeedCompiledGoFiles, + NeedImports, + NeedDeps, + NeedExportFile, + NeedTypes, + NeedSyntax, + NeedTypesInfo, + NeedTypesSizes, +} + +var modeStrings = []string{ + "NeedName", + "NeedFiles", + "NeedCompiledGoFiles", + "NeedImports", + "NeedDeps", + "NeedExportFile", + "NeedTypes", + "NeedSyntax", + "NeedTypesInfo", + "NeedTypesSizes", +} + +func (mod LoadMode) String() string { + m := mod + if m == 0 { + return "LoadMode(0)" + } + var out []string + for i, x := range allModes { + if x > m { + break + } + if (m & x) != 0 { + out = append(out, modeStrings[i]) + m = m ^ x + } + } + if m != 0 { + out = append(out, "Unknown") + } + return fmt.Sprintf("LoadMode(%s)", strings.Join(out, "|")) +} diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go new file mode 100644 index 0000000000..ece0e7c603 --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -0,0 +1,1333 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +// See doc.go for package documentation and implementation notes. + +import ( + "context" + "encoding/json" + "fmt" + "go/ast" + "go/parser" + "go/scanner" + "go/token" + "go/types" + "io" + "log" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + "time" + + "golang.org/x/tools/go/gcexportdata" + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/packagesinternal" + "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/internal/typesinternal" +) + +// A LoadMode controls the amount of detail to return when loading. +// The bits below can be combined to specify which fields should be +// filled in the result packages. +// The zero value is a special case, equivalent to combining +// the NeedName, NeedFiles, and NeedCompiledGoFiles bits. +// ID and Errors (if present) will always be filled. +// Load may return more information than requested. +type LoadMode int + +const ( + // NeedName adds Name and PkgPath. + NeedName LoadMode = 1 << iota + + // NeedFiles adds GoFiles and OtherFiles. + NeedFiles + + // NeedCompiledGoFiles adds CompiledGoFiles. + NeedCompiledGoFiles + + // NeedImports adds Imports. If NeedDeps is not set, the Imports field will contain + // "placeholder" Packages with only the ID set. + NeedImports + + // NeedDeps adds the fields requested by the LoadMode in the packages in Imports. + NeedDeps + + // NeedExportFile adds ExportFile. + NeedExportFile + + // NeedTypes adds Types, Fset, and IllTyped. + NeedTypes + + // NeedSyntax adds Syntax. + NeedSyntax + + // NeedTypesInfo adds TypesInfo. 
+ NeedTypesInfo + + // NeedTypesSizes adds TypesSizes. + NeedTypesSizes + + // needInternalDepsErrors adds the internal deps errors field for use by gopls. + needInternalDepsErrors + + // needInternalForTest adds the internal forTest field. + // Tests must also be set on the context for this field to be populated. + needInternalForTest + + // typecheckCgo enables full support for type checking cgo. Requires Go 1.15+. + // Modifies CompiledGoFiles and Types, and has no effect on its own. + typecheckCgo + + // NeedModule adds Module. + NeedModule + + // NeedEmbedFiles adds EmbedFiles. + NeedEmbedFiles + + // NeedEmbedPatterns adds EmbedPatterns. + NeedEmbedPatterns +) + +const ( + // Deprecated: LoadFiles exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles + + // Deprecated: LoadImports exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadImports = LoadFiles | NeedImports + + // Deprecated: LoadTypes exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadTypes = LoadImports | NeedTypes | NeedTypesSizes + + // Deprecated: LoadSyntax exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo + + // Deprecated: LoadAllSyntax exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadAllSyntax = LoadSyntax | NeedDeps + + // Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile. + NeedExportsFile = NeedExportFile +) + +// A Config specifies details about how packages should be loaded. +// The zero value is a valid configuration. +// Calls to Load do not modify this struct. +type Config struct { + // Mode controls the level of information returned for each package. + Mode LoadMode + + // Context specifies the context for the load operation. + // If the context is cancelled, the loader may stop early + // and return an ErrCancelled error. + // If Context is nil, the load cannot be cancelled. + Context context.Context + + // Logf is the logger for the config. + // If the user provides a logger, debug logging is enabled. + // If the GOPACKAGESDEBUG environment variable is set to true, + // but the logger is nil, default to log.Printf. + Logf func(format string, args ...interface{}) + + // Dir is the directory in which to run the build system's query tool + // that provides information about the packages. + // If Dir is empty, the tool is run in the current directory. + Dir string + + // Env is the environment to use when invoking the build system's query tool. + // If Env is nil, the current environment is used. + // As in os/exec's Cmd, only the last value in the slice for + // each environment key is used. To specify the setting of only + // a few variables, append to the current environment, as in: + // + // opt.Env = append(os.Environ(), "GOOS=plan9", "GOARCH=386") + // + Env []string + + // gocmdRunner guards go command calls from concurrency errors. + gocmdRunner *gocommand.Runner + + // BuildFlags is a list of command-line flags to be passed through to + // the build system's query tool. + BuildFlags []string + + // modFile will be used for -modfile in go command invocations. 
+	modFile string
+
+	// modFlag will be used for -modfile in go command invocations.
+	modFlag string
+
+	// Fset provides source position information for syntax trees and types.
+	// If Fset is nil, Load will use a new fileset, but leave Fset itself unchanged.
+	Fset *token.FileSet
+
+	// ParseFile is called to read and parse each file
+	// when preparing a package's type-checked syntax tree.
+	// It must be safe to call ParseFile simultaneously from multiple goroutines.
+	// If ParseFile is nil, the loader uses parser.ParseFile.
+	//
+	// ParseFile should parse the source from src and use filename only for
+	// recording position information.
+	//
+	// An application may supply a custom implementation of ParseFile
+	// to change the effective file contents or the behavior of the parser,
+	// or to modify the syntax tree. For example, selectively eliminating
+	// unwanted function bodies can significantly accelerate type checking.
+	ParseFile func(fset *token.FileSet, filename string, src []byte) (*ast.File, error)
+
+	// If Tests is set, the loader includes not just the packages
+	// matching a particular pattern but also any related test packages,
+	// including test-only variants of the package and the test executable.
+	//
+	// For example, when using the go command, loading "fmt" with Tests=true
+	// returns four packages, with IDs "fmt" (the standard package),
+	// "fmt [fmt.test]" (the package as compiled for the test),
+	// "fmt_test" (the test functions from source files in package fmt_test),
+	// and "fmt.test" (the test binary).
+	//
+	// In build systems with explicit names for tests,
+	// setting Tests may have no effect.
+	Tests bool
+
+	// Overlay provides a mapping of absolute file paths to file contents.
+	// If the file with the given path already exists, the parser will use the
+	// alternative file contents provided by the map.
+	//
+	// Overlays provide incomplete support for when a given file doesn't
+	// already exist on disk. See the package doc above for more details.
+	Overlay map[string][]byte
+}
+
+// driver is the type for functions that query the build system for the
+// packages named by the patterns.
+type driver func(cfg *Config, patterns ...string) (*driverResponse, error)
+
+// driverResponse contains the results for a driver query.
+type driverResponse struct {
+	// NotHandled is returned if the request can't be handled by the current
+	// driver. If an external driver returns a response with NotHandled, the
+	// rest of the driverResponse is ignored, and go/packages will fall back
+	// to the next driver. If go/packages is extended in the future to support
+	// lists of multiple drivers, go/packages will fall back to the next driver.
+	NotHandled bool
+
+	// Compiler and Arch are the arguments passed to types.SizesFor
+	// to get a types.Sizes to use when type checking.
+	Compiler string
+	Arch     string
+
+	// Roots is the set of package IDs that make up the root packages.
+	// We have to encode this separately because when we encode a single package
+	// we cannot know if it is one of the roots as that requires knowledge of the
+	// graph it is part of.
+	Roots []string `json:",omitempty"`
+
+	// Packages is the full set of packages in the graph.
+	// The packages are not connected into a graph.
+	// The Imports, if populated, will be stubs that only have their ID set.
+	// Imports will be connected and then type and syntax information added in a
+	// later pass (see refine).
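+	// For example (an illustrative sketch, not real driver output): loading
+	// the pattern "bytes" could yield Roots = ["bytes"] and Packages holding
+	// "bytes" plus its dependencies, each with stub imports such as
+	// Imports["errors"] = &Package{ID: "errors"}.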
+ Packages []*Package + + // GoVersion is the minor version number used by the driver + // (e.g. the go command on the PATH) when selecting .go files. + // Zero means unknown. + GoVersion int +} + +// Load loads and returns the Go packages named by the given patterns. +// +// Config specifies loading options; +// nil behaves the same as an empty Config. +// +// Load returns an error if any of the patterns was invalid +// as defined by the underlying build system. +// It may return an empty list of packages without an error, +// for instance for an empty expansion of a valid wildcard. +// Errors associated with a particular package are recorded in the +// corresponding Package's Errors list, and do not cause Load to +// return an error. Clients may need to handle such errors before +// proceeding with further analysis. The PrintErrors function is +// provided for convenient display of all errors. +func Load(cfg *Config, patterns ...string) ([]*Package, error) { + l := newLoader(cfg) + response, err := defaultDriver(&l.Config, patterns...) + if err != nil { + return nil, err + } + l.sizes = types.SizesFor(response.Compiler, response.Arch) + return l.refine(response) +} + +// defaultDriver is a driver that implements go/packages' fallback behavior. +// It will try to request to an external driver, if one exists. If there's +// no external driver, or the driver returns a response with NotHandled set, +// defaultDriver will fall back to the go list driver. +func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, error) { + driver := findExternalDriver(cfg) + if driver == nil { + driver = goListDriver + } + response, err := driver(cfg, patterns...) + if err != nil { + return response, err + } else if response.NotHandled { + return goListDriver(cfg, patterns...) + } + return response, nil +} + +// A Package describes a loaded Go package. +type Package struct { + // ID is a unique identifier for a package, + // in a syntax provided by the underlying build system. + // + // Because the syntax varies based on the build system, + // clients should treat IDs as opaque and not attempt to + // interpret them. + ID string + + // Name is the package name as it appears in the package source code. + Name string + + // PkgPath is the package path as used by the go/types package. + PkgPath string + + // Errors contains any errors encountered querying the metadata + // of the package, or while parsing or type-checking its files. + Errors []Error + + // TypeErrors contains the subset of errors produced during type checking. + TypeErrors []types.Error + + // GoFiles lists the absolute file paths of the package's Go source files. + // It may include files that should not be compiled, for example because + // they contain non-matching build tags, are documentary pseudo-files such as + // unsafe/unsafe.go or builtin/builtin.go, or are subject to cgo preprocessing. + GoFiles []string + + // CompiledGoFiles lists the absolute file paths of the package's source + // files that are suitable for type checking. + // This may differ from GoFiles if files are processed before compilation. + CompiledGoFiles []string + + // OtherFiles lists the absolute file paths of the package's non-Go source files, + // including assembly, C, C++, Fortran, Objective-C, SWIG, and so on. + OtherFiles []string + + // EmbedFiles lists the absolute file paths of the package's files + // embedded with go:embed. 
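+	// For example (illustrative), a directive such as
+	//
+	//	//go:embed static/index.html
+	//
+	// contributes that file's absolute path to this list.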
+ EmbedFiles []string + + // EmbedPatterns lists the absolute file patterns of the package's + // files embedded with go:embed. + EmbedPatterns []string + + // IgnoredFiles lists source files that are not part of the package + // using the current build configuration but that might be part of + // the package using other build configurations. + IgnoredFiles []string + + // ExportFile is the absolute path to a file containing type + // information for the package as provided by the build system. + ExportFile string + + // Imports maps import paths appearing in the package's Go source files + // to corresponding loaded Packages. + Imports map[string]*Package + + // Types provides type information for the package. + // The NeedTypes LoadMode bit sets this field for packages matching the + // patterns; type information for dependencies may be missing or incomplete, + // unless NeedDeps and NeedImports are also set. + Types *types.Package + + // Fset provides position information for Types, TypesInfo, and Syntax. + // It is set only when Types is set. + Fset *token.FileSet + + // IllTyped indicates whether the package or any dependency contains errors. + // It is set only when Types is set. + IllTyped bool + + // Syntax is the package's syntax trees, for the files listed in CompiledGoFiles. + // + // The NeedSyntax LoadMode bit populates this field for packages matching the patterns. + // If NeedDeps and NeedImports are also set, this field will also be populated + // for dependencies. + // + // Syntax is kept in the same order as CompiledGoFiles, with the caveat that nils are + // removed. If parsing returned nil, Syntax may be shorter than CompiledGoFiles. + Syntax []*ast.File + + // TypesInfo provides type information about the package's syntax trees. + // It is set only when Syntax is set. + TypesInfo *types.Info + + // TypesSizes provides the effective size function for types in TypesInfo. + TypesSizes types.Sizes + + // forTest is the package under test, if any. + forTest string + + // depsErrors is the DepsErrors field from the go list response, if any. + depsErrors []*packagesinternal.PackageError + + // module is the module information for the package if it exists. + Module *Module +} + +// Module provides module information for a package. +type Module struct { + Path string // module path + Version string // module version + Replace *Module // replaced by this module + Time *time.Time // time version was created + Main bool // is this the main module? + Indirect bool // is this module only an indirect dependency of main module? + Dir string // directory holding files for this module, if any + GoMod string // path to go.mod file used when loading this module, if any + GoVersion string // go version used in module + Error *ModuleError // error loading module +} + +// ModuleError holds errors loading a module. 
+type ModuleError struct { + Err string // the error itself +} + +func init() { + packagesinternal.GetForTest = func(p interface{}) string { + return p.(*Package).forTest + } + packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError { + return p.(*Package).depsErrors + } + packagesinternal.GetGoCmdRunner = func(config interface{}) *gocommand.Runner { + return config.(*Config).gocmdRunner + } + packagesinternal.SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) { + config.(*Config).gocmdRunner = runner + } + packagesinternal.SetModFile = func(config interface{}, value string) { + config.(*Config).modFile = value + } + packagesinternal.SetModFlag = func(config interface{}, value string) { + config.(*Config).modFlag = value + } + packagesinternal.TypecheckCgo = int(typecheckCgo) + packagesinternal.DepsErrors = int(needInternalDepsErrors) + packagesinternal.ForTest = int(needInternalForTest) +} + +// An Error describes a problem with a package's metadata, syntax, or types. +type Error struct { + Pos string // "file:line:col" or "file:line" or "" or "-" + Msg string + Kind ErrorKind +} + +// ErrorKind describes the source of the error, allowing the user to +// differentiate between errors generated by the driver, the parser, or the +// type-checker. +type ErrorKind int + +const ( + UnknownError ErrorKind = iota + ListError + ParseError + TypeError +) + +func (err Error) Error() string { + pos := err.Pos + if pos == "" { + pos = "-" // like token.Position{}.String() + } + return pos + ": " + err.Msg +} + +// flatPackage is the JSON form of Package +// It drops all the type and syntax fields, and transforms the Imports +// +// TODO(adonovan): identify this struct with Package, effectively +// publishing the JSON protocol. +type flatPackage struct { + ID string + Name string `json:",omitempty"` + PkgPath string `json:",omitempty"` + Errors []Error `json:",omitempty"` + GoFiles []string `json:",omitempty"` + CompiledGoFiles []string `json:",omitempty"` + OtherFiles []string `json:",omitempty"` + EmbedFiles []string `json:",omitempty"` + EmbedPatterns []string `json:",omitempty"` + IgnoredFiles []string `json:",omitempty"` + ExportFile string `json:",omitempty"` + Imports map[string]string `json:",omitempty"` +} + +// MarshalJSON returns the Package in its JSON form. +// For the most part, the structure fields are written out unmodified, and +// the type and syntax fields are skipped. +// The imports are written out as just a map of path to package id. +// The errors are written using a custom type that tries to preserve the +// structure of error types we know about. +// +// This method exists to enable support for additional build systems. It is +// not intended for use by clients of the API and we may change the format. +func (p *Package) MarshalJSON() ([]byte, error) { + flat := &flatPackage{ + ID: p.ID, + Name: p.Name, + PkgPath: p.PkgPath, + Errors: p.Errors, + GoFiles: p.GoFiles, + CompiledGoFiles: p.CompiledGoFiles, + OtherFiles: p.OtherFiles, + EmbedFiles: p.EmbedFiles, + EmbedPatterns: p.EmbedPatterns, + IgnoredFiles: p.IgnoredFiles, + ExportFile: p.ExportFile, + } + if len(p.Imports) > 0 { + flat.Imports = make(map[string]string, len(p.Imports)) + for path, ipkg := range p.Imports { + flat.Imports[path] = ipkg.ID + } + } + return json.Marshal(flat) +} + +// UnmarshalJSON reads in a Package from its JSON format. +// See MarshalJSON for details about the format accepted. 
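+// A minimal illustrative document (values made up):
+//
+//	{"ID": "example.com/p", "Name": "p", "GoFiles": ["/src/p/p.go"],
+//	 "Imports": {"fmt": "fmt"}}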
+func (p *Package) UnmarshalJSON(b []byte) error { + flat := &flatPackage{} + if err := json.Unmarshal(b, &flat); err != nil { + return err + } + *p = Package{ + ID: flat.ID, + Name: flat.Name, + PkgPath: flat.PkgPath, + Errors: flat.Errors, + GoFiles: flat.GoFiles, + CompiledGoFiles: flat.CompiledGoFiles, + OtherFiles: flat.OtherFiles, + EmbedFiles: flat.EmbedFiles, + EmbedPatterns: flat.EmbedPatterns, + ExportFile: flat.ExportFile, + } + if len(flat.Imports) > 0 { + p.Imports = make(map[string]*Package, len(flat.Imports)) + for path, id := range flat.Imports { + p.Imports[path] = &Package{ID: id} + } + } + return nil +} + +func (p *Package) String() string { return p.ID } + +// loaderPackage augments Package with state used during the loading phase +type loaderPackage struct { + *Package + importErrors map[string]error // maps each bad import to its error + loadOnce sync.Once + color uint8 // for cycle detection + needsrc bool // load from source (Mode >= LoadTypes) + needtypes bool // type information is either requested or depended on + initial bool // package was matched by a pattern + goVersion int // minor version number of go command on PATH +} + +// loader holds the working state of a single call to load. +type loader struct { + pkgs map[string]*loaderPackage + Config + sizes types.Sizes + parseCache map[string]*parseValue + parseCacheMu sync.Mutex + exportMu sync.Mutex // enforces mutual exclusion of exportdata operations + + // Config.Mode contains the implied mode (see impliedLoadMode). + // Implied mode contains all the fields we need the data for. + // In requestedMode there are the actually requested fields. + // We'll zero them out before returning packages to the user. + // This makes it easier for us to get the conditions where + // we need certain modes right. + requestedMode LoadMode +} + +type parseValue struct { + f *ast.File + err error + ready chan struct{} +} + +func newLoader(cfg *Config) *loader { + ld := &loader{ + parseCache: map[string]*parseValue{}, + } + if cfg != nil { + ld.Config = *cfg + // If the user has provided a logger, use it. + ld.Config.Logf = cfg.Logf + } + if ld.Config.Logf == nil { + // If the GOPACKAGESDEBUG environment variable is set to true, + // but the user has not provided a logger, default to log.Printf. + if debug { + ld.Config.Logf = log.Printf + } else { + ld.Config.Logf = func(format string, args ...interface{}) {} + } + } + if ld.Config.Mode == 0 { + ld.Config.Mode = NeedName | NeedFiles | NeedCompiledGoFiles // Preserve zero behavior of Mode for backwards compatibility. + } + if ld.Config.Env == nil { + ld.Config.Env = os.Environ() + } + if ld.Config.gocmdRunner == nil { + ld.Config.gocmdRunner = &gocommand.Runner{} + } + if ld.Context == nil { + ld.Context = context.Background() + } + if ld.Dir == "" { + if dir, err := os.Getwd(); err == nil { + ld.Dir = dir + } + } + + // Save the actually requested fields. We'll zero them out before returning packages to the user. + ld.requestedMode = ld.Mode + ld.Mode = impliedLoadMode(ld.Mode) + + if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { + if ld.Fset == nil { + ld.Fset = token.NewFileSet() + } + + // ParseFile is required even in LoadTypes mode + // because we load source if export data is missing. 
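+		// For illustration (a hedged sketch of a caller-supplied override,
+		// not part of the loader itself): a Config could trade comments for
+		// speed with
+		//
+		//	cfg.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) {
+		//		return parser.ParseFile(fset, filename, src, parser.AllErrors) // omit ParseComments
+		//	}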
+		if ld.ParseFile == nil {
+			ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) {
+				const mode = parser.AllErrors | parser.ParseComments
+				return parser.ParseFile(fset, filename, src, mode)
+			}
+		}
+	}
+
+	return ld
+}
+
+// refine connects the supplied packages into a graph and then adds type
+// and syntax information as requested by the LoadMode.
+func (ld *loader) refine(response *driverResponse) ([]*Package, error) {
+	roots := response.Roots
+	rootMap := make(map[string]int, len(roots))
+	for i, root := range roots {
+		rootMap[root] = i
+	}
+	ld.pkgs = make(map[string]*loaderPackage)
+	// first pass, fixup and build the map and roots
+	var initial = make([]*loaderPackage, len(roots))
+	for _, pkg := range response.Packages {
+		rootIndex := -1
+		if i, found := rootMap[pkg.ID]; found {
+			rootIndex = i
+		}
+
+		// Overlays can invalidate export data.
+		// TODO(matloob): make this check fine-grained based on dependencies on overlaid files
+		exportDataInvalid := len(ld.Overlay) > 0 || pkg.ExportFile == "" && pkg.PkgPath != "unsafe"
+		// This package needs type information if the caller requested types and the package is
+		// either a root, or it's a non-root and the user requested dependencies ...
+		needtypes := (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0))
+		// This package needs source if the call requested source (or types info, which implies source)
+		// and the package is either a root, or it's a non-root and the user requested dependencies...
+		needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) ||
+			// ... or if we need types and the exportData is invalid. We fall back to (incompletely)
+			// typechecking packages from source if they fail to compile.
+			(ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && exportDataInvalid)) && pkg.PkgPath != "unsafe"
+		lpkg := &loaderPackage{
+			Package:   pkg,
+			needtypes: needtypes,
+			needsrc:   needsrc,
+			goVersion: response.GoVersion,
+		}
+		ld.pkgs[lpkg.ID] = lpkg
+		if rootIndex >= 0 {
+			initial[rootIndex] = lpkg
+			lpkg.initial = true
+		}
+	}
+	for i, root := range roots {
+		if initial[i] == nil {
+			return nil, fmt.Errorf("root package %v is missing", root)
+		}
+	}
+
+	// Materialize the import graph.
+
+	const (
+		white = 0 // new
+		grey  = 1 // in progress
+		black = 2 // complete
+	)
+
+	// visit traverses the import graph, depth-first,
+	// and materializes the graph as Packages.Imports.
+	//
+	// Valid imports are saved in the Packages.Import map.
+	// Invalid imports (cycles and missing nodes) are saved in the importErrors map.
+	// Thus, even in the presence of both kinds of errors, the Import graph remains a DAG.
+	//
+	// visit returns whether the package needs src or has a transitive
+	// dependency on a package that does. These are the only packages
+	// for which we load source code.
+	var stack []*loaderPackage
+	var visit func(lpkg *loaderPackage) bool
+	var srcPkgs []*loaderPackage
+	visit = func(lpkg *loaderPackage) bool {
+		switch lpkg.color {
+		case black:
+			return lpkg.needsrc
+		case grey:
+			panic("internal error: grey node")
+		}
+		lpkg.color = grey
+		stack = append(stack, lpkg) // push
+		stubs := lpkg.Imports       // the structure form has only stubs with the ID in the Imports
+		// If NeedImports isn't set, the imports fields will all be zeroed out.
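+		// Illustrative note: before this step an import is only a stub such
+		// as &Package{ID: "fmt"}; the loop below replaces it with the fully
+		// loaded *Package for that ID, making the graph navigable.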
+ if ld.Mode&NeedImports != 0 { + lpkg.Imports = make(map[string]*Package, len(stubs)) + for importPath, ipkg := range stubs { + var importErr error + imp := ld.pkgs[ipkg.ID] + if imp == nil { + // (includes package "C" when DisableCgo) + importErr = fmt.Errorf("missing package: %q", ipkg.ID) + } else if imp.color == grey { + importErr = fmt.Errorf("import cycle: %s", stack) + } + if importErr != nil { + if lpkg.importErrors == nil { + lpkg.importErrors = make(map[string]error) + } + lpkg.importErrors[importPath] = importErr + continue + } + + if visit(imp) { + lpkg.needsrc = true + } + lpkg.Imports[importPath] = imp.Package + } + } + if lpkg.needsrc { + srcPkgs = append(srcPkgs, lpkg) + } + if ld.Mode&NeedTypesSizes != 0 { + lpkg.TypesSizes = ld.sizes + } + stack = stack[:len(stack)-1] // pop + lpkg.color = black + + return lpkg.needsrc + } + + if ld.Mode&NeedImports == 0 { + // We do this to drop the stub import packages that we are not even going to try to resolve. + for _, lpkg := range initial { + lpkg.Imports = nil + } + } else { + // For each initial package, create its import DAG. + for _, lpkg := range initial { + visit(lpkg) + } + } + if ld.Mode&NeedImports != 0 && ld.Mode&NeedTypes != 0 { + for _, lpkg := range srcPkgs { + // Complete type information is required for the + // immediate dependencies of each source package. + for _, ipkg := range lpkg.Imports { + imp := ld.pkgs[ipkg.ID] + imp.needtypes = true + } + } + } + // Load type data and syntax if needed, starting at + // the initial packages (roots of the import DAG). + if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { + var wg sync.WaitGroup + for _, lpkg := range initial { + wg.Add(1) + go func(lpkg *loaderPackage) { + ld.loadRecursive(lpkg) + wg.Done() + }(lpkg) + } + wg.Wait() + } + + result := make([]*Package, len(initial)) + for i, lpkg := range initial { + result[i] = lpkg.Package + } + for i := range ld.pkgs { + // Clear all unrequested fields, + // to catch programs that use more than they request. + if ld.requestedMode&NeedName == 0 { + ld.pkgs[i].Name = "" + ld.pkgs[i].PkgPath = "" + } + if ld.requestedMode&NeedFiles == 0 { + ld.pkgs[i].GoFiles = nil + ld.pkgs[i].OtherFiles = nil + ld.pkgs[i].IgnoredFiles = nil + } + if ld.requestedMode&NeedEmbedFiles == 0 { + ld.pkgs[i].EmbedFiles = nil + } + if ld.requestedMode&NeedEmbedPatterns == 0 { + ld.pkgs[i].EmbedPatterns = nil + } + if ld.requestedMode&NeedCompiledGoFiles == 0 { + ld.pkgs[i].CompiledGoFiles = nil + } + if ld.requestedMode&NeedImports == 0 { + ld.pkgs[i].Imports = nil + } + if ld.requestedMode&NeedExportFile == 0 { + ld.pkgs[i].ExportFile = "" + } + if ld.requestedMode&NeedTypes == 0 { + ld.pkgs[i].Types = nil + ld.pkgs[i].Fset = nil + ld.pkgs[i].IllTyped = false + } + if ld.requestedMode&NeedSyntax == 0 { + ld.pkgs[i].Syntax = nil + } + if ld.requestedMode&NeedTypesInfo == 0 { + ld.pkgs[i].TypesInfo = nil + } + if ld.requestedMode&NeedTypesSizes == 0 { + ld.pkgs[i].TypesSizes = nil + } + if ld.requestedMode&NeedModule == 0 { + ld.pkgs[i].Module = nil + } + } + + return result, nil +} + +// loadRecursive loads the specified package and its dependencies, +// recursively, in parallel, in topological order. +// It is atomic and idempotent. +// Precondition: ld.Mode&NeedTypes. +func (ld *loader) loadRecursive(lpkg *loaderPackage) { + lpkg.loadOnce.Do(func() { + // Load the direct dependencies, in parallel. 
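+		// Descriptive note: goroutine fan-out here is bounded only by the
+		// import graph; file reads are separately throttled by the ioLimit
+		// semaphore in parseFile.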
+ var wg sync.WaitGroup + for _, ipkg := range lpkg.Imports { + imp := ld.pkgs[ipkg.ID] + wg.Add(1) + go func(imp *loaderPackage) { + ld.loadRecursive(imp) + wg.Done() + }(imp) + } + wg.Wait() + ld.loadPackage(lpkg) + }) +} + +// loadPackage loads the specified package. +// It must be called only once per Package, +// after immediate dependencies are loaded. +// Precondition: ld.Mode & NeedTypes. +func (ld *loader) loadPackage(lpkg *loaderPackage) { + if lpkg.PkgPath == "unsafe" { + // Fill in the blanks to avoid surprises. + lpkg.Types = types.Unsafe + lpkg.Fset = ld.Fset + lpkg.Syntax = []*ast.File{} + lpkg.TypesInfo = new(types.Info) + lpkg.TypesSizes = ld.sizes + return + } + + // Call NewPackage directly with explicit name. + // This avoids skew between golist and go/types when the files' + // package declarations are inconsistent. + lpkg.Types = types.NewPackage(lpkg.PkgPath, lpkg.Name) + lpkg.Fset = ld.Fset + + // Subtle: we populate all Types fields with an empty Package + // before loading export data so that export data processing + // never has to create a types.Package for an indirect dependency, + // which would then require that such created packages be explicitly + // inserted back into the Import graph as a final step after export data loading. + // (Hence this return is after the Types assignment.) + // The Diamond test exercises this case. + if !lpkg.needtypes && !lpkg.needsrc { + return + } + if !lpkg.needsrc { + if err := ld.loadFromExportData(lpkg); err != nil { + lpkg.Errors = append(lpkg.Errors, Error{ + Pos: "-", + Msg: err.Error(), + Kind: UnknownError, // e.g. can't find/open/parse export data + }) + } + return // not a source package, don't get syntax trees + } + + appendError := func(err error) { + // Convert various error types into the one true Error. + var errs []Error + switch err := err.(type) { + case Error: + // from driver + errs = append(errs, err) + + case *os.PathError: + // from parser + errs = append(errs, Error{ + Pos: err.Path + ":1", + Msg: err.Err.Error(), + Kind: ParseError, + }) + + case scanner.ErrorList: + // from parser + for _, err := range err { + errs = append(errs, Error{ + Pos: err.Pos.String(), + Msg: err.Msg, + Kind: ParseError, + }) + } + + case types.Error: + // from type checker + lpkg.TypeErrors = append(lpkg.TypeErrors, err) + errs = append(errs, Error{ + Pos: err.Fset.Position(err.Pos).String(), + Msg: err.Msg, + Kind: TypeError, + }) + + default: + // unexpected impoverished error from parser? + errs = append(errs, Error{ + Pos: "-", + Msg: err.Error(), + Kind: UnknownError, + }) + + // If you see this error message, please file a bug. + log.Printf("internal error: error %q (%T) without position", err, err) + } + + lpkg.Errors = append(lpkg.Errors, errs...) + } + + // If the go command on the PATH is newer than the runtime, + // then the go/{scanner,ast,parser,types} packages from the + // standard library may be unable to process the files + // selected by go list. + // + // There is currently no way to downgrade the effective + // version of the go command (see issue 52078), so we proceed + // with the newer go command but, in case of parse or type + // errors, we emit an additional diagnostic. + // + // See: + // - golang.org/issue/52078 (flag to set release tags) + // - golang.org/issue/50825 (gopls legacy version support) + // - golang.org/issue/55883 (go/packages confusing error) + // + // Should we assert a hard minimum of (currently) go1.16 here? 
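+	// For example (illustrative): runtime.Version() == "go1.19.3" parses as
+	// runtimeVersion = 19 below, while development versions fail to parse
+	// and skip this check entirely.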
+ var runtimeVersion int + if _, err := fmt.Sscanf(runtime.Version(), "go1.%d", &runtimeVersion); err == nil && runtimeVersion < lpkg.goVersion { + defer func() { + if len(lpkg.Errors) > 0 { + appendError(Error{ + Pos: "-", + Msg: fmt.Sprintf("This application uses version go1.%d of the source-processing packages but runs version go1.%d of 'go list'. It may fail to process source files that rely on newer language features. If so, rebuild the application using a newer version of Go.", runtimeVersion, lpkg.goVersion), + Kind: UnknownError, + }) + } + }() + } + + if ld.Config.Mode&NeedTypes != 0 && len(lpkg.CompiledGoFiles) == 0 && lpkg.ExportFile != "" { + // The config requested loading sources and types, but sources are missing. + // Add an error to the package and fall back to loading from export data. + appendError(Error{"-", fmt.Sprintf("sources missing for package %s", lpkg.ID), ParseError}) + _ = ld.loadFromExportData(lpkg) // ignore any secondary errors + + return // can't get syntax trees for this package + } + + files, errs := ld.parseFiles(lpkg.CompiledGoFiles) + for _, err := range errs { + appendError(err) + } + + lpkg.Syntax = files + if ld.Config.Mode&NeedTypes == 0 { + return + } + + lpkg.TypesInfo = &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + } + typeparams.InitInstanceInfo(lpkg.TypesInfo) + lpkg.TypesSizes = ld.sizes + + importer := importerFunc(func(path string) (*types.Package, error) { + if path == "unsafe" { + return types.Unsafe, nil + } + + // The imports map is keyed by import path. + ipkg := lpkg.Imports[path] + if ipkg == nil { + if err := lpkg.importErrors[path]; err != nil { + return nil, err + } + // There was skew between the metadata and the + // import declarations, likely due to an edit + // race, or because the ParseFile feature was + // used to supply alternative file contents. + return nil, fmt.Errorf("no metadata for %s", path) + } + + if ipkg.Types != nil && ipkg.Types.Complete() { + return ipkg.Types, nil + } + log.Fatalf("internal error: package %q without types was imported from %q", path, lpkg) + panic("unreachable") + }) + + // type-check + tc := &types.Config{ + Importer: importer, + + // Type-check bodies of functions only in initial packages. + // Example: for import graph A->B->C and initial packages {A,C}, + // we can ignore function bodies in B. + IgnoreFuncBodies: ld.Mode&NeedDeps == 0 && !lpkg.initial, + + Error: appendError, + Sizes: ld.sizes, + } + if lpkg.Module != nil && lpkg.Module.GoVersion != "" { + typesinternal.SetGoVersion(tc, "go"+lpkg.Module.GoVersion) + } + if (ld.Mode & typecheckCgo) != 0 { + if !typesinternal.SetUsesCgo(tc) { + appendError(Error{ + Msg: "typecheckCgo requires Go 1.15+", + Kind: ListError, + }) + return + } + } + types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax) + + lpkg.importErrors = nil // no longer needed + + // If !Cgo, the type-checker uses FakeImportC mode, so + // it doesn't invoke the importer for import "C", + // nor report an error for the import, + // or for any undefined C.f reference. + // We must detect this explicitly and correctly + // mark the package as IllTyped (by reporting an error). + // TODO(adonovan): if these errors are annoying, + // we could just set IllTyped quietly. 
+ if tc.FakeImportC { + outer: + for _, f := range lpkg.Syntax { + for _, imp := range f.Imports { + if imp.Path.Value == `"C"` { + err := types.Error{Fset: ld.Fset, Pos: imp.Pos(), Msg: `import "C" ignored`} + appendError(err) + break outer + } + } + } + } + + // Record accumulated errors. + illTyped := len(lpkg.Errors) > 0 + if !illTyped { + for _, imp := range lpkg.Imports { + if imp.IllTyped { + illTyped = true + break + } + } + } + lpkg.IllTyped = illTyped +} + +// An importFunc is an implementation of the single-method +// types.Importer interface based on a function value. +type importerFunc func(path string) (*types.Package, error) + +func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) } + +// We use a counting semaphore to limit +// the number of parallel I/O calls per process. +var ioLimit = make(chan bool, 20) + +func (ld *loader) parseFile(filename string) (*ast.File, error) { + ld.parseCacheMu.Lock() + v, ok := ld.parseCache[filename] + if ok { + // cache hit + ld.parseCacheMu.Unlock() + <-v.ready + } else { + // cache miss + v = &parseValue{ready: make(chan struct{})} + ld.parseCache[filename] = v + ld.parseCacheMu.Unlock() + + var src []byte + for f, contents := range ld.Config.Overlay { + if sameFile(f, filename) { + src = contents + } + } + var err error + if src == nil { + ioLimit <- true // wait + src, err = os.ReadFile(filename) + <-ioLimit // signal + } + if err != nil { + v.err = err + } else { + v.f, v.err = ld.ParseFile(ld.Fset, filename, src) + } + + close(v.ready) + } + return v.f, v.err +} + +// parseFiles reads and parses the Go source files and returns the ASTs +// of the ones that could be at least partially parsed, along with a +// list of I/O and parse errors encountered. +// +// Because files are scanned in parallel, the token.Pos +// positions of the resulting ast.Files are not ordered. +func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) { + var wg sync.WaitGroup + n := len(filenames) + parsed := make([]*ast.File, n) + errors := make([]error, n) + for i, file := range filenames { + if ld.Config.Context.Err() != nil { + parsed[i] = nil + errors[i] = ld.Config.Context.Err() + continue + } + wg.Add(1) + go func(i int, filename string) { + parsed[i], errors[i] = ld.parseFile(filename) + wg.Done() + }(i, file) + } + wg.Wait() + + // Eliminate nils, preserving order. + var o int + for _, f := range parsed { + if f != nil { + parsed[o] = f + o++ + } + } + parsed = parsed[:o] + + o = 0 + for _, err := range errors { + if err != nil { + errors[o] = err + o++ + } + } + errors = errors[:o] + + return parsed, errors +} + +// sameFile returns true if x and y have the same basename and denote +// the same file. +func sameFile(x, y string) bool { + if x == y { + // It could be the case that y doesn't exist. + // For instance, it may be an overlay file that + // hasn't been written to disk. To handle that case + // let x == y through. (We added the exact absolute path + // string to the CompiledGoFiles list, so the unwritten + // overlay case implies x==y.) + return true + } + if strings.EqualFold(filepath.Base(x), filepath.Base(y)) { // (optimisation) + if xi, err := os.Stat(x); err == nil { + if yi, err := os.Stat(y); err == nil { + return os.SameFile(xi, yi) + } + } + } + return false +} + +// loadFromExportData ensures that type information is present for the specified +// package, loading it from an export data file on the first request. +// On success it sets lpkg.Types to a new Package. 
+func (ld *loader) loadFromExportData(lpkg *loaderPackage) error { + if lpkg.PkgPath == "" { + log.Fatalf("internal error: Package %s has no PkgPath", lpkg) + } + + // Because gcexportdata.Read has the potential to create or + // modify the types.Package for each node in the transitive + // closure of dependencies of lpkg, all exportdata operations + // must be sequential. (Finer-grained locking would require + // changes to the gcexportdata API.) + // + // The exportMu lock guards the lpkg.Types field and the + // types.Package it points to, for each loaderPackage in the graph. + // + // Not all accesses to Package.Pkg need to be protected by exportMu: + // graph ordering ensures that direct dependencies of source + // packages are fully loaded before the importer reads their Pkg field. + ld.exportMu.Lock() + defer ld.exportMu.Unlock() + + if tpkg := lpkg.Types; tpkg != nil && tpkg.Complete() { + return nil // cache hit + } + + lpkg.IllTyped = true // fail safe + + if lpkg.ExportFile == "" { + // Errors while building export data will have been printed to stderr. + return fmt.Errorf("no export data file") + } + f, err := os.Open(lpkg.ExportFile) + if err != nil { + return err + } + defer f.Close() + + // Read gc export data. + // + // We don't currently support gccgo export data because all + // underlying workspaces use the gc toolchain. (Even build + // systems that support gccgo don't use it for workspace + // queries.) + r, err := gcexportdata.NewReader(f) + if err != nil { + return fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) + } + + // Build the view. + // + // The gcexportdata machinery has no concept of package ID. + // It identifies packages by their PkgPath, which although not + // globally unique is unique within the scope of one invocation + // of the linker, type-checker, or gcexportdata. + // + // So, we must build a PkgPath-keyed view of the global + // (conceptually ID-keyed) cache of packages and pass it to + // gcexportdata. The view must contain every existing + // package that might possibly be mentioned by the + // current package---its transitive closure. + // + // In loadPackage, we unconditionally create a types.Package for + // each dependency so that export data loading does not + // create new ones. + // + // TODO(adonovan): it would be simpler and more efficient + // if the export data machinery invoked a callback to + // get-or-create a package instead of a map. + // + view := make(map[string]*types.Package) // view seen by gcexportdata + seen := make(map[*loaderPackage]bool) // all visited packages + var visit func(pkgs map[string]*Package) + visit = func(pkgs map[string]*Package) { + for _, p := range pkgs { + lpkg := ld.pkgs[p.ID] + if !seen[lpkg] { + seen[lpkg] = true + view[lpkg.PkgPath] = lpkg.Types + visit(lpkg.Imports) + } + } + } + visit(lpkg.Imports) + + viewLen := len(view) + 1 // adding the self package + // Parse the export data. + // (May modify incomplete packages in view but not create new ones.) + tpkg, err := gcexportdata.Read(r, ld.Fset, view, lpkg.PkgPath) + if err != nil { + return fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) + } + if _, ok := view["go.shape"]; ok { + // Account for the pseudopackage "go.shape" that gets + // created by generic code. + viewLen++ + } + if viewLen != len(view) { + log.Panicf("golang.org/x/tools/go/packages: unexpected new packages during load of %s", lpkg.PkgPath) + } + + lpkg.Types = tpkg + lpkg.IllTyped = false + return nil +} + +// impliedLoadMode returns loadMode with its dependencies. 
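+// For example (illustrative): impliedLoadMode(NeedTypes) adds NeedImports,
+// since type checking requires the import graph.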
+func impliedLoadMode(loadMode LoadMode) LoadMode { + if loadMode&(NeedDeps|NeedTypes|NeedTypesInfo) != 0 { + // All these things require knowing the import graph. + loadMode |= NeedImports + } + + return loadMode +} + +func usesExportData(cfg *Config) bool { + return cfg.Mode&NeedExportFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0 +} + +var _ interface{} = io.Discard // assert build toolchain is go1.16 or later diff --git a/vendor/golang.org/x/tools/go/packages/visit.go b/vendor/golang.org/x/tools/go/packages/visit.go new file mode 100644 index 0000000000..a1dcc40b72 --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/visit.go @@ -0,0 +1,59 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "fmt" + "os" + "sort" +) + +// Visit visits all the packages in the import graph whose roots are +// pkgs, calling the optional pre function the first time each package +// is encountered (preorder), and the optional post function after a +// package's dependencies have been visited (postorder). +// The boolean result of pre(pkg) determines whether +// the imports of package pkg are visited. +func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) { + seen := make(map[*Package]bool) + var visit func(*Package) + visit = func(pkg *Package) { + if !seen[pkg] { + seen[pkg] = true + + if pre == nil || pre(pkg) { + paths := make([]string, 0, len(pkg.Imports)) + for path := range pkg.Imports { + paths = append(paths, path) + } + sort.Strings(paths) // Imports is a map, this makes visit stable + for _, path := range paths { + visit(pkg.Imports[path]) + } + } + + if post != nil { + post(pkg) + } + } + } + for _, pkg := range pkgs { + visit(pkg) + } +} + +// PrintErrors prints to os.Stderr the accumulated errors of all +// packages in the import graph rooted at pkgs, dependencies first. +// PrintErrors returns the number of errors printed. +func PrintErrors(pkgs []*Package) int { + var n int + Visit(pkgs, nil, func(pkg *Package) { + for _, err := range pkg.Errors { + fmt.Fprintln(os.Stderr, err) + n++ + } + }) + return n +} diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go new file mode 100644 index 0000000000..fa5834baf7 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -0,0 +1,827 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package objectpath defines a naming scheme for types.Objects +// (that is, named entities in Go programs) relative to their enclosing +// package. +// +// Type-checker objects are canonical, so they are usually identified by +// their address in memory (a pointer), but a pointer has meaning only +// within one address space. By contrast, objectpath names allow the +// identity of an object to be sent from one program to another, +// establishing a correspondence between types.Object variables that are +// distinct but logically equivalent. +// +// A single object may have multiple paths. In this example, +// +// type A struct{ X int } +// type B A +// +// the field X has two paths due to its membership of both A and B. +// The For(obj) function always returns one of these paths, arbitrarily +// but consistently. 
+package objectpath + +import ( + "fmt" + "go/types" + "sort" + "strconv" + "strings" + _ "unsafe" + + "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/internal/typesinternal" +) + +// A Path is an opaque name that identifies a types.Object +// relative to its package. Conceptually, the name consists of a +// sequence of destructuring operations applied to the package scope +// to obtain the original object. +// The name does not include the package itself. +type Path string + +// Encoding +// +// An object path is a textual and (with training) human-readable encoding +// of a sequence of destructuring operators, starting from a types.Package. +// The sequences represent a path through the package/object/type graph. +// We classify these operators by their type: +// +// PO package->object Package.Scope.Lookup +// OT object->type Object.Type +// TT type->type Type.{Elem,Key,Params,Results,Underlying} [EKPRU] +// TO type->object Type.{At,Field,Method,Obj} [AFMO] +// +// All valid paths start with a package and end at an object +// and thus may be defined by the regular language: +// +// objectpath = PO (OT TT* TO)* +// +// The concrete encoding follows directly: +// - The only PO operator is Package.Scope.Lookup, which requires an identifier. +// - The only OT operator is Object.Type, +// which we encode as '.' because dot cannot appear in an identifier. +// - The TT operators are encoded as [EKPRUTC]; +// one of these (TypeParam) requires an integer operand, +// which is encoded as a string of decimal digits. +// - The TO operators are encoded as [AFMO]; +// three of these (At,Field,Method) require an integer operand, +// which is encoded as a string of decimal digits. +// These indices are stable across different representations +// of the same package, even source and export data. +// The indices used are implementation specific and may not correspond to +// the argument to the go/types function. +// +// In the example below, +// +// package p +// +// type T interface { +// f() (a string, b struct{ X int }) +// } +// +// field X has the path "T.UM0.RA1.F0", +// representing the following sequence of operations: +// +// p.Lookup("T") T +// .Type().Underlying().Method(0). f +// .Type().Results().At(1) b +// .Type().Field(0) X +// +// The encoding is not maximally compact---every R or P is +// followed by an A, for example---but this simplifies the +// encoder and decoder. +const ( + // object->type operators + opType = '.' // .Type() (Object) + + // type->type operators + opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map) + opKey = 'K' // .Key() (Map) + opParams = 'P' // .Params() (Signature) + opResults = 'R' // .Results() (Signature) + opUnderlying = 'U' // .Underlying() (Named) + opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature) + opConstraint = 'C' // .Constraint() (TypeParam) + + // type->object operators + opAt = 'A' // .At(i) (Tuple) + opField = 'F' // .Field(i) (Struct) + opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored) + opObj = 'O' // .Obj() (Named, TypeParam) +) + +// For is equivalent to new(Encoder).For(obj). +// +// It may be more efficient to reuse a single Encoder across several calls. +func For(obj types.Object) (Path, error) { + return new(Encoder).For(obj) +} + +// An Encoder amortizes the cost of encoding the paths of multiple objects. +// The zero value of an Encoder is ready to use. 
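+//
+// A minimal sketch of amortized use (objs is an assumed slice of objects):
+//
+// var enc objectpath.Encoder
+// for _, obj := range objs {
+// if path, err := enc.For(obj); err == nil {
+// // use path
+// }
+// }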
+type Encoder struct { + scopeMemo map[*types.Scope][]types.Object // memoization of scopeObjects + namedMethodsMemo map[*types.Named][]*types.Func // memoization of namedMethods() + skipMethodSorting bool +} + +// Expose back doors so that gopls can avoid method sorting, which can dominate +// analysis on certain repositories. +// +// TODO(golang/go#61443): remove this. +func init() { + typesinternal.SkipEncoderMethodSorting = func(enc interface{}) { + enc.(*Encoder).skipMethodSorting = true + } + typesinternal.ObjectpathObject = object +} + +// For returns the path to an object relative to its package, +// or an error if the object is not accessible from the package's Scope. +// +// The For function guarantees to return a path only for the following objects: +// - package-level types +// - exported package-level non-types +// - methods +// - parameter and result variables +// - struct fields +// These objects are sufficient to define the API of their package. +// The objects described by a package's export data are drawn from this set. +// +// The set of objects accessible from a package's Scope depends on +// whether the package was produced by type-checking syntax, or +// reading export data; the latter may have a smaller Scope since +// export data trims objects that are not reachable from an exported +// declaration. For example, the For function will return a path for +// an exported method of an unexported type that is not reachable +// from any public declaration; this path will cause the Object +// function to fail if called on a package loaded from export data. +// TODO(adonovan): is this a bug or feature? Should this package +// compute accessibility in the same way? +// +// For does not return a path for predeclared names, imported package +// names, local names, and unexported package-level names (except +// types). +// +// Example: given this definition, +// +// package p +// +// type T interface { +// f() (a string, b struct{ X int }) +// } +// +// For(X) would return a path that denotes the following sequence of operations: +// +// p.Scope().Lookup("T") (TypeName T) +// .Type().Underlying().Method(0). (method Func f) +// .Type().Results().At(1) (field Var b) +// .Type().Field(0) (field Var X) +// +// where p is the package (*types.Package) to which X belongs. +func (enc *Encoder) For(obj types.Object) (Path, error) { + pkg := obj.Pkg() + + // This table lists the cases of interest. + // + // Object Action + // ------ ------ + // nil reject + // builtin reject + // pkgname reject + // label reject + // var + // package-level accept + // func param/result accept + // local reject + // struct field accept + // const + // package-level accept + // local reject + // func + // package-level accept + // init functions reject + // concrete method accept + // interface method accept + // type + // package-level accept + // local reject + // + // The only accessible package-level objects are members of pkg itself. + // + // The cases are handled in four steps: + // + // 1. reject nil and builtin + // 2. accept package-level objects + // 3. reject obviously invalid objects + // 4. search the API for the path to the param/result/field/method. + + // 1. reference to nil or builtin? + if pkg == nil { + return "", fmt.Errorf("predeclared %s has no path", obj) + } + scope := pkg.Scope() + + // 2. package-level object? + if scope.Lookup(obj.Name()) == obj { + // Only exported objects (and non-exported types) have a path. + // Non-exported types may be referenced by other objects. 
+ if _, ok := obj.(*types.TypeName); !ok && !obj.Exported() { + return "", fmt.Errorf("no path for non-exported %v", obj) + } + return Path(obj.Name()), nil + } + + // 3. Not a package-level object. + // Reject obviously non-viable cases. + switch obj := obj.(type) { + case *types.TypeName: + if _, ok := obj.Type().(*typeparams.TypeParam); !ok { + // With the exception of type parameters, only package-level type names + // have a path. + return "", fmt.Errorf("no path for %v", obj) + } + case *types.Const, // Only package-level constants have a path. + *types.Label, // Labels are function-local. + *types.PkgName: // PkgNames are file-local. + return "", fmt.Errorf("no path for %v", obj) + + case *types.Var: + // Could be: + // - a field (obj.IsField()) + // - a func parameter or result + // - a local var. + // Sadly there is no way to distinguish + // a param/result from a local + // so we must proceed to the find. + + case *types.Func: + // A func, if not package-level, must be a method. + if recv := obj.Type().(*types.Signature).Recv(); recv == nil { + return "", fmt.Errorf("func is not a method: %v", obj) + } + + if path, ok := enc.concreteMethod(obj); ok { + // Fast path for concrete methods that avoids looping over scope. + return path, nil + } + + default: + panic(obj) + } + + // 4. Search the API for the path to the var (field/param/result) or method. + + // First inspect package-level named types. + // In the presence of path aliases, these give + // the best paths because non-types may + // refer to types, but not the reverse. + empty := make([]byte, 0, 48) // initial space + objs := enc.scopeObjects(scope) + for _, o := range objs { + tname, ok := o.(*types.TypeName) + if !ok { + continue // handle non-types in second pass + } + + path := append(empty, o.Name()...) + path = append(path, opType) + + T := o.Type() + + if tname.IsAlias() { + // type alias + if r := find(obj, T, path, nil); r != nil { + return Path(r), nil + } + } else { + if named, _ := T.(*types.Named); named != nil { + if r := findTypeParam(obj, typeparams.ForNamed(named), path, nil); r != nil { + // generic named type + return Path(r), nil + } + } + // defined (named) type + if r := find(obj, T.Underlying(), append(path, opUnderlying), nil); r != nil { + return Path(r), nil + } + } + } + + // Then inspect everything else: + // non-types, and declared methods of defined types. + for _, o := range objs { + path := append(empty, o.Name()...) + if _, ok := o.(*types.TypeName); !ok { + if o.Exported() { + // exported non-type (const, var, func) + if r := find(obj, o.Type(), append(path, opType), nil); r != nil { + return Path(r), nil + } + } + continue + } + + // Inspect declared methods of defined types. + if T, ok := o.Type().(*types.Named); ok { + path = append(path, opType) + if !enc.skipMethodSorting { + // Note that method index here is always with respect + // to canonical ordering of methods, regardless of how + // they appear in the underlying type. + for i, m := range enc.namedMethods(T) { + path2 := appendOpArg(path, opMethod, i) + if m == obj { + return Path(path2), nil // found declared method + } + if r := find(obj, m.Type(), append(path2, opType), nil); r != nil { + return Path(r), nil + } + } + } else { + // This branch must match the logic in the branch above, using go/types + // APIs without sorting. 
+ for i := 0; i < T.NumMethods(); i++ { + m := T.Method(i) + path2 := appendOpArg(path, opMethod, i) + if m == obj { + return Path(path2), nil // found declared method + } + if r := find(obj, m.Type(), append(path2, opType), nil); r != nil { + return Path(r), nil + } + } + } + } + } + + return "", fmt.Errorf("can't find path for %v in %s", obj, pkg.Path()) +} + +func appendOpArg(path []byte, op byte, arg int) []byte { + path = append(path, op) + path = strconv.AppendInt(path, int64(arg), 10) + return path +} + +// concreteMethod returns the path for meth, which must have a non-nil receiver. +// The second return value indicates success and may be false if the method is +// an interface method or if it is an instantiated method. +// +// This function is just an optimization that avoids the general scope walking +// approach. You are expected to fall back to the general approach if this +// function fails. +func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { + // Concrete methods can only be declared on package-scoped named types. For + // that reason we can skip the expensive walk over the package scope: the + // path will always be package -> named type -> method. We can trivially get + // the type name from the receiver, and only have to look over the type's + // methods to find the method index. + // + // Methods on generic types require special consideration, however. Consider + // the following package: + // + // L1: type S[T any] struct{} + // L2: func (recv S[A]) Foo() { recv.Bar() } + // L3: func (recv S[B]) Bar() { } + // L4: type Alias = S[int] + // L5: func _[T any]() { var s S[int]; s.Foo() } + // + // The receivers of methods on generic types are instantiations. L2 and L3 + // instantiate S with the type-parameters A and B, which are scoped to the + // respective methods. L4 and L5 each instantiate S with int. Each of these + // instantiations has its own method set, full of methods (and thus objects) + // with receivers whose types are the respective instantiations. In other + // words, we have + // + // S[A].Foo, S[A].Bar + // S[B].Foo, S[B].Bar + // S[int].Foo, S[int].Bar + // + // We may thus be trying to produce object paths for any of these objects. + // + // S[A].Foo and S[B].Bar are the origin methods, and their paths are S.Foo + // and S.Bar, which are the paths that this function naturally produces. + // + // S[A].Bar, S[B].Foo, and both methods on S[int] are instantiations that + // don't correspond to the origin methods. For S[int], this is significant. + // The most precise object path for S[int].Foo, for example, is Alias.Foo, + // not S.Foo. Our function, however, would produce S.Foo, which would + // resolve to a different object. + // + // For S[A].Bar and S[B].Foo it could be argued that S.Bar and S.Foo are + // still the correct paths, since only the origin methods have meaningful + // paths. But this is likely only true for trivial cases and has edge cases. + // Since this function is only an optimization, we err on the side of giving + // up, deferring to the slower but definitely correct algorithm. Most users + // of objectpath will only be giving us origin methods, anyway, as referring + // to instantiated methods is usually not useful. 
+ + if typeparams.OriginMethod(meth) != meth { + return "", false + } + + recvT := meth.Type().(*types.Signature).Recv().Type() + if ptr, ok := recvT.(*types.Pointer); ok { + recvT = ptr.Elem() + } + + named, ok := recvT.(*types.Named) + if !ok { + return "", false + } + + if types.IsInterface(named) { + // Named interfaces don't have to be package-scoped + // + // TODO(dominikh): opt: if scope.Lookup(name) == named, then we can apply this optimization to interface + // methods, too, I think. + return "", false + } + + // Preallocate space for the name, opType, opMethod, and some digits. + name := named.Obj().Name() + path := make([]byte, 0, len(name)+8) + path = append(path, name...) + path = append(path, opType) + + if !enc.skipMethodSorting { + for i, m := range enc.namedMethods(named) { + if m == meth { + path = appendOpArg(path, opMethod, i) + return Path(path), true + } + } + } else { + // This branch must match the logic of the branch above, using go/types + // APIs without sorting. + for i := 0; i < named.NumMethods(); i++ { + m := named.Method(i) + if m == meth { + path = appendOpArg(path, opMethod, i) + return Path(path), true + } + } + } + + // Due to golang/go#59944, go/types fails to associate the receiver with + // certain methods on cgo types. + // + // TODO(rfindley): replace this panic once golang/go#59944 is fixed in all Go + // versions gopls supports. + return "", false + // panic(fmt.Sprintf("couldn't find method %s on type %s; methods: %#v", meth, named, enc.namedMethods(named))) +} + +// find finds obj within type T, returning the path to it, or nil if not found. +// +// The seen map is used to short circuit cycles through type parameters. If +// nil, it will be allocated as necessary. +func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte { + switch T := T.(type) { + case *types.Basic, *types.Named: + // Named types belonging to pkg were handled already, + // so T must belong to another package. No path. 
+ return nil + case *types.Pointer: + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Slice: + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Array: + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Chan: + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Map: + if r := find(obj, T.Key(), append(path, opKey), seen); r != nil { + return r + } + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Signature: + if r := findTypeParam(obj, typeparams.ForSignature(T), path, seen); r != nil { + return r + } + if r := find(obj, T.Params(), append(path, opParams), seen); r != nil { + return r + } + return find(obj, T.Results(), append(path, opResults), seen) + case *types.Struct: + for i := 0; i < T.NumFields(); i++ { + fld := T.Field(i) + path2 := appendOpArg(path, opField, i) + if fld == obj { + return path2 // found field var + } + if r := find(obj, fld.Type(), append(path2, opType), seen); r != nil { + return r + } + } + return nil + case *types.Tuple: + for i := 0; i < T.Len(); i++ { + v := T.At(i) + path2 := appendOpArg(path, opAt, i) + if v == obj { + return path2 // found param/result var + } + if r := find(obj, v.Type(), append(path2, opType), seen); r != nil { + return r + } + } + return nil + case *types.Interface: + for i := 0; i < T.NumMethods(); i++ { + m := T.Method(i) + path2 := appendOpArg(path, opMethod, i) + if m == obj { + return path2 // found interface method + } + if r := find(obj, m.Type(), append(path2, opType), seen); r != nil { + return r + } + } + return nil + case *typeparams.TypeParam: + name := T.Obj() + if name == obj { + return append(path, opObj) + } + if seen[name] { + return nil + } + if seen == nil { + seen = make(map[*types.TypeName]bool) + } + seen[name] = true + if r := find(obj, T.Constraint(), append(path, opConstraint), seen); r != nil { + return r + } + return nil + } + panic(T) +} + +func findTypeParam(obj types.Object, list *typeparams.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte { + for i := 0; i < list.Len(); i++ { + tparam := list.At(i) + path2 := appendOpArg(path, opTypeParam, i) + if r := find(obj, tparam, path2, seen); r != nil { + return r + } + } + return nil +} + +// Object returns the object denoted by path p within the package pkg. +func Object(pkg *types.Package, p Path) (types.Object, error) { + return object(pkg, string(p), false) +} + +// Note: the skipMethodSorting parameter must match the value of +// Encoder.skipMethodSorting used during encoding. +func object(pkg *types.Package, pathstr string, skipMethodSorting bool) (types.Object, error) { + if pathstr == "" { + return nil, fmt.Errorf("empty path") + } + + var pkgobj, suffix string + if dot := strings.IndexByte(pathstr, opType); dot < 0 { + pkgobj = pathstr + } else { + pkgobj = pathstr[:dot] + suffix = pathstr[dot:] // suffix starts with "." + } + + obj := pkg.Scope().Lookup(pkgobj) + if obj == nil { + return nil, fmt.Errorf("package %s does not contain %q", pkg.Path(), pkgobj) + } + + // abstraction of *types.{Pointer,Slice,Array,Chan,Map} + type hasElem interface { + Elem() types.Type + } + // abstraction of *types.{Named,Signature} + type hasTypeParams interface { + TypeParams() *typeparams.TypeParamList + } + // abstraction of *types.{Named,TypeParam} + type hasObj interface { + Obj() *types.TypeName + } + + // The loop state is the pair (t, obj), + // exactly one of which is non-nil, initially obj. + // All suffixes start with '.' 
(the only object->type operation), + // followed by optional type->type operations, + // then a type->object operation. + // The cycle then repeats. + var t types.Type + for suffix != "" { + code := suffix[0] + suffix = suffix[1:] + + // Codes [AFM] have an integer operand. + var index int + switch code { + case opAt, opField, opMethod, opTypeParam: + rest := strings.TrimLeft(suffix, "0123456789") + numerals := suffix[:len(suffix)-len(rest)] + suffix = rest + i, err := strconv.Atoi(numerals) + if err != nil { + return nil, fmt.Errorf("invalid path: bad numeric operand %q for code %q", numerals, code) + } + index = int(i) + case opObj: + // no operand + default: + // The suffix must end with a type->object operation. + if suffix == "" { + return nil, fmt.Errorf("invalid path: ends with %q, want [AFMO]", code) + } + } + + if code == opType { + if t != nil { + return nil, fmt.Errorf("invalid path: unexpected %q in type context", opType) + } + t = obj.Type() + obj = nil + continue + } + + if t == nil { + return nil, fmt.Errorf("invalid path: code %q in object context", code) + } + + // Inv: t != nil, obj == nil + + switch code { + case opElem: + hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want pointer, slice, array, chan or map)", code, t, t) + } + t = hasElem.Elem() + + case opKey: + mapType, ok := t.(*types.Map) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want map)", code, t, t) + } + t = mapType.Key() + + case opParams: + sig, ok := t.(*types.Signature) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) + } + t = sig.Params() + + case opResults: + sig, ok := t.(*types.Signature) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) + } + t = sig.Results() + + case opUnderlying: + named, ok := t.(*types.Named) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named)", code, t, t) + } + t = named.Underlying() + + case opTypeParam: + hasTypeParams, ok := t.(hasTypeParams) // Named, Signature + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or signature)", code, t, t) + } + tparams := hasTypeParams.TypeParams() + if n := tparams.Len(); index >= n { + return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) + } + t = tparams.At(index) + + case opConstraint: + tparam, ok := t.(*typeparams.TypeParam) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want type parameter)", code, t, t) + } + t = tparam.Constraint() + + case opAt: + tuple, ok := t.(*types.Tuple) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want tuple)", code, t, t) + } + if n := tuple.Len(); index >= n { + return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) + } + obj = tuple.At(index) + t = nil + + case opField: + structType, ok := t.(*types.Struct) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want struct)", code, t, t) + } + if n := structType.NumFields(); index >= n { + return nil, fmt.Errorf("field index %d out of range [0-%d)", index, n) + } + obj = structType.Field(index) + t = nil + + case opMethod: + switch t := t.(type) { + case *types.Interface: + if index >= t.NumMethods() { + return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods()) + } + obj = t.Method(index) // Id-ordered + + case *types.Named: + if index >= t.NumMethods() { + return nil, 
fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods()) + } + if skipMethodSorting { + obj = t.Method(index) + } else { + methods := namedMethods(t) // (unmemoized) + obj = methods[index] // Id-ordered + } + + default: + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want interface or named)", code, t, t) + } + t = nil + + case opObj: + hasObj, ok := t.(hasObj) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or type param)", code, t, t) + } + obj = hasObj.Obj() + t = nil + + default: + return nil, fmt.Errorf("invalid path: unknown code %q", code) + } + } + + if obj.Pkg() != pkg { + return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj) + } + + return obj, nil // success +} + +// namedMethods returns the methods of a Named type in ascending Id order. +func namedMethods(named *types.Named) []*types.Func { + methods := make([]*types.Func, named.NumMethods()) + for i := range methods { + methods[i] = named.Method(i) + } + sort.Slice(methods, func(i, j int) bool { + return methods[i].Id() < methods[j].Id() + }) + return methods +} + +// namedMethods is a memoization of the namedMethods function. Callers must not modify the result. +func (enc *Encoder) namedMethods(named *types.Named) []*types.Func { + m := enc.namedMethodsMemo + if m == nil { + m = make(map[*types.Named][]*types.Func) + enc.namedMethodsMemo = m + } + methods, ok := m[named] + if !ok { + methods = namedMethods(named) // allocates and sorts + m[named] = methods + } + return methods +} + +// scopeObjects is a memoization of scope objects. +// Callers must not modify the result. +func (enc *Encoder) scopeObjects(scope *types.Scope) []types.Object { + m := enc.scopeMemo + if m == nil { + m = make(map[*types.Scope][]types.Object) + enc.scopeMemo = m + } + objs, ok := m[scope] + if !ok { + names := scope.Names() // allocates and sorts + objs = make([]types.Object, len(names)) + for i, name := range names { + objs[i] = scope.Lookup(name) + } + m[scope] = objs + } + return objs +} diff --git a/vendor/golang.org/x/tools/internal/event/core/event.go b/vendor/golang.org/x/tools/internal/event/core/event.go new file mode 100644 index 0000000000..a6cf0e64a4 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/core/event.go @@ -0,0 +1,85 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package core provides support for event based telemetry. +package core + +import ( + "fmt" + "time" + + "golang.org/x/tools/internal/event/label" +) + +// Event holds the information about an event of note that occurred. +type Event struct { + at time.Time + + // As events are often on the stack, storing the first few labels directly + // in the event can avoid an allocation at all for the very common cases of + // simple events. + // The length needs to be large enough to cope with the majority of events + // but no so large as to cause undue stack pressure. + // A log message with two values will use 3 labels (one for each value and + // one for the message itself). + + static [3]label.Label // inline storage for the first few labels + dynamic []label.Label // dynamically sized storage for remaining labels +} + +// eventLabelMap implements label.Map for a the labels of an Event. 
+type eventLabelMap struct {
+ event Event
+}
+
+func (ev Event) At() time.Time { return ev.at }
+
+func (ev Event) Format(f fmt.State, r rune) {
+ if !ev.at.IsZero() {
+ fmt.Fprint(f, ev.at.Format("2006/01/02 15:04:05 "))
+ }
+ for index := 0; ev.Valid(index); index++ {
+ if l := ev.Label(index); l.Valid() {
+ fmt.Fprintf(f, "\n\t%v", l)
+ }
+ }
+}
+
+func (ev Event) Valid(index int) bool {
+ return index >= 0 && index < len(ev.static)+len(ev.dynamic)
+}
+
+func (ev Event) Label(index int) label.Label {
+ if index < len(ev.static) {
+ return ev.static[index]
+ }
+ return ev.dynamic[index-len(ev.static)]
+}
+
+func (ev Event) Find(key label.Key) label.Label {
+ for _, l := range ev.static {
+ if l.Key() == key {
+ return l
+ }
+ }
+ for _, l := range ev.dynamic {
+ if l.Key() == key {
+ return l
+ }
+ }
+ return label.Label{}
+}
+
+func MakeEvent(static [3]label.Label, labels []label.Label) Event {
+ return Event{
+ static: static,
+ dynamic: labels,
+ }
+}
+
+// CloneEvent returns a copy of the event with the time adjusted to at.
+func CloneEvent(ev Event, at time.Time) Event {
+ ev.at = at
+ return ev
+}
diff --git a/vendor/golang.org/x/tools/internal/event/core/export.go b/vendor/golang.org/x/tools/internal/event/core/export.go
new file mode 100644
index 0000000000..05f3a9a579
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/event/core/export.go
@@ -0,0 +1,70 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package core
+
+import (
+ "context"
+ "sync/atomic"
+ "time"
+ "unsafe"
+
+ "golang.org/x/tools/internal/event/label"
+)
+
+// Exporter is a function that handles events.
+// It may return a modified context and event.
+type Exporter func(context.Context, Event, label.Map) context.Context
+
+var (
+ exporter unsafe.Pointer
+)
+
+// SetExporter sets the global exporter function that handles all events.
+// The exporter is called synchronously from the event call site, so it should
+// return quickly so as not to hold up user code.
+func SetExporter(e Exporter) {
+ p := unsafe.Pointer(&e)
+ if e == nil {
+ // &e is always valid, and so p is always valid, but for the early abort
+ // of ProcessEvent to be efficient it needs to make the nil check on the
+ // pointer without having to dereference it, so we make the nil function
+ // also a nil pointer.
+ p = nil
+ }
+ atomic.StorePointer(&exporter, p)
+}
+
+// deliver is called to deliver an event to the supplied exporter.
+// It will fill in the time.
+func deliver(ctx context.Context, exporter Exporter, ev Event) context.Context {
+ // add the current time to the event
+ ev.at = time.Now()
+ // hand the event off to the current exporter
+ return exporter(ctx, ev, ev)
+}
+
+// Export is called to deliver an event to the global exporter if set.
+func Export(ctx context.Context, ev Event) context.Context {
+ // get the global exporter and abort early if there is not one
+ exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter))
+ if exporterPtr == nil {
+ return ctx
+ }
+ return deliver(ctx, *exporterPtr, ev)
+}
+
+// ExportPair is called to deliver a start event to the supplied exporter.
+// It also returns a function that will deliver the end event to the same
+// exporter.
+// It will fill in the time.
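+//
+// A typical pairing (sketch; begin and end are assumed prebuilt Events):
+//
+// ctx, done := ExportPair(ctx, begin, end)
+// defer done() // delivers the end event to the same exporter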
+func ExportPair(ctx context.Context, begin, end Event) (context.Context, func()) {
+ // get the global exporter and abort early if there is not one
+ exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter))
+ if exporterPtr == nil {
+ return ctx, func() {}
+ }
+ ctx = deliver(ctx, *exporterPtr, begin)
+ return ctx, func() { deliver(ctx, *exporterPtr, end) }
+}
diff --git a/vendor/golang.org/x/tools/internal/event/core/fast.go b/vendor/golang.org/x/tools/internal/event/core/fast.go
new file mode 100644
index 0000000000..06c1d4615e
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/event/core/fast.go
@@ -0,0 +1,77 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package core
+
+import (
+ "context"
+
+ "golang.org/x/tools/internal/event/keys"
+ "golang.org/x/tools/internal/event/label"
+)
+
+// Log1 takes a message and one label and delivers a log event to the exporter.
+// It is a customized version of Print that is faster and does no allocation.
+func Log1(ctx context.Context, message string, t1 label.Label) {
+ Export(ctx, MakeEvent([3]label.Label{
+ keys.Msg.Of(message),
+ t1,
+ }, nil))
+}
+
+// Log2 takes a message and two labels and delivers a log event to the exporter.
+// It is a customized version of Print that is faster and does no allocation.
+func Log2(ctx context.Context, message string, t1 label.Label, t2 label.Label) {
+ Export(ctx, MakeEvent([3]label.Label{
+ keys.Msg.Of(message),
+ t1,
+ t2,
+ }, nil))
+}
+
+// Metric1 sends a label event to the exporter with the supplied labels.
+func Metric1(ctx context.Context, t1 label.Label) context.Context {
+ return Export(ctx, MakeEvent([3]label.Label{
+ keys.Metric.New(),
+ t1,
+ }, nil))
+}
+
+// Metric2 sends a label event to the exporter with the supplied labels.
+func Metric2(ctx context.Context, t1, t2 label.Label) context.Context {
+ return Export(ctx, MakeEvent([3]label.Label{
+ keys.Metric.New(),
+ t1,
+ t2,
+ }, nil))
+}
+
+// Start1 sends a span start event with the supplied label list to the exporter.
+// It also returns a function that will end the span, which should normally be
+// deferred.
+func Start1(ctx context.Context, name string, t1 label.Label) (context.Context, func()) {
+ return ExportPair(ctx,
+ MakeEvent([3]label.Label{
+ keys.Start.Of(name),
+ t1,
+ }, nil),
+ MakeEvent([3]label.Label{
+ keys.End.New(),
+ }, nil))
+}
+
+// Start2 sends a span start event with the supplied label list to the exporter.
+// It also returns a function that will end the span, which should normally be
+// deferred.
+func Start2(ctx context.Context, name string, t1, t2 label.Label) (context.Context, func()) {
+ return ExportPair(ctx,
+ MakeEvent([3]label.Label{
+ keys.Start.Of(name),
+ t1,
+ t2,
+ }, nil),
+ MakeEvent([3]label.Label{
+ keys.End.New(),
+ }, nil))
+}
diff --git a/vendor/golang.org/x/tools/internal/event/doc.go b/vendor/golang.org/x/tools/internal/event/doc.go
new file mode 100644
index 0000000000..5dc6e6babe
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/event/doc.go
@@ -0,0 +1,7 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package event provides a set of packages that cover the main
+// concepts of telemetry in an implementation-agnostic way.
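+//
+// As a sketch, instrumented code emits events through this package, and an
+// exporter installed once via event.SetExporter observes every event:
+//
+// event.Log(ctx, "resolved container image")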
+package event diff --git a/vendor/golang.org/x/tools/internal/event/event.go b/vendor/golang.org/x/tools/internal/event/event.go new file mode 100644 index 0000000000..4d55e577d1 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/event.go @@ -0,0 +1,127 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package event + +import ( + "context" + + "golang.org/x/tools/internal/event/core" + "golang.org/x/tools/internal/event/keys" + "golang.org/x/tools/internal/event/label" +) + +// Exporter is a function that handles events. +// It may return a modified context and event. +type Exporter func(context.Context, core.Event, label.Map) context.Context + +// SetExporter sets the global exporter function that handles all events. +// The exporter is called synchronously from the event call site, so it should +// return quickly so as not to hold up user code. +func SetExporter(e Exporter) { + core.SetExporter(core.Exporter(e)) +} + +// Log takes a message and a label list and combines them into a single event +// before delivering them to the exporter. +func Log(ctx context.Context, message string, labels ...label.Label) { + core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Msg.Of(message), + }, labels)) +} + +// IsLog returns true if the event was built by the Log function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsLog(ev core.Event) bool { + return ev.Label(0).Key() == keys.Msg +} + +// Error takes a message and a label list and combines them into a single event +// before delivering them to the exporter. It captures the error in the +// delivered event. +func Error(ctx context.Context, message string, err error, labels ...label.Label) { + core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Msg.Of(message), + keys.Err.Of(err), + }, labels)) +} + +// IsError returns true if the event was built by the Error function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsError(ev core.Event) bool { + return ev.Label(0).Key() == keys.Msg && + ev.Label(1).Key() == keys.Err +} + +// Metric sends a label event to the exporter with the supplied labels. +func Metric(ctx context.Context, labels ...label.Label) { + core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Metric.New(), + }, labels)) +} + +// IsMetric returns true if the event was built by the Metric function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsMetric(ev core.Event) bool { + return ev.Label(0).Key() == keys.Metric +} + +// Label sends a label event to the exporter with the supplied labels. +func Label(ctx context.Context, labels ...label.Label) context.Context { + return core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Label.New(), + }, labels)) +} + +// IsLabel returns true if the event was built by the Label function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsLabel(ev core.Event) bool { + return ev.Label(0).Key() == keys.Label +} + +// Start sends a span start event with the supplied label list to the exporter. +// It also returns a function that will end the span, which should normally be +// deferred. 
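+//
+// For example (sketch):
+//
+// ctx, done := event.Start(ctx, "load")
+// defer done()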
+func Start(ctx context.Context, name string, labels ...label.Label) (context.Context, func()) { + return core.ExportPair(ctx, + core.MakeEvent([3]label.Label{ + keys.Start.Of(name), + }, labels), + core.MakeEvent([3]label.Label{ + keys.End.New(), + }, nil)) +} + +// IsStart returns true if the event was built by the Start function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsStart(ev core.Event) bool { + return ev.Label(0).Key() == keys.Start +} + +// IsEnd returns true if the event was built by the End function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsEnd(ev core.Event) bool { + return ev.Label(0).Key() == keys.End +} + +// Detach returns a context without an associated span. +// This allows the creation of spans that are not children of the current span. +func Detach(ctx context.Context) context.Context { + return core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Detach.New(), + }, nil)) +} + +// IsDetach returns true if the event was built by the Detach function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsDetach(ev core.Event) bool { + return ev.Label(0).Key() == keys.Detach +} diff --git a/vendor/golang.org/x/tools/internal/event/keys/keys.go b/vendor/golang.org/x/tools/internal/event/keys/keys.go new file mode 100644 index 0000000000..a02206e301 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/keys/keys.go @@ -0,0 +1,564 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package keys + +import ( + "fmt" + "io" + "math" + "strconv" + + "golang.org/x/tools/internal/event/label" +) + +// Value represents a key for untyped values. +type Value struct { + name string + description string +} + +// New creates a new Key for untyped values. +func New(name, description string) *Value { + return &Value{name: name, description: description} +} + +func (k *Value) Name() string { return k.name } +func (k *Value) Description() string { return k.description } + +func (k *Value) Format(w io.Writer, buf []byte, l label.Label) { + fmt.Fprint(w, k.From(l)) +} + +// Get can be used to get a label for the key from a label.Map. +func (k *Value) Get(lm label.Map) interface{} { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return nil +} + +// From can be used to get a value from a Label. +func (k *Value) From(t label.Label) interface{} { return t.UnpackValue() } + +// Of creates a new Label with this key and the supplied value. +func (k *Value) Of(value interface{}) label.Label { return label.OfValue(k, value) } + +// Tag represents a key for tagging labels that have no value. +// These are used when the existence of the label is the entire information it +// carries, such as marking events to be of a specific kind, or from a specific +// package. +type Tag struct { + name string + description string +} + +// NewTag creates a new Key for tagging labels. +func NewTag(name, description string) *Tag { + return &Tag{name: name, description: description} +} + +func (k *Tag) Name() string { return k.name } +func (k *Tag) Description() string { return k.description } + +func (k *Tag) Format(w io.Writer, buf []byte, l label.Label) {} + +// New creates a new Label with this key. 
+func (k *Tag) New() label.Label { return label.OfValue(k, nil) } + +// Int represents a key +type Int struct { + name string + description string +} + +// NewInt creates a new Key for int values. +func NewInt(name, description string) *Int { + return &Int{name: name, description: description} +} + +func (k *Int) Name() string { return k.name } +func (k *Int) Description() string { return k.description } + +func (k *Int) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int) Of(v int) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int) Get(lm label.Map) int { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int) From(t label.Label) int { return int(t.Unpack64()) } + +// Int8 represents a key +type Int8 struct { + name string + description string +} + +// NewInt8 creates a new Key for int8 values. +func NewInt8(name, description string) *Int8 { + return &Int8{name: name, description: description} +} + +func (k *Int8) Name() string { return k.name } +func (k *Int8) Description() string { return k.description } + +func (k *Int8) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int8) Of(v int8) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int8) Get(lm label.Map) int8 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int8) From(t label.Label) int8 { return int8(t.Unpack64()) } + +// Int16 represents a key +type Int16 struct { + name string + description string +} + +// NewInt16 creates a new Key for int16 values. +func NewInt16(name, description string) *Int16 { + return &Int16{name: name, description: description} +} + +func (k *Int16) Name() string { return k.name } +func (k *Int16) Description() string { return k.description } + +func (k *Int16) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int16) Of(v int16) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int16) Get(lm label.Map) int16 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int16) From(t label.Label) int16 { return int16(t.Unpack64()) } + +// Int32 represents a key +type Int32 struct { + name string + description string +} + +// NewInt32 creates a new Key for int32 values. +func NewInt32(name, description string) *Int32 { + return &Int32{name: name, description: description} +} + +func (k *Int32) Name() string { return k.name } +func (k *Int32) Description() string { return k.description } + +func (k *Int32) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int32) Of(v int32) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. 
+func (k *Int32) Get(lm label.Map) int32 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int32) From(t label.Label) int32 { return int32(t.Unpack64()) } + +// Int64 represents a key +type Int64 struct { + name string + description string +} + +// NewInt64 creates a new Key for int64 values. +func NewInt64(name, description string) *Int64 { + return &Int64{name: name, description: description} +} + +func (k *Int64) Name() string { return k.name } +func (k *Int64) Description() string { return k.description } + +func (k *Int64) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, k.From(l), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int64) Of(v int64) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int64) Get(lm label.Map) int64 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int64) From(t label.Label) int64 { return int64(t.Unpack64()) } + +// UInt represents a key +type UInt struct { + name string + description string +} + +// NewUInt creates a new Key for uint values. +func NewUInt(name, description string) *UInt { + return &UInt{name: name, description: description} +} + +func (k *UInt) Name() string { return k.name } +func (k *UInt) Description() string { return k.description } + +func (k *UInt) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt) Of(v uint) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt) Get(lm label.Map) uint { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt) From(t label.Label) uint { return uint(t.Unpack64()) } + +// UInt8 represents a key +type UInt8 struct { + name string + description string +} + +// NewUInt8 creates a new Key for uint8 values. +func NewUInt8(name, description string) *UInt8 { + return &UInt8{name: name, description: description} +} + +func (k *UInt8) Name() string { return k.name } +func (k *UInt8) Description() string { return k.description } + +func (k *UInt8) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt8) Of(v uint8) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt8) Get(lm label.Map) uint8 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt8) From(t label.Label) uint8 { return uint8(t.Unpack64()) } + +// UInt16 represents a key +type UInt16 struct { + name string + description string +} + +// NewUInt16 creates a new Key for uint16 values. 
+func NewUInt16(name, description string) *UInt16 { + return &UInt16{name: name, description: description} +} + +func (k *UInt16) Name() string { return k.name } +func (k *UInt16) Description() string { return k.description } + +func (k *UInt16) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt16) Of(v uint16) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt16) Get(lm label.Map) uint16 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt16) From(t label.Label) uint16 { return uint16(t.Unpack64()) } + +// UInt32 represents a key +type UInt32 struct { + name string + description string +} + +// NewUInt32 creates a new Key for uint32 values. +func NewUInt32(name, description string) *UInt32 { + return &UInt32{name: name, description: description} +} + +func (k *UInt32) Name() string { return k.name } +func (k *UInt32) Description() string { return k.description } + +func (k *UInt32) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt32) Of(v uint32) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt32) Get(lm label.Map) uint32 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt32) From(t label.Label) uint32 { return uint32(t.Unpack64()) } + +// UInt64 represents a key +type UInt64 struct { + name string + description string +} + +// NewUInt64 creates a new Key for uint64 values. +func NewUInt64(name, description string) *UInt64 { + return &UInt64{name: name, description: description} +} + +func (k *UInt64) Name() string { return k.name } +func (k *UInt64) Description() string { return k.description } + +func (k *UInt64) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, k.From(l), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt64) Of(v uint64) label.Label { return label.Of64(k, v) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt64) Get(lm label.Map) uint64 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt64) From(t label.Label) uint64 { return t.Unpack64() } + +// Float32 represents a key +type Float32 struct { + name string + description string +} + +// NewFloat32 creates a new Key for float32 values. +func NewFloat32(name, description string) *Float32 { + return &Float32{name: name, description: description} +} + +func (k *Float32) Name() string { return k.name } +func (k *Float32) Description() string { return k.description } + +func (k *Float32) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendFloat(buf, float64(k.From(l)), 'E', -1, 32)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Float32) Of(v float32) label.Label { + return label.Of64(k, uint64(math.Float32bits(v))) +} + +// Get can be used to get a label for the key from a label.Map. 
+func (k *Float32) Get(lm label.Map) float32 {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *Float32) From(t label.Label) float32 {
+ return math.Float32frombits(uint32(t.Unpack64()))
+}
+
+// Float64 represents a key
+type Float64 struct {
+ name string
+ description string
+}
+
+// NewFloat64 creates a new Key for float64 values.
+func NewFloat64(name, description string) *Float64 {
+ return &Float64{name: name, description: description}
+}
+
+func (k *Float64) Name() string { return k.name }
+func (k *Float64) Description() string { return k.description }
+
+func (k *Float64) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendFloat(buf, k.From(l), 'E', -1, 64))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Float64) Of(v float64) label.Label {
+ return label.Of64(k, math.Float64bits(v))
+}
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Float64) Get(lm label.Map) float64 {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *Float64) From(t label.Label) float64 {
+ return math.Float64frombits(t.Unpack64())
+}
+
+// String represents a key
+type String struct {
+ name string
+ description string
+}
+
+// NewString creates a new Key for string values.
+func NewString(name, description string) *String {
+ return &String{name: name, description: description}
+}
+
+func (k *String) Name() string { return k.name }
+func (k *String) Description() string { return k.description }
+
+func (k *String) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendQuote(buf, k.From(l)))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *String) Of(v string) label.Label { return label.OfString(k, v) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *String) Get(lm label.Map) string {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return ""
+}
+
+// From can be used to get a value from a Label.
+func (k *String) From(t label.Label) string { return t.UnpackString() }
+
+// Boolean represents a key
+type Boolean struct {
+ name string
+ description string
+}
+
+// NewBoolean creates a new Key for bool values.
+func NewBoolean(name, description string) *Boolean {
+ return &Boolean{name: name, description: description}
+}
+
+func (k *Boolean) Name() string { return k.name }
+func (k *Boolean) Description() string { return k.description }
+
+func (k *Boolean) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendBool(buf, k.From(l)))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Boolean) Of(v bool) label.Label {
+ if v {
+ return label.Of64(k, 1)
+ }
+ return label.Of64(k, 0)
+}
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Boolean) Get(lm label.Map) bool {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return false
+}
+
+// From can be used to get a value from a Label.
+func (k *Boolean) From(t label.Label) bool { return t.Unpack64() > 0 }
+
+// Error represents a key
+type Error struct {
+ name string
+ description string
+}
+
+// NewError creates a new Key for error values.
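+// For example (sketch): given e := NewError("error", ""), e.Of(err) wraps err
+// in a label and e.From(l) recovers it (or nil if the label holds no error).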
+func NewError(name, description string) *Error {
+ return &Error{name: name, description: description}
+}
+
+func (k *Error) Name() string { return k.name }
+func (k *Error) Description() string { return k.description }
+
+func (k *Error) Format(w io.Writer, buf []byte, l label.Label) {
+ io.WriteString(w, k.From(l).Error())
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Error) Of(v error) label.Label { return label.OfValue(k, v) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Error) Get(lm label.Map) error {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return nil
+}
+
+// From can be used to get a value from a Label.
+func (k *Error) From(t label.Label) error {
+ err, _ := t.UnpackValue().(error)
+ return err
+}
diff --git a/vendor/golang.org/x/tools/internal/event/keys/standard.go b/vendor/golang.org/x/tools/internal/event/keys/standard.go
new file mode 100644
index 0000000000..7e95866592
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/event/keys/standard.go
@@ -0,0 +1,22 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package keys
+
+var (
+ // Msg is a key used to add message strings to label lists.
+ Msg = NewString("message", "a readable message")
+ // Label is a key used to indicate an event adds labels to the context.
+ Label = NewTag("label", "a label context marker")
+ // Start is used for things like traces that have a name.
+ Start = NewString("start", "span start")
+ // End is a key used to mark the end of a span.
+ End = NewTag("end", "a span end marker")
+ // Detach is a key used to indicate an event detaches the context from any enclosing span.
+ Detach = NewTag("detach", "a span detach marker")
+ // Err is a key used to add error values to label lists.
+ Err = NewError("error", "an error that occurred")
+ // Metric is a key used to indicate an event records metrics.
+ Metric = NewTag("metric", "a metric event marker")
+)
diff --git a/vendor/golang.org/x/tools/internal/event/label/label.go b/vendor/golang.org/x/tools/internal/event/label/label.go
new file mode 100644
index 0000000000..0f526e1f9a
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/event/label/label.go
@@ -0,0 +1,215 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package label
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+ "unsafe"
+)
+
+// Key is used as the identity of a Label.
+// Keys are intended to be compared by pointer only; the name should be unique
+// for communicating with external systems, but it is not required or enforced.
+type Key interface {
+ // Name returns the key name.
+ Name() string
+ // Description returns a string that can be used to describe the value.
+ Description() string
+
+ // Format is used in formatting to append the value of the label to the
+ // supplied buffer.
+ // The formatter may use the supplied buf as a scratch area to avoid
+ // allocations.
+ Format(w io.Writer, buf []byte, l Label)
+}
+
+// Label holds a key and value pair.
+// It is normally used when passing around lists of labels.
+type Label struct {
+ key Key
+ packed uint64
+ untyped interface{}
+}
+
+// Map is the interface to a collection of Labels indexed by key.
+type Map interface {
+ // Find returns the label that matches the supplied key.
+ Find(key Key) Label
+}
+
+// List is the interface to something that provides an iterable
+// list of labels.
+// Iteration should start from 0 and continue until Valid returns false.
+type List interface {
+ // Valid returns true if the index is within range for the list.
+ // It does not imply the label at that index will itself be valid.
+ Valid(index int) bool
+ // Label returns the label at the given index.
+ Label(index int) Label
+}
+
+// list implements List for a list of Labels.
+type list struct {
+ labels []Label
+}
+
+// filter wraps a List, filtering out specific labels.
+type filter struct {
+ keys []Key
+ underlying List
+}
+
+// listMap implements Map for a simple list of labels.
+type listMap struct {
+ labels []Label
+}
+
+// mapChain implements Map for a list of underlying Maps.
+type mapChain struct {
+ maps []Map
+}
+
+// OfValue creates a new label from the key and value.
+// This method is for implementing new key types; label creation should
+// normally be done with the Of method of the key.
+func OfValue(k Key, value interface{}) Label { return Label{key: k, untyped: value} }
+
+// UnpackValue assumes the label was built using OfValue and returns the value
+// that was passed to that constructor.
+// This method is for implementing new key types; for type safety, normal
+// access should be done with the From method of the key.
+func (t Label) UnpackValue() interface{} { return t.untyped }
+
+// Of64 creates a new label from a key and a uint64. This is often
+// used for non-uint64 values that can be packed into a uint64.
+// This method is for implementing new key types; label creation should
+// normally be done with the Of method of the key.
+func Of64(k Key, v uint64) Label { return Label{key: k, packed: v} }
+
+// Unpack64 assumes the label was built using Of64 and returns the value that
+// was passed to that constructor.
+// This method is for implementing new key types; for type safety, normal
+// access should be done with the From method of the key.
+func (t Label) Unpack64() uint64 { return t.packed }
+
+type stringptr unsafe.Pointer
+
+// OfString creates a new label from a key and a string.
+// This method is for implementing new key types; label creation should
+// normally be done with the Of method of the key.
+func OfString(k Key, v string) Label {
+ hdr := (*reflect.StringHeader)(unsafe.Pointer(&v))
+ return Label{
+ key: k,
+ packed: uint64(hdr.Len),
+ untyped: stringptr(hdr.Data),
+ }
+}
+
+// UnpackString assumes the label was built using OfString and returns the
+// value that was passed to that constructor.
+// This method is for implementing new key types; for type safety, normal
+// access should be done with the From method of the key.
+func (t Label) UnpackString() string {
+ var v string
+ hdr := (*reflect.StringHeader)(unsafe.Pointer(&v))
+ hdr.Data = uintptr(t.untyped.(stringptr))
+ hdr.Len = int(t.packed)
+ return v
+}
+
+// Valid returns true if the Label is a valid one (it has a key).
+func (t Label) Valid() bool { return t.key != nil }
+
+// Key returns the key of this Label.
+func (t Label) Key() Key { return t.key }
+
+// Format is used for debug printing of labels.
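+// For example (sketch), a label built with keys.NewInt("port", "").Of(80)
+// formats as "port=80", and an invalid label formats as "nil".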
+func (t Label) Format(f fmt.State, r rune) { + if !t.Valid() { + io.WriteString(f, `nil`) + return + } + io.WriteString(f, t.Key().Name()) + io.WriteString(f, "=") + var buf [128]byte + t.Key().Format(f, buf[:0], t) +} + +func (l *list) Valid(index int) bool { + return index >= 0 && index < len(l.labels) +} + +func (l *list) Label(index int) Label { + return l.labels[index] +} + +func (f *filter) Valid(index int) bool { + return f.underlying.Valid(index) +} + +func (f *filter) Label(index int) Label { + l := f.underlying.Label(index) + for _, f := range f.keys { + if l.Key() == f { + return Label{} + } + } + return l +} + +func (lm listMap) Find(key Key) Label { + for _, l := range lm.labels { + if l.Key() == key { + return l + } + } + return Label{} +} + +func (c mapChain) Find(key Key) Label { + for _, src := range c.maps { + l := src.Find(key) + if l.Valid() { + return l + } + } + return Label{} +} + +var emptyList = &list{} + +func NewList(labels ...Label) List { + if len(labels) == 0 { + return emptyList + } + return &list{labels: labels} +} + +func Filter(l List, keys ...Key) List { + if len(keys) == 0 { + return l + } + return &filter{keys: keys, underlying: l} +} + +func NewMap(labels ...Label) Map { + return listMap{labels: labels} +} + +func MergeMaps(srcs ...Map) Map { + var nonNil []Map + for _, src := range srcs { + if src != nil { + nonNil = append(nonNil, src) + } + } + if len(nonNil) == 1 { + return nonNil[0] + } + return mapChain{maps: nonNil} +} diff --git a/vendor/golang.org/x/tools/internal/event/tag/tag.go b/vendor/golang.org/x/tools/internal/event/tag/tag.go new file mode 100644 index 0000000000..581b26c204 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/tag/tag.go @@ -0,0 +1,59 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tag provides the labels used for telemetry throughout gopls. 
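+//
+// An illustrative use (the call site here is hypothetical) pairs these keys
+// with the sibling event package:
+//
+//	event.Log(ctx, "compiling", tag.File.Of("main.go"))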
+package tag + +import ( + "golang.org/x/tools/internal/event/keys" +) + +var ( + // create the label keys we use + Method = keys.NewString("method", "") + StatusCode = keys.NewString("status.code", "") + StatusMessage = keys.NewString("status.message", "") + RPCID = keys.NewString("id", "") + RPCDirection = keys.NewString("direction", "") + File = keys.NewString("file", "") + Directory = keys.New("directory", "") + URI = keys.New("URI", "") + Package = keys.NewString("package", "") // sorted comma-separated list of Package IDs + PackagePath = keys.NewString("package_path", "") + Query = keys.New("query", "") + Snapshot = keys.NewUInt64("snapshot", "") + Operation = keys.NewString("operation", "") + + Position = keys.New("position", "") + Category = keys.NewString("category", "") + PackageCount = keys.NewInt("packages", "") + Files = keys.New("files", "") + Port = keys.NewInt("port", "") + Type = keys.New("type", "") + HoverKind = keys.NewString("hoverkind", "") + + NewServer = keys.NewString("new_server", "A new server was added") + EndServer = keys.NewString("end_server", "A server was shut down") + + ServerID = keys.NewString("server", "The server ID an event is related to") + Logfile = keys.NewString("logfile", "") + DebugAddress = keys.NewString("debug_address", "") + GoplsPath = keys.NewString("gopls_path", "") + ClientID = keys.NewString("client_id", "") + + Level = keys.NewInt("level", "The logging level") +) + +var ( + // create the stats we measure + Started = keys.NewInt64("started", "Count of started RPCs.") + ReceivedBytes = keys.NewInt64("received_bytes", "Bytes received.") //, unit.Bytes) + SentBytes = keys.NewInt64("sent_bytes", "Bytes sent.") //, unit.Bytes) + Latency = keys.NewFloat64("latency_ms", "Elapsed time in milliseconds") //, unit.Milliseconds) +) + +const ( + Inbound = "in" + Outbound = "out" +) diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go new file mode 100644 index 0000000000..d98b0db2a9 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go @@ -0,0 +1,150 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains the remaining vestiges of +// $GOROOT/src/go/internal/gcimporter/bimport.go. + +package gcimporter + +import ( + "fmt" + "go/token" + "go/types" + "sync" +) + +func errorf(format string, args ...interface{}) { + panic(fmt.Sprintf(format, args...)) +} + +const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go + +// Synthesize a token.Pos +type fakeFileSet struct { + fset *token.FileSet + files map[string]*fileInfo +} + +type fileInfo struct { + file *token.File + lastline int +} + +const maxlines = 64 * 1024 + +func (s *fakeFileSet) pos(file string, line, column int) token.Pos { + // TODO(mdempsky): Make use of column. + + // Since we don't know the set of needed file positions, we reserve maxlines + // positions per file. We delay calling token.File.SetLines until all + // positions have been calculated (by way of fakeFileSet.setLines), so that + // we can avoid setting unnecessary lines. See also golang/go#46586. + f := s.files[file] + if f == nil { + f = &fileInfo{file: s.fset.AddFile(file, -1, maxlines)} + s.files[file] = f + } + if line > maxlines { + line = 1 + } + if line > f.lastline { + f.lastline = line + } + + // Return a fake position assuming that f.file consists only of newlines. 
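+	// setLines records line i as starting at offset i (one byte per line),
+	// so line N begins at offset N-1 and Base()+line-1 is a Pos on line N.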
+ return token.Pos(f.file.Base() + line - 1) +} + +func (s *fakeFileSet) setLines() { + fakeLinesOnce.Do(func() { + fakeLines = make([]int, maxlines) + for i := range fakeLines { + fakeLines[i] = i + } + }) + for _, f := range s.files { + f.file.SetLines(fakeLines[:f.lastline]) + } +} + +var ( + fakeLines []int + fakeLinesOnce sync.Once +) + +func chanDir(d int) types.ChanDir { + // tag values must match the constants in cmd/compile/internal/gc/go.go + switch d { + case 1 /* Crecv */ : + return types.RecvOnly + case 2 /* Csend */ : + return types.SendOnly + case 3 /* Cboth */ : + return types.SendRecv + default: + errorf("unexpected channel dir %d", d) + return 0 + } +} + +var predeclOnce sync.Once +var predecl []types.Type // initialized lazily + +func predeclared() []types.Type { + predeclOnce.Do(func() { + // initialize lazily to be sure that all + // elements have been initialized before + predecl = []types.Type{ // basic types + types.Typ[types.Bool], + types.Typ[types.Int], + types.Typ[types.Int8], + types.Typ[types.Int16], + types.Typ[types.Int32], + types.Typ[types.Int64], + types.Typ[types.Uint], + types.Typ[types.Uint8], + types.Typ[types.Uint16], + types.Typ[types.Uint32], + types.Typ[types.Uint64], + types.Typ[types.Uintptr], + types.Typ[types.Float32], + types.Typ[types.Float64], + types.Typ[types.Complex64], + types.Typ[types.Complex128], + types.Typ[types.String], + + // basic type aliases + types.Universe.Lookup("byte").Type(), + types.Universe.Lookup("rune").Type(), + + // error + types.Universe.Lookup("error").Type(), + + // untyped types + types.Typ[types.UntypedBool], + types.Typ[types.UntypedInt], + types.Typ[types.UntypedRune], + types.Typ[types.UntypedFloat], + types.Typ[types.UntypedComplex], + types.Typ[types.UntypedString], + types.Typ[types.UntypedNil], + + // package unsafe + types.Typ[types.UnsafePointer], + + // invalid type + types.Typ[types.Invalid], // only appears in packages with errors + + // used internally by gc; never used by this package or in .a files + anyType{}, + } + predecl = append(predecl, additionalPredeclared()...) + }) + return predecl +} + +type anyType struct{} + +func (t anyType) Underlying() types.Type { return t } +func (t anyType) String() string { return "any" } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go new file mode 100644 index 0000000000..f6437feb1c --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go @@ -0,0 +1,99 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go. + +// This file implements FindExportData. + +package gcimporter + +import ( + "bufio" + "fmt" + "io" + "strconv" + "strings" +) + +func readGopackHeader(r *bufio.Reader) (name string, size int64, err error) { + // See $GOROOT/include/ar.h. 
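+	// An archive entry header is 60 bytes: name (16), mtime (12), uid (6),
+	// gid (6), mode (8), size (10), and a 2-byte "`\n" terminator.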
+ hdr := make([]byte, 16+12+6+6+8+10+2) + _, err = io.ReadFull(r, hdr) + if err != nil { + return + } + // leave for debugging + if false { + fmt.Printf("header: %s", hdr) + } + s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10])) + length, err := strconv.Atoi(s) + size = int64(length) + if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' { + err = fmt.Errorf("invalid archive header") + return + } + name = strings.TrimSpace(string(hdr[:16])) + return +} + +// FindExportData positions the reader r at the beginning of the +// export data section of an underlying GC-created object/archive +// file by reading from it. The reader must be positioned at the +// start of the file before calling this function. The hdr result +// is the string before the export data, either "$$" or "$$B". +// The size result is the length of the export data in bytes, or -1 if not known. +func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) { + // Read first line to make sure this is an object file. + line, err := r.ReadSlice('\n') + if err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + + if string(line) == "!\n" { + // Archive file. Scan to __.PKGDEF. + var name string + if name, size, err = readGopackHeader(r); err != nil { + return + } + + // First entry should be __.PKGDEF. + if name != "__.PKGDEF" { + err = fmt.Errorf("go archive is missing __.PKGDEF") + return + } + + // Read first line of __.PKGDEF data, so that line + // is once again the first line of the input. + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + size -= int64(len(line)) + } + + // Now at __.PKGDEF in archive or still at beginning of file. + // Either way, line should begin with "go object ". + if !strings.HasPrefix(string(line), "go object ") { + err = fmt.Errorf("not a Go object file") + return + } + + // Skip over object header to export data. + // Begins after first line starting with $$. + for line[0] != '$' { + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + size -= int64(len(line)) + } + hdr = string(line) + if size < 0 { + size = -1 + } + + return +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go new file mode 100644 index 0000000000..2d078ccb19 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -0,0 +1,273 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a reduced copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go. + +// Package gcimporter provides various functions for reading +// gc-generated object files that can be used to implement the +// Importer interface defined by the Go 1.5 standard library package. +// +// The encoding is deterministic: if the encoder is applied twice to +// the same types.Package data structure, both encodings are equal. +// This property may be important to avoid spurious changes in +// applications such as build systems. +// +// However, the encoder is not necessarily idempotent. Importing an +// exported package may yield a types.Package that, while it +// represents the same set of Go types as the original, may differ in +// the details of its internal representation. 
Because of these +// differences, re-encoding the imported package may yield a +// different, but equally valid, encoding of the package. +package gcimporter // import "golang.org/x/tools/internal/gcimporter" + +import ( + "bufio" + "bytes" + "fmt" + "go/build" + "go/token" + "go/types" + "io" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" +) + +const ( + // Enable debug during development: it adds some additional checks, and + // prevents errors from being recovered. + debug = false + + // If trace is set, debugging output is printed to std out. + trace = false +) + +var exportMap sync.Map // package dir → func() (string, bool) + +// lookupGorootExport returns the location of the export data +// (normally found in the build cache, but located in GOROOT/pkg +// in prior Go releases) for the package located in pkgDir. +// +// (We use the package's directory instead of its import path +// mainly to simplify handling of the packages in src/vendor +// and cmd/vendor.) +func lookupGorootExport(pkgDir string) (string, bool) { + f, ok := exportMap.Load(pkgDir) + if !ok { + var ( + listOnce sync.Once + exportPath string + ) + f, _ = exportMap.LoadOrStore(pkgDir, func() (string, bool) { + listOnce.Do(func() { + cmd := exec.Command("go", "list", "-export", "-f", "{{.Export}}", pkgDir) + cmd.Dir = build.Default.GOROOT + var output []byte + output, err := cmd.Output() + if err != nil { + return + } + + exports := strings.Split(string(bytes.TrimSpace(output)), "\n") + if len(exports) != 1 { + return + } + + exportPath = exports[0] + }) + + return exportPath, exportPath != "" + }) + } + + return f.(func() (string, bool))() +} + +var pkgExts = [...]string{".a", ".o"} + +// FindPkg returns the filename and unique package id for an import +// path based on package information provided by build.Import (using +// the build.Default build.Context). A relative srcDir is interpreted +// relative to the current working directory. +// If no file was found, an empty filename is returned. +func FindPkg(path, srcDir string) (filename, id string) { + if path == "" { + return + } + + var noext string + switch { + default: + // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" + // Don't require the source files to be present. 
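+		// build.AllowBinary permits a compiled package without source files;
+		// making srcDir absolute keeps the result independent of the
+		// process's working directory.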
+ if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 + srcDir = abs + } + bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary) + if bp.PkgObj == "" { + var ok bool + if bp.Goroot && bp.Dir != "" { + filename, ok = lookupGorootExport(bp.Dir) + } + if !ok { + id = path // make sure we have an id to print in error message + return + } + } else { + noext = strings.TrimSuffix(bp.PkgObj, ".a") + id = bp.ImportPath + } + + case build.IsLocalImport(path): + // "./x" -> "/this/directory/x.ext", "/this/directory/x" + noext = filepath.Join(srcDir, path) + id = noext + + case filepath.IsAbs(path): + // for completeness only - go/build.Import + // does not support absolute imports + // "/x" -> "/x.ext", "/x" + noext = path + id = path + } + + if false { // for debugging + if path != id { + fmt.Printf("%s -> %s\n", path, id) + } + } + + if filename != "" { + if f, err := os.Stat(filename); err == nil && !f.IsDir() { + return + } + } + + // try extensions + for _, ext := range pkgExts { + filename = noext + ext + if f, err := os.Stat(filename); err == nil && !f.IsDir() { + return + } + } + + filename = "" // not found + return +} + +// Import imports a gc-generated package given its import path and srcDir, adds +// the corresponding package object to the packages map, and returns the object. +// The packages map must contain all packages already imported. +func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { + var rc io.ReadCloser + var filename, id string + if lookup != nil { + // With custom lookup specified, assume that caller has + // converted path to a canonical import path for use in the map. + if path == "unsafe" { + return types.Unsafe, nil + } + id = path + + // No need to re-import if the package was imported completely before. + if pkg = packages[id]; pkg != nil && pkg.Complete() { + return + } + f, err := lookup(path) + if err != nil { + return nil, err + } + rc = f + } else { + filename, id = FindPkg(path, srcDir) + if filename == "" { + if path == "unsafe" { + return types.Unsafe, nil + } + return nil, fmt.Errorf("can't find import: %q", id) + } + + // no need to re-import if the package was imported completely before + if pkg = packages[id]; pkg != nil && pkg.Complete() { + return + } + + // open file + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + // add file name to error + err = fmt.Errorf("%s: %v", filename, err) + } + }() + rc = f + } + defer rc.Close() + + var hdr string + var size int64 + buf := bufio.NewReader(rc) + if hdr, size, err = FindExportData(buf); err != nil { + return + } + + switch hdr { + case "$$B\n": + var data []byte + data, err = io.ReadAll(buf) + if err != nil { + break + } + + // TODO(gri): allow clients of go/importer to provide a FileSet. + // Or, define a new standard go/types/gcexportdata package. + fset := token.NewFileSet() + + // Select appropriate importer. 
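+		// The first byte of the export data selects the decoder: 'i' is the
+		// indexed format (IImportData), 'u' the unified format (UImportData),
+		// and the old binary formats ('v', 'c', 'd') are rejected.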
+		if len(data) > 0 {
+			switch data[0] {
+			case 'v', 'c', 'd': // binary, till go1.10
+				return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])
+
+			case 'i': // indexed, till go1.19
+				_, pkg, err := IImportData(fset, packages, data[1:], id)
+				return pkg, err
+
+			case 'u': // unified, from go1.20
+				_, pkg, err := UImportData(fset, packages, data[1:size], id)
+				return pkg, err
+
+			default:
+				l := len(data)
+				if l > 10 {
+					l = 10
+				}
+				return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id)
+			}
+		}
+
+	default:
+		err = fmt.Errorf("unknown export data header: %q", hdr)
+	}
+
+	return
+}
+
+func deref(typ types.Type) types.Type {
+	if p, _ := typ.(*types.Pointer); p != nil {
+		return p.Elem()
+	}
+	return typ
+}
+
+type byPath []*types.Package
+
+func (a byPath) Len() int           { return len(a) }
+func (a byPath) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() }
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
new file mode 100644
index 0000000000..6103dd7102
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
@@ -0,0 +1,1322 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Indexed binary package export.
+// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go;
+// see that file for specification of the format.
+
+package gcimporter
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"go/constant"
+	"go/token"
+	"go/types"
+	"io"
+	"math/big"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+
+	"golang.org/x/tools/go/types/objectpath"
+	"golang.org/x/tools/internal/tokeninternal"
+	"golang.org/x/tools/internal/typeparams"
+)
+
+// IExportShallow encodes "shallow" export data for the specified package.
+//
+// No promises are made about the encoding other than that it can be decoded by
+// the same version of IImportShallow. If you plan to save export data in the
+// file system, be sure to include a cryptographic digest of the executable in
+// the key to avoid version skew.
+//
+// If the provided reportf func is non-nil, it will be used for reporting bugs
+// encountered during export.
+// TODO(rfindley): remove reportf when we are confident enough in the new
+// objectpath encoding.
+func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc) ([]byte, error) {
+	// In principle this operation can only fail if out.Write fails,
+	// but that's impossible for bytes.Buffer---and as a matter of
+	// fact iexportCommon doesn't even check for I/O errors.
+	// TODO(adonovan): handle I/O errors properly.
+	// TODO(adonovan): use byte slices throughout, avoiding copying.
+	const bundle, shallow = false, true
+	var out bytes.Buffer
+	err := iexportCommon(&out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg})
+	return out.Bytes(), err
+}
+
+// IImportShallow decodes "shallow" types.Package data encoded by
+// IExportShallow in the same executable. This function cannot import data from
+// cmd/compile or gcexportdata.Write.
+//
+// The importer calls getPackages to obtain package symbols for all
+// packages mentioned in the export data, including the one being
+// decoded.
+//
+// If the provided reportf func is non-nil, it will be used for reporting bugs
+// encountered during import.
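+//
+// A minimal sketch of a round trip, assuming a type-checked package pkg and
+// a map imports of previously loaded packages:
+//
+//	data, _ := IExportShallow(fset, pkg, nil)
+//	pkg2, _ := IImportShallow(fset, GetPackagesFromMap(imports), data, pkg.Path(), nil)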
+// TODO(rfindley): remove reportf when we are confident enough in the new +// objectpath encoding. +func IImportShallow(fset *token.FileSet, getPackages GetPackagesFunc, data []byte, path string, reportf ReportFunc) (*types.Package, error) { + const bundle = false + const shallow = true + pkgs, err := iimportCommon(fset, getPackages, data, bundle, path, shallow, reportf) + if err != nil { + return nil, err + } + return pkgs[0], nil +} + +// ReportFunc is the type of a function used to report formatted bugs. +type ReportFunc = func(string, ...interface{}) + +// Current bundled export format version. Increase with each format change. +// 0: initial implementation +const bundleVersion = 0 + +// IExportData writes indexed export data for pkg to out. +// +// If no file set is provided, position info will be missing. +// The package path of the top-level package will not be recorded, +// so that calls to IImportData can override with a provided package path. +func IExportData(out io.Writer, fset *token.FileSet, pkg *types.Package) error { + const bundle, shallow = false, false + return iexportCommon(out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}) +} + +// IExportBundle writes an indexed export bundle for pkgs to out. +func IExportBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error { + const bundle, shallow = true, false + return iexportCommon(out, fset, bundle, shallow, iexportVersion, pkgs) +} + +func iexportCommon(out io.Writer, fset *token.FileSet, bundle, shallow bool, version int, pkgs []*types.Package) (err error) { + if !debug { + defer func() { + if e := recover(); e != nil { + if ierr, ok := e.(internalError); ok { + err = ierr + return + } + // Not an internal error; panic again. + panic(e) + } + }() + } + + p := iexporter{ + fset: fset, + version: version, + shallow: shallow, + allPkgs: map[*types.Package]bool{}, + stringIndex: map[string]uint64{}, + declIndex: map[types.Object]uint64{}, + tparamNames: map[types.Object]string{}, + typIndex: map[types.Type]uint64{}, + } + if !bundle { + p.localpkg = pkgs[0] + } + + for i, pt := range predeclared() { + p.typIndex[pt] = uint64(i) + } + if len(p.typIndex) > predeclReserved { + panic(internalErrorf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved)) + } + + // Initialize work queue with exported declarations. + for _, pkg := range pkgs { + scope := pkg.Scope() + for _, name := range scope.Names() { + if token.IsExported(name) { + p.pushDecl(scope.Lookup(name)) + } + } + + if bundle { + // Ensure pkg and its imports are included in the index. + p.allPkgs[pkg] = true + for _, imp := range pkg.Imports() { + p.allPkgs[imp] = true + } + } + } + + // Loop until no more work. + for !p.declTodo.empty() { + p.doDecl(p.declTodo.popHead()) + } + + // Produce index of offset of each file record in files. + var files intWriter + var fileOffset []uint64 // fileOffset[i] is offset in files of file encoded as i + if p.shallow { + fileOffset = make([]uint64, len(p.fileInfos)) + for i, info := range p.fileInfos { + fileOffset[i] = uint64(files.Len()) + p.encodeFile(&files, info.file, info.needed) + } + } + + // Append indices to data0 section. + dataLen := uint64(p.data0.Len()) + w := p.newWriter() + w.writeIndex(p.declIndex) + + if bundle { + w.uint64(uint64(len(pkgs))) + for _, pkg := range pkgs { + w.pkg(pkg) + imps := pkg.Imports() + w.uint64(uint64(len(imps))) + for _, imp := range imps { + w.pkg(imp) + } + } + } + w.flush() + + // Assemble header. 
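+	// The header records, in order: the bundle version (bundles only), the
+	// format version, the string section length, the file section length and
+	// per-file offsets (shallow mode only), and finally the data section
+	// length.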
+ var hdr intWriter + if bundle { + hdr.uint64(bundleVersion) + } + hdr.uint64(uint64(p.version)) + hdr.uint64(uint64(p.strings.Len())) + if p.shallow { + hdr.uint64(uint64(files.Len())) + hdr.uint64(uint64(len(fileOffset))) + for _, offset := range fileOffset { + hdr.uint64(offset) + } + } + hdr.uint64(dataLen) + + // Flush output. + io.Copy(out, &hdr) + io.Copy(out, &p.strings) + if p.shallow { + io.Copy(out, &files) + } + io.Copy(out, &p.data0) + + return nil +} + +// encodeFile writes to w a representation of the file sufficient to +// faithfully restore position information about all needed offsets. +// Mutates the needed array. +func (p *iexporter) encodeFile(w *intWriter, file *token.File, needed []uint64) { + _ = needed[0] // precondition: needed is non-empty + + w.uint64(p.stringOff(file.Name())) + + size := uint64(file.Size()) + w.uint64(size) + + // Sort the set of needed offsets. Duplicates are harmless. + sort.Slice(needed, func(i, j int) bool { return needed[i] < needed[j] }) + + lines := tokeninternal.GetLines(file) // byte offset of each line start + w.uint64(uint64(len(lines))) + + // Rather than record the entire array of line start offsets, + // we save only a sparse list of (index, offset) pairs for + // the start of each line that contains a needed position. + var sparse [][2]int // (index, offset) pairs +outer: + for i, lineStart := range lines { + lineEnd := size + if i < len(lines)-1 { + lineEnd = uint64(lines[i+1]) + } + // Does this line contains a needed offset? + if needed[0] < lineEnd { + sparse = append(sparse, [2]int{i, lineStart}) + for needed[0] < lineEnd { + needed = needed[1:] + if len(needed) == 0 { + break outer + } + } + } + } + + // Delta-encode the columns. + w.uint64(uint64(len(sparse))) + var prev [2]int + for _, pair := range sparse { + w.uint64(uint64(pair[0] - prev[0])) + w.uint64(uint64(pair[1] - prev[1])) + prev = pair + } +} + +// writeIndex writes out an object index. mainIndex indicates whether +// we're writing out the main index, which is also read by +// non-compiler tools and includes a complete package description +// (i.e., name and height). +func (w *exportWriter) writeIndex(index map[types.Object]uint64) { + type pkgObj struct { + obj types.Object + name string // qualified name; differs from obj.Name for type params + } + // Build a map from packages to objects from that package. + pkgObjs := map[*types.Package][]pkgObj{} + + // For the main index, make sure to include every package that + // we reference, even if we're not exporting (or reexporting) + // any symbols from it. + if w.p.localpkg != nil { + pkgObjs[w.p.localpkg] = nil + } + for pkg := range w.p.allPkgs { + pkgObjs[pkg] = nil + } + + for obj := range index { + name := w.p.exportName(obj) + pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], pkgObj{obj, name}) + } + + var pkgs []*types.Package + for pkg, objs := range pkgObjs { + pkgs = append(pkgs, pkg) + + sort.Slice(objs, func(i, j int) bool { + return objs[i].name < objs[j].name + }) + } + + sort.Slice(pkgs, func(i, j int) bool { + return w.exportPath(pkgs[i]) < w.exportPath(pkgs[j]) + }) + + w.uint64(uint64(len(pkgs))) + for _, pkg := range pkgs { + w.string(w.exportPath(pkg)) + w.string(pkg.Name()) + w.uint64(uint64(0)) // package height is not needed for go/types + + objs := pkgObjs[pkg] + w.uint64(uint64(len(objs))) + for _, obj := range objs { + w.string(obj.name) + w.uint64(index[obj.obj]) + } + } +} + +// exportName returns the 'exported' name of an object. 
It differs from +// obj.Name() only for type parameters (see tparamExportName for details). +func (p *iexporter) exportName(obj types.Object) (res string) { + if name := p.tparamNames[obj]; name != "" { + return name + } + return obj.Name() +} + +type iexporter struct { + fset *token.FileSet + out *bytes.Buffer + version int + + shallow bool // don't put types from other packages in the index + objEncoder *objectpath.Encoder // encodes objects from other packages in shallow mode; lazily allocated + localpkg *types.Package // (nil in bundle mode) + + // allPkgs tracks all packages that have been referenced by + // the export data, so we can ensure to include them in the + // main index. + allPkgs map[*types.Package]bool + + declTodo objQueue + + strings intWriter + stringIndex map[string]uint64 + + // In shallow mode, object positions are encoded as (file, offset). + // Each file is recorded as a line-number table. + // Only the lines of needed positions are saved faithfully. + fileInfo map[*token.File]uint64 // value is index in fileInfos + fileInfos []*filePositions + + data0 intWriter + declIndex map[types.Object]uint64 + tparamNames map[types.Object]string // typeparam->exported name + typIndex map[types.Type]uint64 + + indent int // for tracing support +} + +type filePositions struct { + file *token.File + needed []uint64 // unordered list of needed file offsets +} + +func (p *iexporter) trace(format string, args ...interface{}) { + if !trace { + // Call sites should also be guarded, but having this check here allows + // easily enabling/disabling debug trace statements. + return + } + fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...) +} + +// objectpathEncoder returns the lazily allocated objectpath.Encoder to use +// when encoding objects in other packages during shallow export. +// +// Using a shared Encoder amortizes some of cost of objectpath search. +func (p *iexporter) objectpathEncoder() *objectpath.Encoder { + if p.objEncoder == nil { + p.objEncoder = new(objectpath.Encoder) + } + return p.objEncoder +} + +// stringOff returns the offset of s within the string section. +// If not already present, it's added to the end. +func (p *iexporter) stringOff(s string) uint64 { + off, ok := p.stringIndex[s] + if !ok { + off = uint64(p.strings.Len()) + p.stringIndex[s] = off + + p.strings.uint64(uint64(len(s))) + p.strings.WriteString(s) + } + return off +} + +// fileIndexAndOffset returns the index of the token.File and the byte offset of pos within it. +func (p *iexporter) fileIndexAndOffset(file *token.File, pos token.Pos) (uint64, uint64) { + index, ok := p.fileInfo[file] + if !ok { + index = uint64(len(p.fileInfo)) + p.fileInfos = append(p.fileInfos, &filePositions{file: file}) + if p.fileInfo == nil { + p.fileInfo = make(map[*token.File]uint64) + } + p.fileInfo[file] = index + } + // Record each needed offset. + info := p.fileInfos[index] + offset := uint64(file.Offset(pos)) + info.needed = append(info.needed, offset) + + return index, offset +} + +// pushDecl adds n to the declaration work queue, if not already present. +func (p *iexporter) pushDecl(obj types.Object) { + // Package unsafe is known to the compiler and predeclared. + // Caller should not ask us to do export it. + if obj.Pkg() == types.Unsafe { + panic("cannot export package unsafe") + } + + // Shallow export data: don't index decls from other packages. 
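+	// References to such objects are encoded as object paths instead; see
+	// exportWriter.objectPath below.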
+ if p.shallow && obj.Pkg() != p.localpkg { + return + } + + if _, ok := p.declIndex[obj]; ok { + return + } + + p.declIndex[obj] = ^uint64(0) // mark obj present in work queue + p.declTodo.pushTail(obj) +} + +// exportWriter handles writing out individual data section chunks. +type exportWriter struct { + p *iexporter + + data intWriter + prevFile string + prevLine int64 + prevColumn int64 +} + +func (w *exportWriter) exportPath(pkg *types.Package) string { + if pkg == w.p.localpkg { + return "" + } + return pkg.Path() +} + +func (p *iexporter) doDecl(obj types.Object) { + if trace { + p.trace("exporting decl %v (%T)", obj, obj) + p.indent++ + defer func() { + p.indent-- + p.trace("=> %s", obj) + }() + } + w := p.newWriter() + + switch obj := obj.(type) { + case *types.Var: + w.tag('V') + w.pos(obj.Pos()) + w.typ(obj.Type(), obj.Pkg()) + + case *types.Func: + sig, _ := obj.Type().(*types.Signature) + if sig.Recv() != nil { + // We shouldn't see methods in the package scope, + // but the type checker may repair "func () F() {}" + // to "func (Invalid) F()" and then treat it like "func F()", + // so allow that. See golang/go#57729. + if sig.Recv().Type() != types.Typ[types.Invalid] { + panic(internalErrorf("unexpected method: %v", sig)) + } + } + + // Function. + if typeparams.ForSignature(sig).Len() == 0 { + w.tag('F') + } else { + w.tag('G') + } + w.pos(obj.Pos()) + // The tparam list of the function type is the declaration of the type + // params. So, write out the type params right now. Then those type params + // will be referenced via their type offset (via typOff) in all other + // places in the signature and function where they are used. + // + // While importing the type parameters, tparamList computes and records + // their export name, so that it can be later used when writing the index. + if tparams := typeparams.ForSignature(sig); tparams.Len() > 0 { + w.tparamList(obj.Name(), tparams, obj.Pkg()) + } + w.signature(sig) + + case *types.Const: + w.tag('C') + w.pos(obj.Pos()) + w.value(obj.Type(), obj.Val()) + + case *types.TypeName: + t := obj.Type() + + if tparam, ok := t.(*typeparams.TypeParam); ok { + w.tag('P') + w.pos(obj.Pos()) + constraint := tparam.Constraint() + if p.version >= iexportVersionGo1_18 { + implicit := false + if iface, _ := constraint.(*types.Interface); iface != nil { + implicit = typeparams.IsImplicit(iface) + } + w.bool(implicit) + } + w.typ(constraint, obj.Pkg()) + break + } + + if obj.IsAlias() { + w.tag('A') + w.pos(obj.Pos()) + w.typ(t, obj.Pkg()) + break + } + + // Defined type. + named, ok := t.(*types.Named) + if !ok { + panic(internalErrorf("%s is not a defined type", t)) + } + + if typeparams.ForNamed(named).Len() == 0 { + w.tag('T') + } else { + w.tag('U') + } + w.pos(obj.Pos()) + + if typeparams.ForNamed(named).Len() > 0 { + // While importing the type parameters, tparamList computes and records + // their export name, so that it can be later used when writing the index. + w.tparamList(obj.Name(), typeparams.ForNamed(named), obj.Pkg()) + } + + underlying := obj.Type().Underlying() + w.typ(underlying, obj.Pkg()) + + if types.IsInterface(t) { + break + } + + n := named.NumMethods() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + m := named.Method(i) + w.pos(m.Pos()) + w.string(m.Name()) + sig, _ := m.Type().(*types.Signature) + + // Receiver type parameters are type arguments of the receiver type, so + // their name must be qualified before exporting recv. 
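+			// For example, given
+			//
+			//	type T[P any] struct{}
+			//	func (T[Q]) M() {}
+			//
+			// the receiver type parameter Q is exported as "T.M.Q".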
+ if rparams := typeparams.RecvTypeParams(sig); rparams.Len() > 0 { + prefix := obj.Name() + "." + m.Name() + for i := 0; i < rparams.Len(); i++ { + rparam := rparams.At(i) + name := tparamExportName(prefix, rparam) + w.p.tparamNames[rparam.Obj()] = name + } + } + w.param(sig.Recv()) + w.signature(sig) + } + + default: + panic(internalErrorf("unexpected object: %v", obj)) + } + + p.declIndex[obj] = w.flush() +} + +func (w *exportWriter) tag(tag byte) { + w.data.WriteByte(tag) +} + +func (w *exportWriter) pos(pos token.Pos) { + if w.p.shallow { + w.posV2(pos) + } else if w.p.version >= iexportVersionPosCol { + w.posV1(pos) + } else { + w.posV0(pos) + } +} + +// posV2 encoding (used only in shallow mode) records positions as +// (file, offset), where file is the index in the token.File table +// (which records the file name and newline offsets) and offset is a +// byte offset. It effectively ignores //line directives. +func (w *exportWriter) posV2(pos token.Pos) { + if pos == token.NoPos { + w.uint64(0) + return + } + file := w.p.fset.File(pos) // fset must be non-nil + index, offset := w.p.fileIndexAndOffset(file, pos) + w.uint64(1 + index) + w.uint64(offset) +} + +func (w *exportWriter) posV1(pos token.Pos) { + if w.p.fset == nil { + w.int64(0) + return + } + + p := w.p.fset.Position(pos) + file := p.Filename + line := int64(p.Line) + column := int64(p.Column) + + deltaColumn := (column - w.prevColumn) << 1 + deltaLine := (line - w.prevLine) << 1 + + if file != w.prevFile { + deltaLine |= 1 + } + if deltaLine != 0 { + deltaColumn |= 1 + } + + w.int64(deltaColumn) + if deltaColumn&1 != 0 { + w.int64(deltaLine) + if deltaLine&1 != 0 { + w.string(file) + } + } + + w.prevFile = file + w.prevLine = line + w.prevColumn = column +} + +func (w *exportWriter) posV0(pos token.Pos) { + if w.p.fset == nil { + w.int64(0) + return + } + + p := w.p.fset.Position(pos) + file := p.Filename + line := int64(p.Line) + + // When file is the same as the last position (common case), + // we can save a few bytes by delta encoding just the line + // number. + // + // Note: Because data objects may be read out of order (or not + // at all), we can only apply delta encoding within a single + // object. This is handled implicitly by tracking prevFile and + // prevLine as fields of exportWriter. + + if file == w.prevFile { + delta := line - w.prevLine + w.int64(delta) + if delta == deltaNewFile { + w.int64(-1) + } + } else { + w.int64(deltaNewFile) + w.int64(line) // line >= 0 + w.string(file) + w.prevFile = file + } + w.prevLine = line +} + +func (w *exportWriter) pkg(pkg *types.Package) { + // Ensure any referenced packages are declared in the main index. + w.p.allPkgs[pkg] = true + + w.string(w.exportPath(pkg)) +} + +func (w *exportWriter) qualifiedType(obj *types.TypeName) { + name := w.p.exportName(obj) + + // Ensure any referenced declarations are written out too. + w.p.pushDecl(obj) + w.string(name) + w.pkg(obj.Pkg()) +} + +// TODO(rfindley): what does 'pkg' even mean here? It would be better to pass +// it in explicitly into signatures and structs that may use it for +// constructing fields. 
+func (w *exportWriter) typ(t types.Type, pkg *types.Package) { + w.data.uint64(w.p.typOff(t, pkg)) +} + +func (p *iexporter) newWriter() *exportWriter { + return &exportWriter{p: p} +} + +func (w *exportWriter) flush() uint64 { + off := uint64(w.p.data0.Len()) + io.Copy(&w.p.data0, &w.data) + return off +} + +func (p *iexporter) typOff(t types.Type, pkg *types.Package) uint64 { + off, ok := p.typIndex[t] + if !ok { + w := p.newWriter() + w.doTyp(t, pkg) + off = predeclReserved + w.flush() + p.typIndex[t] = off + } + return off +} + +func (w *exportWriter) startType(k itag) { + w.data.uint64(uint64(k)) +} + +func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { + if trace { + w.p.trace("exporting type %s (%T)", t, t) + w.p.indent++ + defer func() { + w.p.indent-- + w.p.trace("=> %s", t) + }() + } + switch t := t.(type) { + case *types.Named: + if targs := typeparams.NamedTypeArgs(t); targs.Len() > 0 { + w.startType(instanceType) + // TODO(rfindley): investigate if this position is correct, and if it + // matters. + w.pos(t.Obj().Pos()) + w.typeList(targs, pkg) + w.typ(typeparams.NamedTypeOrigin(t), pkg) + return + } + w.startType(definedType) + w.qualifiedType(t.Obj()) + + case *typeparams.TypeParam: + w.startType(typeParamType) + w.qualifiedType(t.Obj()) + + case *types.Pointer: + w.startType(pointerType) + w.typ(t.Elem(), pkg) + + case *types.Slice: + w.startType(sliceType) + w.typ(t.Elem(), pkg) + + case *types.Array: + w.startType(arrayType) + w.uint64(uint64(t.Len())) + w.typ(t.Elem(), pkg) + + case *types.Chan: + w.startType(chanType) + // 1 RecvOnly; 2 SendOnly; 3 SendRecv + var dir uint64 + switch t.Dir() { + case types.RecvOnly: + dir = 1 + case types.SendOnly: + dir = 2 + case types.SendRecv: + dir = 3 + } + w.uint64(dir) + w.typ(t.Elem(), pkg) + + case *types.Map: + w.startType(mapType) + w.typ(t.Key(), pkg) + w.typ(t.Elem(), pkg) + + case *types.Signature: + w.startType(signatureType) + w.pkg(pkg) + w.signature(t) + + case *types.Struct: + w.startType(structType) + n := t.NumFields() + // Even for struct{} we must emit some qualifying package, because that's + // what the compiler does, and thus that's what the importer expects. + fieldPkg := pkg + if n > 0 { + fieldPkg = t.Field(0).Pkg() + } + if fieldPkg == nil { + // TODO(rfindley): improve this very hacky logic. + // + // The importer expects a package to be set for all struct types, even + // those with no fields. A better encoding might be to set NumFields + // before pkg. setPkg panics with a nil package, which may be possible + // to reach with invalid packages (and perhaps valid packages, too?), so + // (arbitrarily) set the localpkg if available. + // + // Alternatively, we may be able to simply guarantee that pkg != nil, by + // reconsidering the encoding of constant values. 
+ if w.p.shallow { + fieldPkg = w.p.localpkg + } else { + panic(internalErrorf("no package to set for empty struct")) + } + } + w.pkg(fieldPkg) + w.uint64(uint64(n)) + + for i := 0; i < n; i++ { + f := t.Field(i) + if w.p.shallow { + w.objectPath(f) + } + w.pos(f.Pos()) + w.string(f.Name()) // unexported fields implicitly qualified by prior setPkg + w.typ(f.Type(), fieldPkg) + w.bool(f.Anonymous()) + w.string(t.Tag(i)) // note (or tag) + } + + case *types.Interface: + w.startType(interfaceType) + w.pkg(pkg) + + n := t.NumEmbeddeds() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + ft := t.EmbeddedType(i) + tPkg := pkg + if named, _ := ft.(*types.Named); named != nil { + w.pos(named.Obj().Pos()) + } else { + w.pos(token.NoPos) + } + w.typ(ft, tPkg) + } + + // See comment for struct fields. In shallow mode we change the encoding + // for interface methods that are promoted from other packages. + + n = t.NumExplicitMethods() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + m := t.ExplicitMethod(i) + if w.p.shallow { + w.objectPath(m) + } + w.pos(m.Pos()) + w.string(m.Name()) + sig, _ := m.Type().(*types.Signature) + w.signature(sig) + } + + case *typeparams.Union: + w.startType(unionType) + nt := t.Len() + w.uint64(uint64(nt)) + for i := 0; i < nt; i++ { + term := t.Term(i) + w.bool(term.Tilde()) + w.typ(term.Type(), pkg) + } + + default: + panic(internalErrorf("unexpected type: %v, %v", t, reflect.TypeOf(t))) + } +} + +// objectPath writes the package and objectPath to use to look up obj in a +// different package, when encoding in "shallow" mode. +// +// When doing a shallow import, the importer creates only the local package, +// and requests package symbols for dependencies from the client. +// However, certain types defined in the local package may hold objects defined +// (perhaps deeply) within another package. +// +// For example, consider the following: +// +// package a +// func F() chan * map[string] struct { X int } +// +// package b +// import "a" +// var B = a.F() +// +// In this example, the type of b.B holds fields defined in package a. +// In order to have the correct canonical objects for the field defined in the +// type of B, they are encoded as objectPaths and later looked up in the +// importer. The same problem applies to interface methods. +func (w *exportWriter) objectPath(obj types.Object) { + if obj.Pkg() == nil || obj.Pkg() == w.p.localpkg { + // obj.Pkg() may be nil for the builtin error.Error. + // In this case, or if obj is declared in the local package, no need to + // encode. + w.string("") + return + } + objectPath, err := w.p.objectpathEncoder().For(obj) + if err != nil { + // Fall back to the empty string, which will cause the importer to create a + // new object, which matches earlier behavior. Creating a new object is + // sufficient for many purposes (such as type checking), but causes certain + // references algorithms to fail (golang/go#60819). However, we didn't + // notice this problem during months of gopls@v0.12.0 testing. + // + // TODO(golang/go#61674): this workaround is insufficient, as in the case + // where the field forwarded from an instantiated type that may not appear + // in the export data of the original package: + // + // // package a + // type A[P any] struct{ F P } + // + // // package b + // type B a.A[int] + // + // We need to update references algorithms not to depend on this + // de-duplication, at which point we may want to simply remove the + // workaround here. 
+ w.string("") + return + } + w.string(string(objectPath)) + w.pkg(obj.Pkg()) +} + +func (w *exportWriter) signature(sig *types.Signature) { + w.paramList(sig.Params()) + w.paramList(sig.Results()) + if sig.Params().Len() > 0 { + w.bool(sig.Variadic()) + } +} + +func (w *exportWriter) typeList(ts *typeparams.TypeList, pkg *types.Package) { + w.uint64(uint64(ts.Len())) + for i := 0; i < ts.Len(); i++ { + w.typ(ts.At(i), pkg) + } +} + +func (w *exportWriter) tparamList(prefix string, list *typeparams.TypeParamList, pkg *types.Package) { + ll := uint64(list.Len()) + w.uint64(ll) + for i := 0; i < list.Len(); i++ { + tparam := list.At(i) + // Set the type parameter exportName before exporting its type. + exportName := tparamExportName(prefix, tparam) + w.p.tparamNames[tparam.Obj()] = exportName + w.typ(list.At(i), pkg) + } +} + +const blankMarker = "$" + +// tparamExportName returns the 'exported' name of a type parameter, which +// differs from its actual object name: it is prefixed with a qualifier, and +// blank type parameter names are disambiguated by their index in the type +// parameter list. +func tparamExportName(prefix string, tparam *typeparams.TypeParam) string { + assert(prefix != "") + name := tparam.Obj().Name() + if name == "_" { + name = blankMarker + strconv.Itoa(tparam.Index()) + } + return prefix + "." + name +} + +// tparamName returns the real name of a type parameter, after stripping its +// qualifying prefix and reverting blank-name encoding. See tparamExportName +// for details. +func tparamName(exportName string) string { + // Remove the "path" from the type param name that makes it unique. + ix := strings.LastIndex(exportName, ".") + if ix < 0 { + errorf("malformed type parameter export name %s: missing prefix", exportName) + } + name := exportName[ix+1:] + if strings.HasPrefix(name, blankMarker) { + return "_" + } + return name +} + +func (w *exportWriter) paramList(tup *types.Tuple) { + n := tup.Len() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + w.param(tup.At(i)) + } +} + +func (w *exportWriter) param(obj types.Object) { + w.pos(obj.Pos()) + w.localIdent(obj) + w.typ(obj.Type(), obj.Pkg()) +} + +func (w *exportWriter) value(typ types.Type, v constant.Value) { + w.typ(typ, nil) + if w.p.version >= iexportVersionGo1_18 { + w.int64(int64(v.Kind())) + } + + if v.Kind() == constant.Unknown { + // golang/go#60605: treat unknown constant values as if they have invalid type + // + // This loses some fidelity over the package type-checked from source, but that + // is acceptable. 
+ // + // TODO(rfindley): we should switch on the recorded constant kind rather + // than the constant type + return + } + + switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType { + case types.IsBoolean: + w.bool(constant.BoolVal(v)) + case types.IsInteger: + var i big.Int + if i64, exact := constant.Int64Val(v); exact { + i.SetInt64(i64) + } else if ui64, exact := constant.Uint64Val(v); exact { + i.SetUint64(ui64) + } else { + i.SetString(v.ExactString(), 10) + } + w.mpint(&i, typ) + case types.IsFloat: + f := constantToFloat(v) + w.mpfloat(f, typ) + case types.IsComplex: + w.mpfloat(constantToFloat(constant.Real(v)), typ) + w.mpfloat(constantToFloat(constant.Imag(v)), typ) + case types.IsString: + w.string(constant.StringVal(v)) + default: + if b.Kind() == types.Invalid { + // package contains type errors + break + } + panic(internalErrorf("unexpected type %v (%v)", typ, typ.Underlying())) + } +} + +// constantToFloat converts a constant.Value with kind constant.Float to a +// big.Float. +func constantToFloat(x constant.Value) *big.Float { + x = constant.ToFloat(x) + // Use the same floating-point precision (512) as cmd/compile + // (see Mpprec in cmd/compile/internal/gc/mpfloat.go). + const mpprec = 512 + var f big.Float + f.SetPrec(mpprec) + if v, exact := constant.Float64Val(x); exact { + // float64 + f.SetFloat64(v) + } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { + // TODO(gri): add big.Rat accessor to constant.Value. + n := valueToRat(num) + d := valueToRat(denom) + f.SetRat(n.Quo(n, d)) + } else { + // Value too large to represent as a fraction => inaccessible. + // TODO(gri): add big.Float accessor to constant.Value. + _, ok := f.SetString(x.ExactString()) + assert(ok) + } + return &f +} + +func valueToRat(x constant.Value) *big.Rat { + // Convert little-endian to big-endian. + // I can't believe this is necessary. + bytes := constant.Bytes(x) + for i := 0; i < len(bytes)/2; i++ { + bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] + } + return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) +} + +// mpint exports a multi-precision integer. +// +// For unsigned types, small values are written out as a single +// byte. Larger values are written out as a length-prefixed big-endian +// byte string, where the length prefix is encoded as its complement. +// For example, bytes 0, 1, and 2 directly represent the integer +// values 0, 1, and 2; while bytes 255, 254, and 253 indicate a 1-, +// 2-, and 3-byte big-endian string follow. +// +// Encoding for signed types use the same general approach as for +// unsigned types, except small values use zig-zag encoding and the +// bottom bit of length prefix byte for large values is reserved as a +// sign bit. +// +// The exact boundary between small and large encodings varies +// according to the maximum number of bytes needed to encode a value +// of type typ. As a special case, 8-bit types are always encoded as a +// single byte. +// +// TODO(mdempsky): Is this level of complexity really worthwhile? 
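+//
+// A worked example, assuming a uint64-sized type (maxBytes is 8, so values
+// below 248 are "small"): 100 is encoded as the single byte 100, while 1000
+// is encoded as the prefix byte 254 (a 2-byte string follows) followed by
+// the big-endian bytes 0x03 0xe8.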
+func (w *exportWriter) mpint(x *big.Int, typ types.Type) { + basic, ok := typ.Underlying().(*types.Basic) + if !ok { + panic(internalErrorf("unexpected type %v (%T)", typ.Underlying(), typ.Underlying())) + } + + signed, maxBytes := intSize(basic) + + negative := x.Sign() < 0 + if !signed && negative { + panic(internalErrorf("negative unsigned integer; type %v, value %v", typ, x)) + } + + b := x.Bytes() + if len(b) > 0 && b[0] == 0 { + panic(internalErrorf("leading zeros")) + } + if uint(len(b)) > maxBytes { + panic(internalErrorf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x)) + } + + maxSmall := 256 - maxBytes + if signed { + maxSmall = 256 - 2*maxBytes + } + if maxBytes == 1 { + maxSmall = 256 + } + + // Check if x can use small value encoding. + if len(b) <= 1 { + var ux uint + if len(b) == 1 { + ux = uint(b[0]) + } + if signed { + ux <<= 1 + if negative { + ux-- + } + } + if ux < maxSmall { + w.data.WriteByte(byte(ux)) + return + } + } + + n := 256 - uint(len(b)) + if signed { + n = 256 - 2*uint(len(b)) + if negative { + n |= 1 + } + } + if n < maxSmall || n >= 256 { + panic(internalErrorf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n)) + } + + w.data.WriteByte(byte(n)) + w.data.Write(b) +} + +// mpfloat exports a multi-precision floating point number. +// +// The number's value is decomposed into mantissa × 2**exponent, where +// mantissa is an integer. The value is written out as mantissa (as a +// multi-precision integer) and then the exponent, except exponent is +// omitted if mantissa is zero. +func (w *exportWriter) mpfloat(f *big.Float, typ types.Type) { + if f.IsInf() { + panic("infinite constant") + } + + // Break into f = mant × 2**exp, with 0.5 <= mant < 1. + var mant big.Float + exp := int64(f.MantExp(&mant)) + + // Scale so that mant is an integer. + prec := mant.MinPrec() + mant.SetMantExp(&mant, int(prec)) + exp -= int64(prec) + + manti, acc := mant.Int(nil) + if acc != big.Exact { + panic(internalErrorf("mantissa scaling failed for %f (%s)", f, acc)) + } + w.mpint(manti, typ) + if manti.Sign() != 0 { + w.int64(exp) + } +} + +func (w *exportWriter) bool(b bool) bool { + var x uint64 + if b { + x = 1 + } + w.uint64(x) + return b +} + +func (w *exportWriter) int64(x int64) { w.data.int64(x) } +func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) } +func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) } + +func (w *exportWriter) localIdent(obj types.Object) { + // Anonymous parameters. + if obj == nil { + w.string("") + return + } + + name := obj.Name() + if name == "_" { + w.string("_") + return + } + + w.string(name) +} + +type intWriter struct { + bytes.Buffer +} + +func (w *intWriter) int64(x int64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutVarint(buf[:], x) + w.Write(buf[:n]) +} + +func (w *intWriter) uint64(x uint64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutUvarint(buf[:], x) + w.Write(buf[:n]) +} + +func assert(cond bool) { + if !cond { + panic("internal error: assertion failed") + } +} + +// The below is copied from go/src/cmd/compile/internal/gc/syntax.go. + +// objQueue is a FIFO queue of types.Object. The zero value of objQueue is +// a ready-to-use empty queue. +type objQueue struct { + ring []types.Object + head, tail int +} + +// empty returns true if q contains no Nodes. +func (q *objQueue) empty() bool { + return q.head == q.tail +} + +// pushTail appends n to the tail of the queue. 
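+// The ring is doubled and re-packed from index zero when it fills; head and
+// tail are reduced modulo len(q.ring) only when indexing into the ring.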
+func (q *objQueue) pushTail(obj types.Object) { + if len(q.ring) == 0 { + q.ring = make([]types.Object, 16) + } else if q.head+len(q.ring) == q.tail { + // Grow the ring. + nring := make([]types.Object, len(q.ring)*2) + // Copy the old elements. + part := q.ring[q.head%len(q.ring):] + if q.tail-q.head <= len(part) { + part = part[:q.tail-q.head] + copy(nring, part) + } else { + pos := copy(nring, part) + copy(nring[pos:], q.ring[:q.tail%len(q.ring)]) + } + q.ring, q.head, q.tail = nring, 0, q.tail-q.head + } + + q.ring[q.tail%len(q.ring)] = obj + q.tail++ +} + +// popHead pops a node from the head of the queue. It panics if q is empty. +func (q *objQueue) popHead() types.Object { + if q.empty() { + panic("dequeue empty") + } + obj := q.ring[q.head%len(q.ring)] + q.head++ + return obj +} + +// internalError represents an error generated inside this package. +type internalError string + +func (e internalError) Error() string { return "gcimporter: " + string(e) } + +// TODO(adonovan): make this call panic, so that it's symmetric with errorf. +// Otherwise it's easy to forget to do anything with the error. +// +// TODO(adonovan): also, consider switching the names "errorf" and +// "internalErrorf" as the former is used for bugs, whose cause is +// internal inconsistency, whereas the latter is used for ordinary +// situations like bad input, whose cause is external. +func internalErrorf(format string, args ...interface{}) error { + return internalError(fmt.Sprintf(format, args...)) +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go new file mode 100644 index 0000000000..8e64cf644f --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -0,0 +1,1083 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Indexed package import. +// See cmd/compile/internal/gc/iexport.go for the export data format. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go. + +package gcimporter + +import ( + "bytes" + "encoding/binary" + "fmt" + "go/constant" + "go/token" + "go/types" + "io" + "math/big" + "sort" + "strings" + + "golang.org/x/tools/go/types/objectpath" + "golang.org/x/tools/internal/typeparams" +) + +type intReader struct { + *bytes.Reader + path string +} + +func (r *intReader) int64() int64 { + i, err := binary.ReadVarint(r.Reader) + if err != nil { + errorf("import %q: read varint error: %v", r.path, err) + } + return i +} + +func (r *intReader) uint64() uint64 { + i, err := binary.ReadUvarint(r.Reader) + if err != nil { + errorf("import %q: read varint error: %v", r.path, err) + } + return i +} + +// Keep this in sync with constants in iexport.go. +const ( + iexportVersionGo1_11 = 0 + iexportVersionPosCol = 1 + iexportVersionGo1_18 = 2 + iexportVersionGenerics = 2 + + iexportVersionCurrent = 2 +) + +type ident struct { + pkg *types.Package + name string +} + +const predeclReserved = 32 + +type itag uint64 + +const ( + // Types + definedType itag = iota + pointerType + sliceType + arrayType + chanType + mapType + signatureType + structType + interfaceType + typeParamType + instanceType + unionType +) + +// IImportData imports a package from the serialized package data +// and returns 0 and a reference to the package. +// If the export data version is not recognized or the format is otherwise +// compromised, an error is returned. 
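+//
+// An illustrative call, where imports is the caller's map of previously
+// imported packages:
+//
+//	_, pkg, err := IImportData(fset, imports, data, "example.com/p")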
+func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (int, *types.Package, error) { + pkgs, err := iimportCommon(fset, GetPackagesFromMap(imports), data, false, path, false, nil) + if err != nil { + return 0, nil, err + } + return 0, pkgs[0], nil +} + +// IImportBundle imports a set of packages from the serialized package bundle. +func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data []byte) ([]*types.Package, error) { + return iimportCommon(fset, GetPackagesFromMap(imports), data, true, "", false, nil) +} + +// A GetPackagesFunc function obtains the non-nil symbols for a set of +// packages, creating and recursively importing them as needed. An +// implementation should store each package symbol is in the Pkg +// field of the items array. +// +// Any error causes importing to fail. This can be used to quickly read +// the import manifest of an export data file without fully decoding it. +type GetPackagesFunc = func(items []GetPackagesItem) error + +// A GetPackagesItem is a request from the importer for the package +// symbol of the specified name and path. +type GetPackagesItem struct { + Name, Path string + Pkg *types.Package // to be filled in by GetPackagesFunc call + + // private importer state + pathOffset uint64 + nameIndex map[string]uint64 +} + +// GetPackagesFromMap returns a GetPackagesFunc that retrieves +// packages from the given map of package path to package. +// +// The returned function may mutate m: each requested package that is not +// found is created with types.NewPackage and inserted into m. +func GetPackagesFromMap(m map[string]*types.Package) GetPackagesFunc { + return func(items []GetPackagesItem) error { + for i, item := range items { + pkg, ok := m[item.Path] + if !ok { + pkg = types.NewPackage(item.Path, item.Name) + m[item.Path] = pkg + } + items[i].Pkg = pkg + } + return nil + } +} + +func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte, bundle bool, path string, shallow bool, reportf ReportFunc) (pkgs []*types.Package, err error) { + const currentVersion = iexportVersionCurrent + version := int64(-1) + if !debug { + defer func() { + if e := recover(); e != nil { + if bundle { + err = fmt.Errorf("%v", e) + } else if version > currentVersion { + err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) + } else { + err = fmt.Errorf("internal error while importing %q (%v); please report an issue", path, e) + } + } + }() + } + + r := &intReader{bytes.NewReader(data), path} + + if bundle { + if v := r.uint64(); v != bundleVersion { + errorf("unknown bundle format version %d", v) + } + } + + version = int64(r.uint64()) + switch version { + case iexportVersionGo1_18, iexportVersionPosCol, iexportVersionGo1_11: + default: + if version > iexportVersionGo1_18 { + errorf("unstable iexport format version %d, just rebuild compiler and std library", version) + } else { + errorf("unknown iexport format version %d", version) + } + } + + sLen := int64(r.uint64()) + var fLen int64 + var fileOffset []uint64 + if shallow { + // Shallow mode uses a different position encoding. 
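+		// The shallow header additionally records the file section length,
+		// the number of files, and one offset per file into that section
+		// (mirroring iexportCommon on the export side).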
+ fLen = int64(r.uint64()) + fileOffset = make([]uint64, r.uint64()) + for i := range fileOffset { + fileOffset[i] = r.uint64() + } + } + dLen := int64(r.uint64()) + + whence, _ := r.Seek(0, io.SeekCurrent) + stringData := data[whence : whence+sLen] + fileData := data[whence+sLen : whence+sLen+fLen] + declData := data[whence+sLen+fLen : whence+sLen+fLen+dLen] + r.Seek(sLen+fLen+dLen, io.SeekCurrent) + + p := iimporter{ + version: int(version), + ipath: path, + shallow: shallow, + reportf: reportf, + + stringData: stringData, + stringCache: make(map[uint64]string), + fileOffset: fileOffset, + fileData: fileData, + fileCache: make([]*token.File, len(fileOffset)), + pkgCache: make(map[uint64]*types.Package), + + declData: declData, + pkgIndex: make(map[*types.Package]map[string]uint64), + typCache: make(map[uint64]types.Type), + // Separate map for typeparams, keyed by their package and unique + // name. + tparamIndex: make(map[ident]types.Type), + + fake: fakeFileSet{ + fset: fset, + files: make(map[string]*fileInfo), + }, + } + defer p.fake.setLines() // set lines for files in fset + + for i, pt := range predeclared() { + p.typCache[uint64(i)] = pt + } + + // Gather the relevant packages from the manifest. + items := make([]GetPackagesItem, r.uint64()) + for i := range items { + pkgPathOff := r.uint64() + pkgPath := p.stringAt(pkgPathOff) + pkgName := p.stringAt(r.uint64()) + _ = r.uint64() // package height; unused by go/types + + if pkgPath == "" { + pkgPath = path + } + items[i].Name = pkgName + items[i].Path = pkgPath + items[i].pathOffset = pkgPathOff + + // Read index for package. + nameIndex := make(map[string]uint64) + nSyms := r.uint64() + // In shallow mode, only the current package (i=0) has an index. + assert(!(shallow && i > 0 && nSyms != 0)) + for ; nSyms > 0; nSyms-- { + name := p.stringAt(r.uint64()) + nameIndex[name] = r.uint64() + } + + items[i].nameIndex = nameIndex + } + + // Request packages all at once from the client, + // enabling a parallel implementation. + if err := getPackages(items); err != nil { + return nil, err // don't wrap this error + } + + // Check the results and complete the index. + pkgList := make([]*types.Package, len(items)) + for i, item := range items { + pkg := item.Pkg + if pkg == nil { + errorf("internal error: getPackages returned nil package for %q", item.Path) + } else if pkg.Path() != item.Path { + errorf("internal error: getPackages returned wrong path %q, want %q", pkg.Path(), item.Path) + } else if pkg.Name() != item.Name { + errorf("internal error: getPackages returned wrong name %s for package %q, want %s", pkg.Name(), item.Path, item.Name) + } + p.pkgCache[item.pathOffset] = pkg + p.pkgIndex[pkg] = item.nameIndex + pkgList[i] = pkg + } + + if bundle { + pkgs = make([]*types.Package, r.uint64()) + for i := range pkgs { + pkg := p.pkgAt(r.uint64()) + imps := make([]*types.Package, r.uint64()) + for j := range imps { + imps[j] = p.pkgAt(r.uint64()) + } + pkg.SetImports(imps) + pkgs[i] = pkg + } + } else { + if len(pkgList) == 0 { + errorf("no packages found for %s", path) + panic("unreachable") + } + pkgs = pkgList[:1] + + // record all referenced packages as imports + list := append(([]*types.Package)(nil), pkgList[1:]...) 
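+		// Sort the recorded imports by path so the result is deterministic.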
+ sort.Sort(byPath(list)) + pkgs[0].SetImports(list) + } + + for _, pkg := range pkgs { + if pkg.Complete() { + continue + } + + names := make([]string, 0, len(p.pkgIndex[pkg])) + for name := range p.pkgIndex[pkg] { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + p.doDecl(pkg, name) + } + + // package was imported completely and without errors + pkg.MarkComplete() + } + + // SetConstraint can't be called if the constraint type is not yet complete. + // When type params are created in the 'P' case of (*importReader).obj(), + // the associated constraint type may not be complete due to recursion. + // Therefore, we defer calling SetConstraint there, and call it here instead + // after all types are complete. + for _, d := range p.later { + typeparams.SetTypeParamConstraint(d.t, d.constraint) + } + + for _, typ := range p.interfaceList { + typ.Complete() + } + + // Workaround for golang/go#61561. See the doc for instanceList for details. + for _, typ := range p.instanceList { + if iface, _ := typ.Underlying().(*types.Interface); iface != nil { + iface.Complete() + } + } + + return pkgs, nil +} + +type setConstraintArgs struct { + t *typeparams.TypeParam + constraint types.Type +} + +type iimporter struct { + version int + ipath string + + shallow bool + reportf ReportFunc // if non-nil, used to report bugs + + stringData []byte + stringCache map[uint64]string + fileOffset []uint64 // fileOffset[i] is offset in fileData for info about file encoded as i + fileData []byte + fileCache []*token.File // memoized decoding of file encoded as i + pkgCache map[uint64]*types.Package + + declData []byte + pkgIndex map[*types.Package]map[string]uint64 + typCache map[uint64]types.Type + tparamIndex map[ident]types.Type + + fake fakeFileSet + interfaceList []*types.Interface + + // Workaround for the go/types bug golang/go#61561: instances produced during + // instantiation may contain incomplete interfaces. Here we only complete the + // underlying type of the instance, which is the most common case but doesn't + // handle parameterized interface literals defined deeper in the type. + instanceList []types.Type // instances for later completion (see golang/go#61561) + + // Arguments for calls to SetConstraint that are deferred due to recursive types + later []setConstraintArgs + + indent int // for tracing support +} + +func (p *iimporter) trace(format string, args ...interface{}) { + if !trace { + // Call sites should also be guarded, but having this check here allows + // easily enabling/disabling debug trace statements. + return + } + fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...) +} + +func (p *iimporter) doDecl(pkg *types.Package, name string) { + if debug { + p.trace("import decl %s", name) + p.indent++ + defer func() { + p.indent-- + p.trace("=> %s", name) + }() + } + // See if we've already imported this declaration. + if obj := pkg.Scope().Lookup(name); obj != nil { + return + } + + off, ok := p.pkgIndex[pkg][name] + if !ok { + // In deep mode, the index should be complete. In shallow + // mode, we should have already recursively loaded necessary + // dependencies so the above Lookup succeeds. 
+ errorf("%v.%v not in index", pkg, name) + } + + r := &importReader{p: p, currPkg: pkg} + r.declReader.Reset(p.declData[off:]) + + r.obj(name) +} + +func (p *iimporter) stringAt(off uint64) string { + if s, ok := p.stringCache[off]; ok { + return s + } + + slen, n := binary.Uvarint(p.stringData[off:]) + if n <= 0 { + errorf("varint failed") + } + spos := off + uint64(n) + s := string(p.stringData[spos : spos+slen]) + p.stringCache[off] = s + return s +} + +func (p *iimporter) fileAt(index uint64) *token.File { + file := p.fileCache[index] + if file == nil { + off := p.fileOffset[index] + file = p.decodeFile(intReader{bytes.NewReader(p.fileData[off:]), p.ipath}) + p.fileCache[index] = file + } + return file +} + +func (p *iimporter) decodeFile(rd intReader) *token.File { + filename := p.stringAt(rd.uint64()) + size := int(rd.uint64()) + file := p.fake.fset.AddFile(filename, -1, size) + + // SetLines requires a nondecreasing sequence. + // Because it is common for clients to derive the interval + // [start, start+len(name)] from a start position, and we + // want to ensure that the end offset is on the same line, + // we fill in the gaps of the sparse encoding with values + // that strictly increase by the largest possible amount. + // This allows us to avoid having to record the actual end + // offset of each needed line. + + lines := make([]int, int(rd.uint64())) + var index, offset int + for i, n := 0, int(rd.uint64()); i < n; i++ { + index += int(rd.uint64()) + offset += int(rd.uint64()) + lines[index] = offset + + // Ensure monotonicity between points. + for j := index - 1; j > 0 && lines[j] == 0; j-- { + lines[j] = lines[j+1] - 1 + } + } + + // Ensure monotonicity after last point. + for j := len(lines) - 1; j > 0 && lines[j] == 0; j-- { + size-- + lines[j] = size + } + + if !file.SetLines(lines) { + errorf("SetLines failed: %d", lines) // can't happen + } + return file +} + +func (p *iimporter) pkgAt(off uint64) *types.Package { + if pkg, ok := p.pkgCache[off]; ok { + return pkg + } + path := p.stringAt(off) + errorf("missing package %q in %q", path, p.ipath) + return nil +} + +func (p *iimporter) typAt(off uint64, base *types.Named) types.Type { + if t, ok := p.typCache[off]; ok && canReuse(base, t) { + return t + } + + if off < predeclReserved { + errorf("predeclared type missing from cache: %v", off) + } + + r := &importReader{p: p} + r.declReader.Reset(p.declData[off-predeclReserved:]) + t := r.doType(base) + + if canReuse(base, t) { + p.typCache[off] = t + } + return t +} + +// canReuse reports whether the type rhs on the RHS of the declaration for def +// may be re-used. +// +// Specifically, if def is non-nil and rhs is an interface type with methods, it +// may not be re-used because we have a convention of setting the receiver type +// for interface methods to def. +func canReuse(def *types.Named, rhs types.Type) bool { + if def == nil { + return true + } + iface, _ := rhs.(*types.Interface) + if iface == nil { + return true + } + // Don't use iface.Empty() here as iface may not be complete. 
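+	// (Counting embeddeds and explicit methods does not require the
+	// interface's type set, so it is safe on an incomplete interface.)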
+	return iface.NumEmbeddeds() == 0 && iface.NumExplicitMethods() == 0
+}
+
+type importReader struct {
+	p          *iimporter
+	declReader bytes.Reader
+	currPkg    *types.Package
+	prevFile   string
+	prevLine   int64
+	prevColumn int64
+}
+
+func (r *importReader) obj(name string) {
+	tag := r.byte()
+	pos := r.pos()
+
+	switch tag {
+	case 'A':
+		typ := r.typ()
+
+		r.declare(types.NewTypeName(pos, r.currPkg, name, typ))
+
+	case 'C':
+		typ, val := r.value()
+
+		r.declare(types.NewConst(pos, r.currPkg, name, typ, val))
+
+	case 'F', 'G':
+		var tparams []*typeparams.TypeParam
+		if tag == 'G' {
+			tparams = r.tparamList()
+		}
+		sig := r.signature(nil, nil, tparams)
+		r.declare(types.NewFunc(pos, r.currPkg, name, sig))
+
+	case 'T', 'U':
+		// Types can be recursive. We need to set up a stub
+		// declaration before recursing.
+		obj := types.NewTypeName(pos, r.currPkg, name, nil)
+		named := types.NewNamed(obj, nil, nil)
+		// Declare obj before calling r.tparamList, so the new type name is recognized
+		// if used in the constraint of one of its own typeparams (see #48280).
+		r.declare(obj)
+		if tag == 'U' {
+			tparams := r.tparamList()
+			typeparams.SetForNamed(named, tparams)
+		}
+
+		underlying := r.p.typAt(r.uint64(), named).Underlying()
+		named.SetUnderlying(underlying)
+
+		if !isInterface(underlying) {
+			for n := r.uint64(); n > 0; n-- {
+				mpos := r.pos()
+				mname := r.ident()
+				recv := r.param()
+
+				// If the receiver has any targs, set those as the
+				// rparams of the method (since those are the
+				// typeparams being used in the method sig/body).
+				base := baseType(recv.Type())
+				assert(base != nil)
+				targs := typeparams.NamedTypeArgs(base)
+				var rparams []*typeparams.TypeParam
+				if targs.Len() > 0 {
+					rparams = make([]*typeparams.TypeParam, targs.Len())
+					for i := range rparams {
+						rparams[i] = targs.At(i).(*typeparams.TypeParam)
+					}
+				}
+				msig := r.signature(recv, rparams, nil)
+
+				named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig))
+			}
+		}
+
+	case 'P':
+		// We need to "declare" a typeparam in order to have a name that
+		// can be referenced recursively (if needed) in the type param's
+		// bound.
+		if r.p.version < iexportVersionGenerics {
+			errorf("unexpected type param type")
+		}
+		name0 := tparamName(name)
+		tn := types.NewTypeName(pos, r.currPkg, name0, nil)
+		t := typeparams.NewTypeParam(tn, nil)
+
+		// To handle recursive references to the typeparam within its
+		// bound, save the partial type in tparamIndex before reading the bounds.
+		id := ident{r.currPkg, name}
+		r.p.tparamIndex[id] = t
+		var implicit bool
+		if r.p.version >= iexportVersionGo1_18 {
+			implicit = r.bool()
+		}
+		constraint := r.typ()
+		if implicit {
+			iface, _ := constraint.(*types.Interface)
+			if iface == nil {
+				errorf("non-interface constraint marked implicit")
+			}
+			typeparams.MarkImplicit(iface)
+		}
+		// The constraint type may not be complete, if we
+		// are in the middle of a type recursion involving type
+		// constraints. So, we defer SetConstraint until we have
+		// completely set up all types in ImportData.
+		r.p.later = append(r.p.later, setConstraintArgs{t: t, constraint: constraint})
+
+	case 'V':
+		typ := r.typ()
+
+		r.declare(types.NewVar(pos, r.currPkg, name, typ))
+
+	default:
+		errorf("unexpected tag: %v", tag)
+	}
+}
+
+func (r *importReader) declare(obj types.Object) {
+	obj.Pkg().Scope().Insert(obj)
+}
+
+func (r *importReader) value() (typ types.Type, val constant.Value) {
+	typ = r.typ()
+	if r.p.version >= iexportVersionGo1_18 {
+		// TODO: add support for using the kind.
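+		// For now, the encoded kind is read and discarded to keep the
+		// decode stream aligned.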
+ _ = constant.Kind(r.int64()) + } + + switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType { + case types.IsBoolean: + val = constant.MakeBool(r.bool()) + + case types.IsString: + val = constant.MakeString(r.string()) + + case types.IsInteger: + var x big.Int + r.mpint(&x, b) + val = constant.Make(&x) + + case types.IsFloat: + val = r.mpfloat(b) + + case types.IsComplex: + re := r.mpfloat(b) + im := r.mpfloat(b) + val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) + + default: + if b.Kind() == types.Invalid { + val = constant.MakeUnknown() + return + } + errorf("unexpected type %v", typ) // panics + panic("unreachable") + } + + return +} + +func intSize(b *types.Basic) (signed bool, maxBytes uint) { + if (b.Info() & types.IsUntyped) != 0 { + return true, 64 + } + + switch b.Kind() { + case types.Float32, types.Complex64: + return true, 3 + case types.Float64, types.Complex128: + return true, 7 + } + + signed = (b.Info() & types.IsUnsigned) == 0 + switch b.Kind() { + case types.Int8, types.Uint8: + maxBytes = 1 + case types.Int16, types.Uint16: + maxBytes = 2 + case types.Int32, types.Uint32: + maxBytes = 4 + default: + maxBytes = 8 + } + + return +} + +func (r *importReader) mpint(x *big.Int, typ *types.Basic) { + signed, maxBytes := intSize(typ) + + maxSmall := 256 - maxBytes + if signed { + maxSmall = 256 - 2*maxBytes + } + if maxBytes == 1 { + maxSmall = 256 + } + + n, _ := r.declReader.ReadByte() + if uint(n) < maxSmall { + v := int64(n) + if signed { + v >>= 1 + if n&1 != 0 { + v = ^v + } + } + x.SetInt64(v) + return + } + + v := -n + if signed { + v = -(n &^ 1) >> 1 + } + if v < 1 || uint(v) > maxBytes { + errorf("weird decoding: %v, %v => %v", n, signed, v) + } + b := make([]byte, v) + io.ReadFull(&r.declReader, b) + x.SetBytes(b) + if signed && n&1 != 0 { + x.Neg(x) + } +} + +func (r *importReader) mpfloat(typ *types.Basic) constant.Value { + var mant big.Int + r.mpint(&mant, typ) + var f big.Float + f.SetInt(&mant) + if f.Sign() != 0 { + f.SetMantExp(&f, int(r.int64())) + } + return constant.Make(&f) +} + +func (r *importReader) ident() string { + return r.string() +} + +func (r *importReader) qualifiedIdent() (*types.Package, string) { + name := r.string() + pkg := r.pkg() + return pkg, name +} + +func (r *importReader) pos() token.Pos { + if r.p.shallow { + // precise offsets are encoded only in shallow mode + return r.posv2() + } + if r.p.version >= iexportVersionPosCol { + r.posv1() + } else { + r.posv0() + } + + if r.prevFile == "" && r.prevLine == 0 && r.prevColumn == 0 { + return token.NoPos + } + return r.p.fake.pos(r.prevFile, int(r.prevLine), int(r.prevColumn)) +} + +func (r *importReader) posv0() { + delta := r.int64() + if delta != deltaNewFile { + r.prevLine += delta + } else if l := r.int64(); l == -1 { + r.prevLine += deltaNewFile + } else { + r.prevFile = r.string() + r.prevLine = l + } +} + +func (r *importReader) posv1() { + delta := r.int64() + r.prevColumn += delta >> 1 + if delta&1 != 0 { + delta = r.int64() + r.prevLine += delta >> 1 + if delta&1 != 0 { + r.prevFile = r.string() + } + } +} + +func (r *importReader) posv2() token.Pos { + file := r.uint64() + if file == 0 { + return token.NoPos + } + tf := r.p.fileAt(file - 1) + return tf.Pos(int(r.uint64())) +} + +func (r *importReader) typ() types.Type { + return r.p.typAt(r.uint64(), nil) +} + +func isInterface(t types.Type) bool { + _, ok := t.(*types.Interface) + return ok +} + +func (r *importReader) pkg() *types.Package { return r.p.pkgAt(r.uint64()) } +func (r 
*importReader) string() string { return r.p.stringAt(r.uint64()) } + +func (r *importReader) doType(base *types.Named) (res types.Type) { + k := r.kind() + if debug { + r.p.trace("importing type %d (base: %s)", k, base) + r.p.indent++ + defer func() { + r.p.indent-- + r.p.trace("=> %s", res) + }() + } + switch k { + default: + errorf("unexpected kind tag in %q: %v", r.p.ipath, k) + return nil + + case definedType: + pkg, name := r.qualifiedIdent() + r.p.doDecl(pkg, name) + return pkg.Scope().Lookup(name).(*types.TypeName).Type() + case pointerType: + return types.NewPointer(r.typ()) + case sliceType: + return types.NewSlice(r.typ()) + case arrayType: + n := r.uint64() + return types.NewArray(r.typ(), int64(n)) + case chanType: + dir := chanDir(int(r.uint64())) + return types.NewChan(dir, r.typ()) + case mapType: + return types.NewMap(r.typ(), r.typ()) + case signatureType: + r.currPkg = r.pkg() + return r.signature(nil, nil, nil) + + case structType: + r.currPkg = r.pkg() + + fields := make([]*types.Var, r.uint64()) + tags := make([]string, len(fields)) + for i := range fields { + var field *types.Var + if r.p.shallow { + field, _ = r.objectPathObject().(*types.Var) + } + + fpos := r.pos() + fname := r.ident() + ftyp := r.typ() + emb := r.bool() + tag := r.string() + + // Either this is not a shallow import, the field is local, or the + // encoded objectPath failed to produce an object (a bug). + // + // Even in this last, buggy case, fall back on creating a new field. As + // discussed in iexport.go, this is not correct, but mostly works and is + // preferable to failing (for now at least). + if field == nil { + field = types.NewField(fpos, r.currPkg, fname, ftyp, emb) + } + + fields[i] = field + tags[i] = tag + } + return types.NewStruct(fields, tags) + + case interfaceType: + r.currPkg = r.pkg() + + embeddeds := make([]types.Type, r.uint64()) + for i := range embeddeds { + _ = r.pos() + embeddeds[i] = r.typ() + } + + methods := make([]*types.Func, r.uint64()) + for i := range methods { + var method *types.Func + if r.p.shallow { + method, _ = r.objectPathObject().(*types.Func) + } + + mpos := r.pos() + mname := r.ident() + + // TODO(mdempsky): Matches bimport.go, but I + // don't agree with this. + var recv *types.Var + if base != nil { + recv = types.NewVar(token.NoPos, r.currPkg, "", base) + } + msig := r.signature(recv, nil, nil) + + if method == nil { + method = types.NewFunc(mpos, r.currPkg, mname, msig) + } + methods[i] = method + } + + typ := newInterface(methods, embeddeds) + r.p.interfaceList = append(r.p.interfaceList, typ) + return typ + + case typeParamType: + if r.p.version < iexportVersionGenerics { + errorf("unexpected type param type") + } + pkg, name := r.qualifiedIdent() + id := ident{pkg, name} + if t, ok := r.p.tparamIndex[id]; ok { + // We're already in the process of importing this typeparam. + return t + } + // Otherwise, import the definition of the typeparam now. + r.p.doDecl(pkg, name) + return r.p.tparamIndex[id] + + case instanceType: + if r.p.version < iexportVersionGenerics { + errorf("unexpected instantiation type") + } + // pos does not matter for instances: they are positioned on the original + // type. + _ = r.pos() + len := r.uint64() + targs := make([]types.Type, len) + for i := range targs { + targs[i] = r.typ() + } + baseType := r.typ() + // The imported instantiated type doesn't include any methods, so + // we must always use the methods of the base (orig) type. 
+ // TODO provide a non-nil *Environment + t, _ := typeparams.Instantiate(nil, baseType, targs, false) + + // Workaround for golang/go#61561. See the doc for instanceList for details. + r.p.instanceList = append(r.p.instanceList, t) + return t + + case unionType: + if r.p.version < iexportVersionGenerics { + errorf("unexpected instantiation type") + } + terms := make([]*typeparams.Term, r.uint64()) + for i := range terms { + terms[i] = typeparams.NewTerm(r.bool(), r.typ()) + } + return typeparams.NewUnion(terms) + } +} + +func (r *importReader) kind() itag { + return itag(r.uint64()) +} + +// objectPathObject is the inverse of exportWriter.objectPath. +// +// In shallow mode, certain fields and methods may need to be looked up in an +// imported package. See the doc for exportWriter.objectPath for a full +// explanation. +func (r *importReader) objectPathObject() types.Object { + objPath := objectpath.Path(r.string()) + if objPath == "" { + return nil + } + pkg := r.pkg() + obj, err := objectpath.Object(pkg, objPath) + if err != nil { + if r.p.reportf != nil { + r.p.reportf("failed to find object for objectPath %q: %v", objPath, err) + } + } + return obj +} + +func (r *importReader) signature(recv *types.Var, rparams []*typeparams.TypeParam, tparams []*typeparams.TypeParam) *types.Signature { + params := r.paramList() + results := r.paramList() + variadic := params.Len() > 0 && r.bool() + return typeparams.NewSignatureType(recv, rparams, tparams, params, results, variadic) +} + +func (r *importReader) tparamList() []*typeparams.TypeParam { + n := r.uint64() + if n == 0 { + return nil + } + xs := make([]*typeparams.TypeParam, n) + for i := range xs { + // Note: the standard library importer is tolerant of nil types here, + // though would panic in SetTypeParams. + xs[i] = r.typ().(*typeparams.TypeParam) + } + return xs +} + +func (r *importReader) paramList() *types.Tuple { + xs := make([]*types.Var, r.uint64()) + for i := range xs { + xs[i] = r.param() + } + return types.NewTuple(xs...) +} + +func (r *importReader) param() *types.Var { + pos := r.pos() + name := r.ident() + typ := r.typ() + return types.NewParam(pos, r.currPkg, name, typ) +} + +func (r *importReader) bool() bool { + return r.uint64() != 0 +} + +func (r *importReader) int64() int64 { + n, err := binary.ReadVarint(&r.declReader) + if err != nil { + errorf("readVarint: %v", err) + } + return n +} + +func (r *importReader) uint64() uint64 { + n, err := binary.ReadUvarint(&r.declReader) + if err != nil { + errorf("readUvarint: %v", err) + } + return n +} + +func (r *importReader) byte() byte { + x, err := r.declReader.ReadByte() + if err != nil { + errorf("declReader.ReadByte: %v", err) + } + return x +} + +func baseType(typ types.Type) *types.Named { + // pointer receivers are never types.Named types + if p, _ := typ.(*types.Pointer); p != nil { + typ = p.Elem() + } + // receiver base types are always (possibly generic) types.Named types + n, _ := typ.(*types.Named) + return n +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go new file mode 100644 index 0000000000..8b163e3d05 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go @@ -0,0 +1,22 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !go1.11 +// +build !go1.11 + +package gcimporter + +import "go/types" + +func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { + named := make([]*types.Named, len(embeddeds)) + for i, e := range embeddeds { + var ok bool + named[i], ok = e.(*types.Named) + if !ok { + panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11") + } + } + return types.NewInterface(methods, named) +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go new file mode 100644 index 0000000000..49984f40fd --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go @@ -0,0 +1,14 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.11 +// +build go1.11 + +package gcimporter + +import "go/types" + +func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { + return types.NewInterfaceType(methods, embeddeds) +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support_go117.go b/vendor/golang.org/x/tools/internal/gcimporter/support_go117.go new file mode 100644 index 0000000000..d892273efb --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/support_go117.go @@ -0,0 +1,16 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.18 +// +build !go1.18 + +package gcimporter + +import "go/types" + +const iexportVersion = iexportVersionGo1_11 + +func additionalPredeclared() []types.Type { + return nil +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go b/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go new file mode 100644 index 0000000000..edbe6ea704 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go @@ -0,0 +1,37 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 +// +build go1.18 + +package gcimporter + +import "go/types" + +const iexportVersion = iexportVersionGenerics + +// additionalPredeclared returns additional predeclared types in go.1.18. +func additionalPredeclared() []types.Type { + return []types.Type{ + // comparable + types.Universe.Lookup("comparable").Type(), + + // any + types.Universe.Lookup("any").Type(), + } +} + +// See cmd/compile/internal/types.SplitVargenSuffix. +func splitVargenSuffix(name string) (base, suffix string) { + i := len(name) + for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' { + i-- + } + const dot = "·" + if i >= len(dot) && name[i-len(dot):i] == dot { + i -= len(dot) + return name[:i], name[i:] + } + return name, "" +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go new file mode 100644 index 0000000000..286bf44548 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go @@ -0,0 +1,10 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !(go1.18 && goexperiment.unified) +// +build !go1.18 !goexperiment.unified + +package gcimporter + +const unifiedIR = false diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go new file mode 100644 index 0000000000..b5d69ffbe6 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go @@ -0,0 +1,10 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 && goexperiment.unified +// +build go1.18,goexperiment.unified + +package gcimporter + +const unifiedIR = true diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go new file mode 100644 index 0000000000..8eb20729c2 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go @@ -0,0 +1,19 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.18 +// +build !go1.18 + +package gcimporter + +import ( + "fmt" + "go/token" + "go/types" +) + +func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + err = fmt.Errorf("go/tools compiled with a Go version earlier than 1.18 cannot read unified IR export data") + return +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go new file mode 100644 index 0000000000..b977435f62 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -0,0 +1,728 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Derived from go/internal/gcimporter/ureader.go + +//go:build go1.18 +// +build go1.18 + +package gcimporter + +import ( + "fmt" + "go/token" + "go/types" + "sort" + "strings" + + "golang.org/x/tools/internal/pkgbits" +) + +// A pkgReader holds the shared state for reading a unified IR package +// description. +type pkgReader struct { + pkgbits.PkgDecoder + + fake fakeFileSet + + ctxt *types.Context + imports map[string]*types.Package // previously imported packages, indexed by path + + // lazily initialized arrays corresponding to the unified IR + // PosBase, Pkg, and Type sections, respectively. + posBases []string // position bases (i.e., file names) + pkgs []*types.Package + typs []types.Type + + // laterFns holds functions that need to be invoked at the end of + // import reading. + laterFns []func() + // laterFors is used in case of 'type A B' to ensure that B is processed before A. + laterFors map[types.Type]int + + // ifaces holds a list of constructed Interfaces, which need to have + // Complete called after importing is done. + ifaces []*types.Interface +} + +// later adds a function to be invoked at the end of import reading. +func (pr *pkgReader) later(fn func()) { + pr.laterFns = append(pr.laterFns, fn) +} + +// See cmd/compile/internal/noder.derivedInfo. +type derivedInfo struct { + idx pkgbits.Index + needed bool +} + +// See cmd/compile/internal/noder.typeInfo. 
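+//
+// A typeInfo refers either to an entry in the package's type section
+// (derived == false) or to an index into the current element's
+// dictionary of derived types (derived == true); see pkgReader.typIdx.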
+type typeInfo struct {
+	idx     pkgbits.Index
+	derived bool
+}
+
+func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
+	if !debug {
+		defer func() {
+			if x := recover(); x != nil {
+				err = fmt.Errorf("internal error in importing %q (%v); please report an issue", path, x)
+			}
+		}()
+	}
+
+	s := string(data)
+	s = s[:strings.LastIndex(s, "\n$$\n")]
+	input := pkgbits.NewPkgDecoder(path, s)
+	pkg = readUnifiedPackage(fset, nil, imports, input)
+	return
+}
+
+// laterFor adds a function to be invoked at the end of import reading, and records the type that function is finishing.
+func (pr *pkgReader) laterFor(t types.Type, fn func()) {
+	if pr.laterFors == nil {
+		pr.laterFors = make(map[types.Type]int)
+	}
+	pr.laterFors[t] = len(pr.laterFns)
+	pr.laterFns = append(pr.laterFns, fn)
+}
+
+// readUnifiedPackage reads a package description from the given
+// unified IR export data decoder.
+func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[string]*types.Package, input pkgbits.PkgDecoder) *types.Package {
+	pr := pkgReader{
+		PkgDecoder: input,
+
+		fake: fakeFileSet{
+			fset:  fset,
+			files: make(map[string]*fileInfo),
+		},
+
+		ctxt:    ctxt,
+		imports: imports,
+
+		posBases: make([]string, input.NumElems(pkgbits.RelocPosBase)),
+		pkgs:     make([]*types.Package, input.NumElems(pkgbits.RelocPkg)),
+		typs:     make([]types.Type, input.NumElems(pkgbits.RelocType)),
+	}
+	defer pr.fake.setLines()
+
+	r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic)
+	pkg := r.pkg()
+	r.Bool() // has init
+
+	for i, n := 0, r.Len(); i < n; i++ {
+		// As if r.obj(), but avoiding the Scope.Lookup call,
+		// to avoid eager loading of imports.
+		r.Sync(pkgbits.SyncObject)
+		assert(!r.Bool())
+		r.p.objIdx(r.Reloc(pkgbits.RelocObj))
+		assert(r.Len() == 0)
+	}
+
+	r.Sync(pkgbits.SyncEOF)
+
+	for _, fn := range pr.laterFns {
+		fn()
+	}
+
+	for _, iface := range pr.ifaces {
+		iface.Complete()
+	}
+
+	// Imports() of pkg are all of the transitive packages that were loaded.
+	var imps []*types.Package
+	for _, imp := range pr.pkgs {
+		if imp != nil && imp != pkg {
+			imps = append(imps, imp)
+		}
+	}
+	sort.Sort(byPath(imps))
+	pkg.SetImports(imps)
+
+	pkg.MarkComplete()
+	return pkg
+}
+
+// A reader holds the state for reading a single unified IR element
+// within a package.
+type reader struct {
+	pkgbits.Decoder
+
+	p *pkgReader
+
+	dict *readerDict
+}
+
+// A readerDict holds the state for type parameters that parameterize
+// the current unified IR element.
+type readerDict struct {
+	// bounds is a slice of typeInfos corresponding to the underlying
+	// bounds of the element's type parameters.
+	bounds []typeInfo
+
+	// tparams is a slice of the constructed TypeParams for the element.
+	tparams []*types.TypeParam
+
+	// derived is a slice of types derived from tparams, which may be
+	// instantiated while reading the current element.
+ derived []derivedInfo + derivedTypes []types.Type // lazily instantiated from derived +} + +func (pr *pkgReader) newReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader { + return &reader{ + Decoder: pr.NewDecoder(k, idx, marker), + p: pr, + } +} + +func (pr *pkgReader) tempReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader { + return &reader{ + Decoder: pr.TempDecoder(k, idx, marker), + p: pr, + } +} + +func (pr *pkgReader) retireReader(r *reader) { + pr.RetireDecoder(&r.Decoder) +} + +// @@@ Positions + +func (r *reader) pos() token.Pos { + r.Sync(pkgbits.SyncPos) + if !r.Bool() { + return token.NoPos + } + + // TODO(mdempsky): Delta encoding. + posBase := r.posBase() + line := r.Uint() + col := r.Uint() + return r.p.fake.pos(posBase, int(line), int(col)) +} + +func (r *reader) posBase() string { + return r.p.posBaseIdx(r.Reloc(pkgbits.RelocPosBase)) +} + +func (pr *pkgReader) posBaseIdx(idx pkgbits.Index) string { + if b := pr.posBases[idx]; b != "" { + return b + } + + var filename string + { + r := pr.tempReader(pkgbits.RelocPosBase, idx, pkgbits.SyncPosBase) + + // Within types2, position bases have a lot more details (e.g., + // keeping track of where //line directives appeared exactly). + // + // For go/types, we just track the file name. + + filename = r.String() + + if r.Bool() { // file base + // Was: "b = token.NewTrimmedFileBase(filename, true)" + } else { // line base + pos := r.pos() + line := r.Uint() + col := r.Uint() + + // Was: "b = token.NewLineBase(pos, filename, true, line, col)" + _, _, _ = pos, line, col + } + pr.retireReader(r) + } + b := filename + pr.posBases[idx] = b + return b +} + +// @@@ Packages + +func (r *reader) pkg() *types.Package { + r.Sync(pkgbits.SyncPkg) + return r.p.pkgIdx(r.Reloc(pkgbits.RelocPkg)) +} + +func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package { + // TODO(mdempsky): Consider using some non-nil pointer to indicate + // the universe scope, so we don't need to keep re-reading it. + if pkg := pr.pkgs[idx]; pkg != nil { + return pkg + } + + pkg := pr.newReader(pkgbits.RelocPkg, idx, pkgbits.SyncPkgDef).doPkg() + pr.pkgs[idx] = pkg + return pkg +} + +func (r *reader) doPkg() *types.Package { + path := r.String() + switch path { + case "": + path = r.p.PkgPath() + case "builtin": + return nil // universe + case "unsafe": + return types.Unsafe + } + + if pkg := r.p.imports[path]; pkg != nil { + return pkg + } + + name := r.String() + + pkg := types.NewPackage(path, name) + r.p.imports[path] = pkg + + return pkg +} + +// @@@ Types + +func (r *reader) typ() types.Type { + return r.p.typIdx(r.typInfo(), r.dict) +} + +func (r *reader) typInfo() typeInfo { + r.Sync(pkgbits.SyncType) + if r.Bool() { + return typeInfo{idx: pkgbits.Index(r.Len()), derived: true} + } + return typeInfo{idx: r.Reloc(pkgbits.RelocType), derived: false} +} + +func (pr *pkgReader) typIdx(info typeInfo, dict *readerDict) types.Type { + idx := info.idx + var where *types.Type + if info.derived { + where = &dict.derivedTypes[idx] + idx = dict.derived[idx].idx + } else { + where = &pr.typs[idx] + } + + if typ := *where; typ != nil { + return typ + } + + var typ types.Type + { + r := pr.tempReader(pkgbits.RelocType, idx, pkgbits.SyncTypeIdx) + r.dict = dict + + typ = r.doTyp() + assert(typ != nil) + pr.retireReader(r) + } + // See comment in pkgReader.typIdx explaining how this happens. 
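+	// (While doTyp ran above, a recursive call may already have decoded
+	// and stored this same type, in which case we prefer the existing
+	// entry.)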
+ if prev := *where; prev != nil { + return prev + } + + *where = typ + return typ +} + +func (r *reader) doTyp() (res types.Type) { + switch tag := pkgbits.CodeType(r.Code(pkgbits.SyncType)); tag { + default: + errorf("unhandled type tag: %v", tag) + panic("unreachable") + + case pkgbits.TypeBasic: + return types.Typ[r.Len()] + + case pkgbits.TypeNamed: + obj, targs := r.obj() + name := obj.(*types.TypeName) + if len(targs) != 0 { + t, _ := types.Instantiate(r.p.ctxt, name.Type(), targs, false) + return t + } + return name.Type() + + case pkgbits.TypeTypeParam: + return r.dict.tparams[r.Len()] + + case pkgbits.TypeArray: + len := int64(r.Uint64()) + return types.NewArray(r.typ(), len) + case pkgbits.TypeChan: + dir := types.ChanDir(r.Len()) + return types.NewChan(dir, r.typ()) + case pkgbits.TypeMap: + return types.NewMap(r.typ(), r.typ()) + case pkgbits.TypePointer: + return types.NewPointer(r.typ()) + case pkgbits.TypeSignature: + return r.signature(nil, nil, nil) + case pkgbits.TypeSlice: + return types.NewSlice(r.typ()) + case pkgbits.TypeStruct: + return r.structType() + case pkgbits.TypeInterface: + return r.interfaceType() + case pkgbits.TypeUnion: + return r.unionType() + } +} + +func (r *reader) structType() *types.Struct { + fields := make([]*types.Var, r.Len()) + var tags []string + for i := range fields { + pos := r.pos() + pkg, name := r.selector() + ftyp := r.typ() + tag := r.String() + embedded := r.Bool() + + fields[i] = types.NewField(pos, pkg, name, ftyp, embedded) + if tag != "" { + for len(tags) < i { + tags = append(tags, "") + } + tags = append(tags, tag) + } + } + return types.NewStruct(fields, tags) +} + +func (r *reader) unionType() *types.Union { + terms := make([]*types.Term, r.Len()) + for i := range terms { + terms[i] = types.NewTerm(r.Bool(), r.typ()) + } + return types.NewUnion(terms) +} + +func (r *reader) interfaceType() *types.Interface { + methods := make([]*types.Func, r.Len()) + embeddeds := make([]types.Type, r.Len()) + implicit := len(methods) == 0 && len(embeddeds) == 1 && r.Bool() + + for i := range methods { + pos := r.pos() + pkg, name := r.selector() + mtyp := r.signature(nil, nil, nil) + methods[i] = types.NewFunc(pos, pkg, name, mtyp) + } + + for i := range embeddeds { + embeddeds[i] = r.typ() + } + + iface := types.NewInterfaceType(methods, embeddeds) + if implicit { + iface.MarkImplicit() + } + + // We need to call iface.Complete(), but if there are any embedded + // defined types, then we may not have set their underlying + // interface type yet. So we need to defer calling Complete until + // after we've called SetUnderlying everywhere. + // + // TODO(mdempsky): After CL 424876 lands, it should be safe to call + // iface.Complete() immediately. + r.p.ifaces = append(r.p.ifaces, iface) + + return iface +} + +func (r *reader) signature(recv *types.Var, rtparams, tparams []*types.TypeParam) *types.Signature { + r.Sync(pkgbits.SyncSignature) + + params := r.params() + results := r.params() + variadic := r.Bool() + + return types.NewSignatureType(recv, rtparams, tparams, params, results, variadic) +} + +func (r *reader) params() *types.Tuple { + r.Sync(pkgbits.SyncParams) + + params := make([]*types.Var, r.Len()) + for i := range params { + params[i] = r.param() + } + + return types.NewTuple(params...) 
+}
+
+func (r *reader) param() *types.Var {
+	r.Sync(pkgbits.SyncParam)
+
+	pos := r.pos()
+	pkg, name := r.localIdent()
+	typ := r.typ()
+
+	return types.NewParam(pos, pkg, name, typ)
+}
+
+// @@@ Objects
+
+func (r *reader) obj() (types.Object, []types.Type) {
+	r.Sync(pkgbits.SyncObject)
+
+	assert(!r.Bool())
+
+	pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj))
+	obj := pkgScope(pkg).Lookup(name)
+
+	targs := make([]types.Type, r.Len())
+	for i := range targs {
+		targs[i] = r.typ()
+	}
+
+	return obj, targs
+}
+
+func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
+
+	var objPkg *types.Package
+	var objName string
+	var tag pkgbits.CodeObj
+	{
+		rname := pr.tempReader(pkgbits.RelocName, idx, pkgbits.SyncObject1)
+
+		objPkg, objName = rname.qualifiedIdent()
+		assert(objName != "")
+
+		tag = pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj))
+		pr.retireReader(rname)
+	}
+
+	if tag == pkgbits.ObjStub {
+		assert(objPkg == nil || objPkg == types.Unsafe)
+		return objPkg, objName
+	}
+
+	// Ignore local types promoted to global scope (#55110).
+	if _, suffix := splitVargenSuffix(objName); suffix != "" {
+		return objPkg, objName
+	}
+
+	if objPkg.Scope().Lookup(objName) == nil {
+		dict := pr.objDictIdx(idx)
+
+		r := pr.newReader(pkgbits.RelocObj, idx, pkgbits.SyncObject1)
+		r.dict = dict
+
+		declare := func(obj types.Object) {
+			objPkg.Scope().Insert(obj)
+		}
+
+		switch tag {
+		default:
+			panic("weird")
+
+		case pkgbits.ObjAlias:
+			pos := r.pos()
+			typ := r.typ()
+			declare(types.NewTypeName(pos, objPkg, objName, typ))
+
+		case pkgbits.ObjConst:
+			pos := r.pos()
+			typ := r.typ()
+			val := r.Value()
+			declare(types.NewConst(pos, objPkg, objName, typ, val))
+
+		case pkgbits.ObjFunc:
+			pos := r.pos()
+			tparams := r.typeParamNames()
+			sig := r.signature(nil, nil, tparams)
+			declare(types.NewFunc(pos, objPkg, objName, sig))
+
+		case pkgbits.ObjType:
+			pos := r.pos()
+
+			obj := types.NewTypeName(pos, objPkg, objName, nil)
+			named := types.NewNamed(obj, nil, nil)
+			declare(obj)
+
+			named.SetTypeParams(r.typeParamNames())
+
+			setUnderlying := func(underlying types.Type) {
+				// If the underlying type is an interface, we need to
+				// duplicate its methods so we can replace the receiver
+				// parameter's type (#49906).
+				if iface, ok := underlying.(*types.Interface); ok && iface.NumExplicitMethods() != 0 {
+					methods := make([]*types.Func, iface.NumExplicitMethods())
+					for i := range methods {
+						fn := iface.ExplicitMethod(i)
+						sig := fn.Type().(*types.Signature)
+
+						recv := types.NewVar(fn.Pos(), fn.Pkg(), "", named)
+						methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignature(recv, sig.Params(), sig.Results(), sig.Variadic()))
+					}
+
+					embeds := make([]types.Type, iface.NumEmbeddeds())
+					for i := range embeds {
+						embeds[i] = iface.EmbeddedType(i)
+					}
+
+					newIface := types.NewInterfaceType(methods, embeds)
+					r.p.ifaces = append(r.p.ifaces, newIface)
+					underlying = newIface
+				}
+
+				named.SetUnderlying(underlying)
+			}
+
+			// Since go.dev/cl/455279, we can assume rhs.Underlying() will
+			// always be non-nil. However, to temporarily support users of
+			// older snapshot releases, we continue to fall back to the old
+			// behavior for now.
+			//
+			// TODO(mdempsky): Remove fallback code and simplify after
+			// allowing time for snapshot users to upgrade.
+			rhs := r.typ()
+			if underlying := rhs.Underlying(); underlying != nil {
+				setUnderlying(underlying)
+			} else {
+				pk := r.p
+				pk.laterFor(named, func() {
+					// First be sure that the rhs is initialized, if it needs to be initialized.
+					delete(pk.laterFors, named) // prevent cycles
+					if i, ok := pk.laterFors[rhs]; ok {
+						f := pk.laterFns[i]
+						pk.laterFns[i] = func() {} // function is running now, so replace it with a no-op
+						f()                        // initialize RHS
+					}
+					setUnderlying(rhs.Underlying())
+				})
+			}
+
+			for i, n := 0, r.Len(); i < n; i++ {
+				named.AddMethod(r.method())
+			}
+
+		case pkgbits.ObjVar:
+			pos := r.pos()
+			typ := r.typ()
+			declare(types.NewVar(pos, objPkg, objName, typ))
+		}
+	}
+
+	return objPkg, objName
+}
+
+func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict {
+
+	var dict readerDict
+
+	{
+		r := pr.tempReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1)
+		if implicits := r.Len(); implicits != 0 {
+			errorf("unexpected object with %v implicit type parameter(s)", implicits)
+		}
+
+		dict.bounds = make([]typeInfo, r.Len())
+		for i := range dict.bounds {
+			dict.bounds[i] = r.typInfo()
+		}
+
+		dict.derived = make([]derivedInfo, r.Len())
+		dict.derivedTypes = make([]types.Type, len(dict.derived))
+		for i := range dict.derived {
+			dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()}
+		}
+
+		pr.retireReader(r)
+	}
+	// function references follow, but reader doesn't need those
+
+	return &dict
+}
+
+func (r *reader) typeParamNames() []*types.TypeParam {
+	r.Sync(pkgbits.SyncTypeParamNames)
+
+	// Note: This code assumes it only processes objects without
+	// implicit type parameters. This is currently fine, because
+	// reader is only used to read in exported declarations, which are
+	// always package scoped.
+
+	if len(r.dict.bounds) == 0 {
+		return nil
+	}
+
+	// Careful: Type parameter lists may have cycles. To allow for this,
+	// we construct the type parameter list in two passes: first we
+	// create all the TypeNames and TypeParams, then we construct and
+	// set the bound type.
+
+	r.dict.tparams = make([]*types.TypeParam, len(r.dict.bounds))
+	for i := range r.dict.bounds {
+		pos := r.pos()
+		pkg, name := r.localIdent()
+
+		tname := types.NewTypeName(pos, pkg, name, nil)
+		r.dict.tparams[i] = types.NewTypeParam(tname, nil)
+	}
+
+	typs := make([]types.Type, len(r.dict.bounds))
+	for i, bound := range r.dict.bounds {
+		typs[i] = r.p.typIdx(bound, r.dict)
+	}
+
+	// TODO(mdempsky): This is subtle, elaborate further.
+	//
+	// We have to save tparams outside of the closure, because
+	// typeParamNames() can be called multiple times with the same
+	// dictionary instance.
+	//
+	// Also, this needs to happen later to make sure SetUnderlying has
+	// been called.
+	//
+	// TODO(mdempsky): Is it safe to have a single "later" slice or do
+	// we need to have multiple passes? See comments on CL 386002 and
+	// go.dev/issue/52104.
+	tparams := r.dict.tparams
+	r.p.later(func() {
+		for i, typ := range typs {
+			tparams[i].SetConstraint(typ)
+		}
+	})
+
+	return r.dict.tparams
+}
+
+func (r *reader) method() *types.Func {
+	r.Sync(pkgbits.SyncMethod)
+	pos := r.pos()
+	pkg, name := r.selector()
+
+	rparams := r.typeParamNames()
+	sig := r.signature(r.param(), rparams, nil)
+
+	_ = r.pos() // TODO(mdempsky): Remove; this is a hack for linker.go.
+	return types.NewFunc(pos, pkg, name, sig)
+}
+
+func (r *reader) qualifiedIdent() (*types.Package, string) { return r.ident(pkgbits.SyncSym) }
+func (r *reader) localIdent() (*types.Package, string)     { return r.ident(pkgbits.SyncLocalIdent) }
+func (r *reader) selector() (*types.Package, string)       { return r.ident(pkgbits.SyncSelector) }
+
+func (r *reader) ident(marker pkgbits.SyncMarker) (*types.Package, string) {
+	r.Sync(marker)
+	return r.pkg(), r.String()
+}
+
+// pkgScope returns pkg.Scope().
+// If pkg is nil, it returns types.Universe instead.
+//
+// TODO(mdempsky): Remove after x/tools can depend on Go 1.19.
+func pkgScope(pkg *types.Package) *types.Scope {
+	if pkg != nil {
+		return pkg.Scope()
+	}
+	return types.Universe
+}
diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go
new file mode 100644
index 0000000000..53cf66da01
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go
@@ -0,0 +1,462 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package gocommand is a helper for calling the go command.
+package gocommand
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"os"
+	"reflect"
+	"regexp"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	exec "golang.org/x/sys/execabs"
+
+	"golang.org/x/tools/internal/event"
+	"golang.org/x/tools/internal/event/keys"
+	"golang.org/x/tools/internal/event/label"
+	"golang.org/x/tools/internal/event/tag"
+)
+
+// A Runner will run go command invocations and serialize
+// them if it sees a concurrency error.
+type Runner struct {
+	// once guards the runner initialization.
+	once sync.Once
+
+	// inFlight tracks available workers.
+	inFlight chan struct{}
+
+	// serialized guards the ability to run a go command serially,
+	// to avoid deadlocks when claiming workers.
+	serialized chan struct{}
+}
+
+const maxInFlight = 10
+
+func (runner *Runner) initialize() {
+	runner.once.Do(func() {
+		runner.inFlight = make(chan struct{}, maxInFlight)
+		runner.serialized = make(chan struct{}, 1)
+	})
+}
+
+// 1.13: go: updates to go.mod needed, but contents have changed
+// 1.14: go: updating go.mod: existing contents have changed since last read
+var modConcurrencyError = regexp.MustCompile(`go:.*go.mod.*contents have changed`)
+
+// verb is an event label for the go command verb.
+var verb = keys.NewString("verb", "go command verb")
+
+func invLabels(inv Invocation) []label.Label {
+	return []label.Label{verb.Of(inv.Verb), tag.Directory.Of(inv.WorkingDir)}
+}
+
+// Run is a convenience wrapper around RunRaw.
+// It returns only stdout and a "friendly" error.
+func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, error) {
+	ctx, done := event.Start(ctx, "gocommand.Runner.Run", invLabels(inv)...)
+	defer done()
+
+	stdout, _, friendly, _ := runner.RunRaw(ctx, inv)
+	return stdout, friendly
+}
+
+// RunPiped runs the invocation serially, always waiting for any concurrent
+// invocations to complete first.
+func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) error {
+	ctx, done := event.Start(ctx, "gocommand.Runner.RunPiped", invLabels(inv)...)
+	defer done()
+
+	_, err := runner.runPiped(ctx, inv, stdout, stderr)
+	return err
+}
+
+// RunRaw runs the invocation, serializing requests only if they fight over
+// go.mod changes.
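+// It returns the stdout and stderr buffers, a "friendly" error suitable
+// for display, and the raw error from the go command.
+//
+// A minimal usage sketch (illustrative only; the verb, args, and dir
+// shown here are assumptions, not part of this API):
+//
+//	var runner Runner
+//	inv := Invocation{Verb: "list", Args: []string{"-m"}, WorkingDir: dir}
+//	stdout, stderr, friendlyErr, rawErr := runner.RunRaw(ctx, inv)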
+func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { + ctx, done := event.Start(ctx, "gocommand.Runner.RunRaw", invLabels(inv)...) + defer done() + // Make sure the runner is always initialized. + runner.initialize() + + // First, try to run the go command concurrently. + stdout, stderr, friendlyErr, err := runner.runConcurrent(ctx, inv) + + // If we encounter a load concurrency error, we need to retry serially. + if friendlyErr == nil || !modConcurrencyError.MatchString(friendlyErr.Error()) { + return stdout, stderr, friendlyErr, err + } + event.Error(ctx, "Load concurrency error, will retry serially", err) + + // Run serially by calling runPiped. + stdout.Reset() + stderr.Reset() + friendlyErr, err = runner.runPiped(ctx, inv, stdout, stderr) + return stdout, stderr, friendlyErr, err +} + +func (runner *Runner) runConcurrent(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { + // Wait for 1 worker to become available. + select { + case <-ctx.Done(): + return nil, nil, nil, ctx.Err() + case runner.inFlight <- struct{}{}: + defer func() { <-runner.inFlight }() + } + + stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{} + friendlyErr, err := inv.runWithFriendlyError(ctx, stdout, stderr) + return stdout, stderr, friendlyErr, err +} + +func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) (error, error) { + // Make sure the runner is always initialized. + runner.initialize() + + // Acquire the serialization lock. This avoids deadlocks between two + // runPiped commands. + select { + case <-ctx.Done(): + return nil, ctx.Err() + case runner.serialized <- struct{}{}: + defer func() { <-runner.serialized }() + } + + // Wait for all in-progress go commands to return before proceeding, + // to avoid load concurrency errors. + for i := 0; i < maxInFlight; i++ { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case runner.inFlight <- struct{}{}: + // Make sure we always "return" any workers we took. + defer func() { <-runner.inFlight }() + } + } + + return inv.runWithFriendlyError(ctx, stdout, stderr) +} + +// An Invocation represents a call to the go command. +type Invocation struct { + Verb string + Args []string + BuildFlags []string + + // If ModFlag is set, the go command is invoked with -mod=ModFlag. + ModFlag string + + // If ModFile is set, the go command is invoked with -modfile=ModFile. + ModFile string + + // If Overlay is set, the go command is invoked with -overlay=Overlay. + Overlay string + + // If CleanEnv is set, the invocation will run only with the environment + // in Env, not starting with os.Environ. + CleanEnv bool + Env []string + WorkingDir string + Logf func(format string, args ...interface{}) +} + +func (i *Invocation) runWithFriendlyError(ctx context.Context, stdout, stderr io.Writer) (friendlyError error, rawError error) { + rawError = i.run(ctx, stdout, stderr) + if rawError != nil { + friendlyError = rawError + // Check for 'go' executable not being found. 
+		if ee, ok := rawError.(*exec.Error); ok && ee.Err == exec.ErrNotFound {
+			friendlyError = fmt.Errorf("go command required, not found: %v", ee)
+		}
+		if ctx.Err() != nil {
+			friendlyError = ctx.Err()
+		}
+		friendlyError = fmt.Errorf("err: %v: stderr: %s", friendlyError, stderr)
+	}
+	return
+}
+
+func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
+	log := i.Logf
+	if log == nil {
+		log = func(string, ...interface{}) {}
+	}
+
+	goArgs := []string{i.Verb}
+
+	appendModFile := func() {
+		if i.ModFile != "" {
+			goArgs = append(goArgs, "-modfile="+i.ModFile)
+		}
+	}
+	appendModFlag := func() {
+		if i.ModFlag != "" {
+			goArgs = append(goArgs, "-mod="+i.ModFlag)
+		}
+	}
+	appendOverlayFlag := func() {
+		if i.Overlay != "" {
+			goArgs = append(goArgs, "-overlay="+i.Overlay)
+		}
+	}
+
+	switch i.Verb {
+	case "env", "version":
+		goArgs = append(goArgs, i.Args...)
+	case "mod":
+		// mod needs the sub-verb before flags.
+		goArgs = append(goArgs, i.Args[0])
+		appendModFile()
+		goArgs = append(goArgs, i.Args[1:]...)
+	case "get":
+		goArgs = append(goArgs, i.BuildFlags...)
+		appendModFile()
+		goArgs = append(goArgs, i.Args...)
+
+	default: // notably list and build.
+		goArgs = append(goArgs, i.BuildFlags...)
+		appendModFile()
+		appendModFlag()
+		appendOverlayFlag()
+		goArgs = append(goArgs, i.Args...)
+	}
+	cmd := exec.Command("go", goArgs...)
+	cmd.Stdout = stdout
+	cmd.Stderr = stderr
+
+	// cmd.WaitDelay was added only in go1.20 (see #50436).
+	if waitDelay := reflect.ValueOf(cmd).Elem().FieldByName("WaitDelay"); waitDelay.IsValid() {
+		// https://go.dev/issue/59541: don't wait forever copying stderr
+		// after the command has exited.
+		// After CL 484741 we copy stdout manually, so we'll stop reading that as
+		// soon as ctx is done. However, we also don't want to wait around forever
+		// for stderr. Give a much-longer-than-reasonable delay and then assume that
+		// something has wedged in the kernel or runtime.
+		waitDelay.Set(reflect.ValueOf(30 * time.Second))
+	}
+
+	// On darwin the cwd gets resolved to the real path, which breaks anything that
+	// expects the working directory to keep the original path, including the
+	// go command when dealing with modules.
+	// The Go stdlib has a special feature where if the cwd and the PWD are the
+	// same node then it trusts the PWD, so by setting it in the env for the child
+	// process we fix up all the paths returned by the go command.
+	if !i.CleanEnv {
+		cmd.Env = os.Environ()
+	}
+	cmd.Env = append(cmd.Env, i.Env...)
+	if i.WorkingDir != "" {
+		cmd.Env = append(cmd.Env, "PWD="+i.WorkingDir)
+		cmd.Dir = i.WorkingDir
+	}
+
+	defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now())
+
+	return runCmdContext(ctx, cmd)
+}
+
+// DebugHangingGoCommands may be set by tests to enable additional
+// instrumentation (including panics) for debugging hanging Go commands.
+//
+// See golang/go#54461 for details.
+var DebugHangingGoCommands = false
+
+// runCmdContext is like exec.CommandContext except it sends os.Interrupt
+// before os.Kill.
+func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) {
+	// If cmd.Stdout is not an *os.File, the exec package will create a pipe and
+	// copy it to the Writer in a goroutine until the process has finished and
+	// either the pipe reaches EOF or command's WaitDelay expires.
+	//
+	// However, the output from 'go list' can be quite large, and we don't want to
+	// keep reading (and allocating buffers) if we've already decided we don't
+	// care about the output. We don't want to wait for the process to finish, and
+	// we don't want to wait for the WaitDelay to expire either.
+	//
+	// Instead, if cmd.Stdout requires a copying goroutine we explicitly replace
+	// it with a pipe (which is an *os.File), which we can close in order to stop
+	// copying output as soon as we realize we don't care about it.
+	var stdoutW *os.File
+	if cmd.Stdout != nil {
+		if _, ok := cmd.Stdout.(*os.File); !ok {
+			var stdoutR *os.File
+			stdoutR, stdoutW, err = os.Pipe()
+			if err != nil {
+				return err
+			}
+			prevStdout := cmd.Stdout
+			cmd.Stdout = stdoutW
+
+			stdoutErr := make(chan error, 1)
+			go func() {
+				_, err := io.Copy(prevStdout, stdoutR)
+				if err != nil {
+					err = fmt.Errorf("copying stdout: %w", err)
+				}
+				stdoutErr <- err
+			}()
+			defer func() {
+				// We started a goroutine to copy a stdout pipe.
+				// Wait for it to finish, or terminate it if need be.
+				var err2 error
+				select {
+				case err2 = <-stdoutErr:
+					stdoutR.Close()
+				case <-ctx.Done():
+					stdoutR.Close()
+					// Per https://pkg.go.dev/os#File.Close, the call to stdoutR.Close
+					// should cause the Read call in io.Copy to unblock and return
+					// immediately, but we still need to receive from stdoutErr to confirm
+					// that it has happened.
+					<-stdoutErr
+					err2 = ctx.Err()
+				}
+				if err == nil {
+					err = err2
+				}
+			}()
+
+			// Per https://pkg.go.dev/os/exec#Cmd, “If Stdout and Stderr are the
+			// same writer, and have a type that can be compared with ==, at most
+			// one goroutine at a time will call Write.”
+			//
+			// Since we're starting a goroutine that writes to cmd.Stdout, we must
+			// also update cmd.Stderr so that it still holds.
+			func() {
+				defer func() { recover() }()
+				if cmd.Stderr == prevStdout {
+					cmd.Stderr = cmd.Stdout
+				}
+			}()
+		}
+	}
+
+	err = cmd.Start()
+	if stdoutW != nil {
+		// The child process has inherited the pipe file,
+		// so close the copy held in this process.
+		stdoutW.Close()
+		stdoutW = nil
+	}
+	if err != nil {
+		return err
+	}
+
+	resChan := make(chan error, 1)
+	go func() {
+		resChan <- cmd.Wait()
+	}()
+
+	// If we're interested in debugging hanging Go commands, stop waiting after a
+	// minute and panic with interesting information.
+	debug := DebugHangingGoCommands
+	if debug {
+		timer := time.NewTimer(1 * time.Minute)
+		defer timer.Stop()
+		select {
+		case err := <-resChan:
+			return err
+		case <-timer.C:
+			HandleHangingGoCommand(cmd.Process)
+		case <-ctx.Done():
+		}
+	} else {
+		select {
+		case err := <-resChan:
+			return err
+		case <-ctx.Done():
+		}
+	}
+
+	// Cancelled. Interrupt and see if it ends voluntarily.
+	if err := cmd.Process.Signal(os.Interrupt); err == nil {
+		// (We used to wait only 1s but this proved
+		// fragile on loaded builder machines.)
+		timer := time.NewTimer(5 * time.Second)
+		defer timer.Stop()
+		select {
+		case err := <-resChan:
+			return err
+		case <-timer.C:
+		}
+	}
+
+	// Didn't shut down in response to interrupt. Kill it hard.
+	// TODO(rfindley): per advice from bcmills@, it may be better to send SIGQUIT
+	// on certain platforms, such as unix.
+ if err := cmd.Process.Kill(); err != nil && !errors.Is(err, os.ErrProcessDone) && debug { + log.Printf("error killing the Go command: %v", err) + } + + return <-resChan +} + +func HandleHangingGoCommand(proc *os.Process) { + switch runtime.GOOS { + case "linux", "darwin", "freebsd", "netbsd": + fmt.Fprintln(os.Stderr, `DETECTED A HANGING GO COMMAND + +The gopls test runner has detected a hanging go command. In order to debug +this, the output of ps and lsof/fstat is printed below. + +See golang/go#54461 for more details.`) + + fmt.Fprintln(os.Stderr, "\nps axo ppid,pid,command:") + fmt.Fprintln(os.Stderr, "-------------------------") + psCmd := exec.Command("ps", "axo", "ppid,pid,command") + psCmd.Stdout = os.Stderr + psCmd.Stderr = os.Stderr + if err := psCmd.Run(); err != nil { + panic(fmt.Sprintf("running ps: %v", err)) + } + + listFiles := "lsof" + if runtime.GOOS == "freebsd" || runtime.GOOS == "netbsd" { + listFiles = "fstat" + } + + fmt.Fprintln(os.Stderr, "\n"+listFiles+":") + fmt.Fprintln(os.Stderr, "-----") + listFilesCmd := exec.Command(listFiles) + listFilesCmd.Stdout = os.Stderr + listFilesCmd.Stderr = os.Stderr + if err := listFilesCmd.Run(); err != nil { + panic(fmt.Sprintf("running %s: %v", listFiles, err)) + } + } + panic(fmt.Sprintf("detected hanging go command (pid %d): see golang/go#54461 for more details", proc.Pid)) +} + +func cmdDebugStr(cmd *exec.Cmd) string { + env := make(map[string]string) + for _, kv := range cmd.Env { + split := strings.SplitN(kv, "=", 2) + if len(split) == 2 { + k, v := split[0], split[1] + env[k] = v + } + } + + var args []string + for _, arg := range cmd.Args { + quoted := strconv.Quote(arg) + if quoted[1:len(quoted)-1] != arg || strings.Contains(arg, " ") { + args = append(args, quoted) + } else { + args = append(args, arg) + } + } + return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " ")) +} diff --git a/vendor/golang.org/x/tools/internal/gocommand/vendor.go b/vendor/golang.org/x/tools/internal/gocommand/vendor.go new file mode 100644 index 0000000000..2d3d408c0b --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gocommand/vendor.go @@ -0,0 +1,109 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gocommand + +import ( + "bytes" + "context" + "fmt" + "os" + "path/filepath" + "regexp" + "strings" + "time" + + "golang.org/x/mod/semver" +) + +// ModuleJSON holds information about a module. +type ModuleJSON struct { + Path string // module path + Version string // module version + Versions []string // available module versions (with -versions) + Replace *ModuleJSON // replaced by this module + Time *time.Time // time version was created + Update *ModuleJSON // available update, if any (with -u) + Main bool // is this the main module? + Indirect bool // is this module only an indirect dependency of main module? + Dir string // directory holding files for this module, if any + GoMod string // path to go.mod file used when loading this module, if any + GoVersion string // go version used in module +} + +var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`) + +// VendorEnabled reports whether vendoring is enabled. It takes a *Runner to execute Go commands +// with the supplied context.Context and Invocation. 
The Invocation can contain pre-defined fields, +// of which only Verb and Args are modified to run the appropriate Go command. +// Inspired by setDefaultBuildMod in modload/init.go +func VendorEnabled(ctx context.Context, inv Invocation, r *Runner) (bool, *ModuleJSON, error) { + mainMod, go114, err := getMainModuleAnd114(ctx, inv, r) + if err != nil { + return false, nil, err + } + + // We check the GOFLAGS to see if there is anything overridden or not. + inv.Verb = "env" + inv.Args = []string{"GOFLAGS"} + stdout, err := r.Run(ctx, inv) + if err != nil { + return false, nil, err + } + goflags := string(bytes.TrimSpace(stdout.Bytes())) + matches := modFlagRegexp.FindStringSubmatch(goflags) + var modFlag string + if len(matches) != 0 { + modFlag = matches[1] + } + // Don't override an explicit '-mod=' argument. + if modFlag == "vendor" { + return true, mainMod, nil + } else if modFlag != "" { + return false, nil, nil + } + if mainMod == nil || !go114 { + return false, nil, nil + } + // Check 1.14's automatic vendor mode. + if fi, err := os.Stat(filepath.Join(mainMod.Dir, "vendor")); err == nil && fi.IsDir() { + if mainMod.GoVersion != "" && semver.Compare("v"+mainMod.GoVersion, "v1.14") >= 0 { + // The Go version is at least 1.14, and a vendor directory exists. + // Set -mod=vendor by default. + return true, mainMod, nil + } + } + return false, nil, nil +} + +// getMainModuleAnd114 gets one of the main modules' information and whether the +// go command in use is 1.14+. This is the information needed to figure out +// if vendoring should be enabled. +func getMainModuleAnd114(ctx context.Context, inv Invocation, r *Runner) (*ModuleJSON, bool, error) { + const format = `{{.Path}} +{{.Dir}} +{{.GoMod}} +{{.GoVersion}} +{{range context.ReleaseTags}}{{if eq . "go1.14"}}{{.}}{{end}}{{end}} +` + inv.Verb = "list" + inv.Args = []string{"-m", "-f", format} + stdout, err := r.Run(ctx, inv) + if err != nil { + return nil, false, err + } + + lines := strings.Split(stdout.String(), "\n") + if len(lines) < 5 { + return nil, false, fmt.Errorf("unexpected stdout: %q", stdout.String()) + } + mod := &ModuleJSON{ + Path: lines[0], + Dir: lines[1], + GoMod: lines[2], + GoVersion: lines[3], + Main: true, + } + return mod, lines[4] == "go1.14", nil +} diff --git a/vendor/golang.org/x/tools/internal/gocommand/version.go b/vendor/golang.org/x/tools/internal/gocommand/version.go new file mode 100644 index 0000000000..446c5846a6 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gocommand/version.go @@ -0,0 +1,71 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gocommand + +import ( + "context" + "fmt" + "regexp" + "strings" +) + +// GoVersion reports the minor version number of the highest release +// tag built into the go command on the PATH. +// +// Note that this may be higher than the version of the go tool used +// to build this application, and thus the versions of the standard +// go/{scanner,parser,ast,types} packages that are linked into it. +// In that case, callers should either downgrade to the version of +// go used to build the application, or report an error that the +// application is too old to use the go command on the PATH. +func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) { + inv.Verb = "list" + inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`, `--`, `unsafe`} + inv.BuildFlags = nil // This is not a build command. 
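+	// For context: this 'go list' invocation prints the toolchain's release
+	// tags, e.g. "[go1.1 go1.2 go1.21]\n" (an invented example, not captured
+	// output), and the loop below scans the list from the end for the highest
+	// go1.X entry. A sketch of the parse step in isolation:
+	//
+	//	out := "[go1.1 go1.2 go1.21]\n"
+	//	tags := strings.Fields(out[1 : len(out)-2]) // strip "[" and "]\n"
+	//	var minor int
+	//	fmt.Sscanf(tags[len(tags)-1], "go1.%d", &minor) // minor == 21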
+ inv.ModFlag = "" + inv.ModFile = "" + inv.Env = append(inv.Env[:len(inv.Env):len(inv.Env)], "GO111MODULE=off") + + stdoutBytes, err := r.Run(ctx, inv) + if err != nil { + return 0, err + } + stdout := stdoutBytes.String() + if len(stdout) < 3 { + return 0, fmt.Errorf("bad ReleaseTags output: %q", stdout) + } + // Split up "[go1.1 go1.15]" and return highest go1.X value. + tags := strings.Fields(stdout[1 : len(stdout)-2]) + for i := len(tags) - 1; i >= 0; i-- { + var version int + if _, err := fmt.Sscanf(tags[i], "go1.%d", &version); err != nil { + continue + } + return version, nil + } + return 0, fmt.Errorf("no parseable ReleaseTags in %v", tags) +} + +// GoVersionOutput returns the complete output of the go version command. +func GoVersionOutput(ctx context.Context, inv Invocation, r *Runner) (string, error) { + inv.Verb = "version" + goVersion, err := r.Run(ctx, inv) + if err != nil { + return "", err + } + return goVersion.String(), nil +} + +// ParseGoVersionOutput extracts the Go version string +// from the output of the "go version" command. +// Given an unrecognized form, it returns an empty string. +func ParseGoVersionOutput(data string) string { + re := regexp.MustCompile(`^go version (go\S+|devel \S+)`) + m := re.FindStringSubmatch(data) + if len(m) != 2 { + return "" // unrecognized version + } + return m[1] +} diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go new file mode 100644 index 0000000000..d9950b1f0b --- /dev/null +++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go @@ -0,0 +1,30 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package packagesinternal exposes internal-only fields from go/packages. +package packagesinternal + +import ( + "golang.org/x/tools/internal/gocommand" +) + +var GetForTest = func(p interface{}) string { return "" } +var GetDepsErrors = func(p interface{}) []*PackageError { return nil } + +type PackageError struct { + ImportStack []string // shortest path from package named on command line to this one + Pos string // position of error (if present, file:line:col) + Err string // the error itself +} + +var GetGoCmdRunner = func(config interface{}) *gocommand.Runner { return nil } + +var SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) {} + +var TypecheckCgo int +var DepsErrors int // must be set as a LoadMode to call GetDepsErrors +var ForTest int // must be set as a LoadMode to call GetForTest + +var SetModFlag = func(config interface{}, value string) {} +var SetModFile = func(config interface{}, value string) {} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/codes.go b/vendor/golang.org/x/tools/internal/pkgbits/codes.go new file mode 100644 index 0000000000..f0cabde96e --- /dev/null +++ b/vendor/golang.org/x/tools/internal/pkgbits/codes.go @@ -0,0 +1,77 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +// A Code is an enum value that can be encoded into bitstreams. +// +// Code types are preferable for enum types, because they allow +// Decoder to detect desyncs. +type Code interface { + // Marker returns the SyncMarker for the Code's dynamic type. + Marker() SyncMarker + + // Value returns the Code's ordinal value. 
+ Value() int +} + +// A CodeVal distinguishes among go/constant.Value encodings. +type CodeVal int + +func (c CodeVal) Marker() SyncMarker { return SyncVal } +func (c CodeVal) Value() int { return int(c) } + +// Note: These values are public and cannot be changed without +// updating the go/types importers. + +const ( + ValBool CodeVal = iota + ValString + ValInt64 + ValBigInt + ValBigRat + ValBigFloat +) + +// A CodeType distinguishes among go/types.Type encodings. +type CodeType int + +func (c CodeType) Marker() SyncMarker { return SyncType } +func (c CodeType) Value() int { return int(c) } + +// Note: These values are public and cannot be changed without +// updating the go/types importers. + +const ( + TypeBasic CodeType = iota + TypeNamed + TypePointer + TypeSlice + TypeArray + TypeChan + TypeMap + TypeSignature + TypeStruct + TypeInterface + TypeUnion + TypeTypeParam +) + +// A CodeObj distinguishes among go/types.Object encodings. +type CodeObj int + +func (c CodeObj) Marker() SyncMarker { return SyncCodeObj } +func (c CodeObj) Value() int { return int(c) } + +// Note: These values are public and cannot be changed without +// updating the go/types importers. + +const ( + ObjAlias CodeObj = iota + ObjConst + ObjType + ObjFunc + ObjVar + ObjStub +) diff --git a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go new file mode 100644 index 0000000000..b92e8e6eb3 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go @@ -0,0 +1,517 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import ( + "encoding/binary" + "errors" + "fmt" + "go/constant" + "go/token" + "io" + "math/big" + "os" + "runtime" + "strings" +) + +// A PkgDecoder provides methods for decoding a package's Unified IR +// export data. +type PkgDecoder struct { + // version is the file format version. + version uint32 + + // sync indicates whether the file uses sync markers. + sync bool + + // pkgPath is the package path for the package to be decoded. + // + // TODO(mdempsky): Remove; unneeded since CL 391014. + pkgPath string + + // elemData is the full data payload of the encoded package. + // Elements are densely and contiguously packed together. + // + // The last 8 bytes of elemData are the package fingerprint. + elemData string + + // elemEnds stores the byte-offset end positions of element + // bitstreams within elemData. + // + // For example, element I's bitstream data starts at elemEnds[I-1] + // (or 0, if I==0) and ends at elemEnds[I]. + // + // Note: elemEnds is indexed by absolute indices, not + // section-relative indices. + elemEnds []uint32 + + // elemEndsEnds stores the index-offset end positions of relocation + // sections within elemEnds. + // + // For example, section K's end positions start at elemEndsEnds[K-1] + // (or 0, if K==0) and end at elemEndsEnds[K]. + elemEndsEnds [numRelocs]uint32 + + scratchRelocEnt []RelocEnt +} + +// PkgPath returns the package path for the package +// +// TODO(mdempsky): Remove; unneeded since CL 391014. +func (pr *PkgDecoder) PkgPath() string { return pr.pkgPath } + +// SyncMarkers reports whether pr uses sync markers. +func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync } + +// NewPkgDecoder returns a PkgDecoder initialized to read the Unified +// IR export data from input. 
pkgPath is the package path for the +// compilation unit that produced the export data. +// +// TODO(mdempsky): Remove pkgPath parameter; unneeded since CL 391014. +func NewPkgDecoder(pkgPath, input string) PkgDecoder { + pr := PkgDecoder{ + pkgPath: pkgPath, + } + + // TODO(mdempsky): Implement direct indexing of input string to + // avoid copying the position information. + + r := strings.NewReader(input) + + assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil) + + switch pr.version { + default: + panic(fmt.Errorf("unsupported version: %v", pr.version)) + case 0: + // no flags + case 1: + var flags uint32 + assert(binary.Read(r, binary.LittleEndian, &flags) == nil) + pr.sync = flags&flagSyncMarkers != 0 + } + + assert(binary.Read(r, binary.LittleEndian, pr.elemEndsEnds[:]) == nil) + + pr.elemEnds = make([]uint32, pr.elemEndsEnds[len(pr.elemEndsEnds)-1]) + assert(binary.Read(r, binary.LittleEndian, pr.elemEnds[:]) == nil) + + pos, err := r.Seek(0, io.SeekCurrent) + assert(err == nil) + + pr.elemData = input[pos:] + assert(len(pr.elemData)-8 == int(pr.elemEnds[len(pr.elemEnds)-1])) + + return pr +} + +// NumElems returns the number of elements in section k. +func (pr *PkgDecoder) NumElems(k RelocKind) int { + count := int(pr.elemEndsEnds[k]) + if k > 0 { + count -= int(pr.elemEndsEnds[k-1]) + } + return count +} + +// TotalElems returns the total number of elements across all sections. +func (pr *PkgDecoder) TotalElems() int { + return len(pr.elemEnds) +} + +// Fingerprint returns the package fingerprint. +func (pr *PkgDecoder) Fingerprint() [8]byte { + var fp [8]byte + copy(fp[:], pr.elemData[len(pr.elemData)-8:]) + return fp +} + +// AbsIdx returns the absolute index for the given (section, index) +// pair. +func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int { + absIdx := int(idx) + if k > 0 { + absIdx += int(pr.elemEndsEnds[k-1]) + } + if absIdx >= int(pr.elemEndsEnds[k]) { + errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds) + } + return absIdx +} + +// DataIdx returns the raw element bitstream for the given (section, +// index) pair. +func (pr *PkgDecoder) DataIdx(k RelocKind, idx Index) string { + absIdx := pr.AbsIdx(k, idx) + + var start uint32 + if absIdx > 0 { + start = pr.elemEnds[absIdx-1] + } + end := pr.elemEnds[absIdx] + + return pr.elemData[start:end] +} + +// StringIdx returns the string value for the given string index. +func (pr *PkgDecoder) StringIdx(idx Index) string { + return pr.DataIdx(RelocString, idx) +} + +// NewDecoder returns a Decoder for the given (section, index) pair, +// and decodes the given SyncMarker from the element bitstream. +func (pr *PkgDecoder) NewDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder { + r := pr.NewDecoderRaw(k, idx) + r.Sync(marker) + return r +} + +// TempDecoder returns a Decoder for the given (section, index) pair, +// and decodes the given SyncMarker from the element bitstream. +// If possible the Decoder should be RetireDecoder'd when it is no longer +// needed, this will avoid heap allocations. +func (pr *PkgDecoder) TempDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder { + r := pr.TempDecoderRaw(k, idx) + r.Sync(marker) + return r +} + +func (pr *PkgDecoder) RetireDecoder(d *Decoder) { + pr.scratchRelocEnt = d.Relocs + d.Relocs = nil +} + +// NewDecoderRaw returns a Decoder for the given (section, index) pair. +// +// Most callers should use NewDecoder instead. 
+func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder {
+	r := Decoder{
+		common: pr,
+		k:      k,
+		Idx:    idx,
+	}
+
+	// TODO(mdempsky) r.data.Reset(...) after #44505 is resolved.
+	r.Data = *strings.NewReader(pr.DataIdx(k, idx))
+
+	r.Sync(SyncRelocs)
+	r.Relocs = make([]RelocEnt, r.Len())
+	for i := range r.Relocs {
+		r.Sync(SyncReloc)
+		r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())}
+	}
+
+	return r
+}
+
+func (pr *PkgDecoder) TempDecoderRaw(k RelocKind, idx Index) Decoder {
+	r := Decoder{
+		common: pr,
+		k:      k,
+		Idx:    idx,
+	}
+
+	r.Data.Reset(pr.DataIdx(k, idx))
+	r.Sync(SyncRelocs)
+	l := r.Len()
+	if cap(pr.scratchRelocEnt) >= l {
+		r.Relocs = pr.scratchRelocEnt[:l]
+		pr.scratchRelocEnt = nil
+	} else {
+		r.Relocs = make([]RelocEnt, l)
+	}
+	for i := range r.Relocs {
+		r.Sync(SyncReloc)
+		r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())}
+	}
+
+	return r
+}
+
+// A Decoder provides methods for decoding an individual element's
+// bitstream data.
+type Decoder struct {
+	common *PkgDecoder
+
+	Relocs []RelocEnt
+	Data   strings.Reader
+
+	k   RelocKind
+	Idx Index
+}
+
+func (r *Decoder) checkErr(err error) {
+	if err != nil {
+		errorf("unexpected decoding error: %w", err)
+	}
+}
+
+func (r *Decoder) rawUvarint() uint64 {
+	x, err := readUvarint(&r.Data)
+	r.checkErr(err)
+	return x
+}
+
+// readUvarint is a type-specialized copy of encoding/binary.ReadUvarint.
+// This avoids the interface conversion and thus has better escape properties,
+// which flows up the stack.
+func readUvarint(r *strings.Reader) (uint64, error) {
+	var x uint64
+	var s uint
+	for i := 0; i < binary.MaxVarintLen64; i++ {
+		b, err := r.ReadByte()
+		if err != nil {
+			if i > 0 && err == io.EOF {
+				err = io.ErrUnexpectedEOF
+			}
+			return x, err
+		}
+		if b < 0x80 {
+			if i == binary.MaxVarintLen64-1 && b > 1 {
+				return x, overflow
+			}
+			return x | uint64(b)<<s, nil
+		}
+		x |= uint64(b&0x7f) << s
+		s += 7
+	}
+	return x, overflow
+}
+
+var overflow = errors.New("pkgbits: readUvarint overflows a 64-bit integer")
+
+func (r *Decoder) rawVarint() int64 {
+	ux := r.rawUvarint()
+
+	// Zig-zag decode.
+	x := int64(ux >> 1)
+	if ux&1 != 0 {
+		x = ^x
+	}
+	return x
+}
+
+func (r *Decoder) rawReloc(k RelocKind, idx int) Index {
+	e := r.Relocs[idx]
+	assert(e.Kind == k)
+	return e.Idx
+}
+
+// Sync decodes a sync marker from the element bitstream and asserts
+// that it matches the expected marker.
+//
+// If r.common.sync is false, then Sync is a no-op.
+func (r *Decoder) Sync(mWant SyncMarker) {
+	if !r.common.sync {
+		return
+	}
+
+	pos, _ := r.Data.Seek(0, io.SeekCurrent)
+	mHave := SyncMarker(r.rawUvarint())
+	writerPCs := make([]int, r.rawUvarint())
+	for i := range writerPCs {
+		writerPCs[i] = int(r.rawUvarint())
+	}
+
+	if mHave == mWant {
+		return
+	}
+
+	// There's some tension here between printing:
+	//
+	// (1) full file paths that tools can recognize (e.g., so emacs
+	// hyperlinks the "file:line" text for easy navigation), or
+	//
+	// (2) short file paths that are easier for humans to read (e.g., by
+	// omitting redundant or irrelevant details, so it's easier to
+	// focus on the useful bits that remain).
+	//
+	// The current formatting favors the former, as it seems more
+	// helpful in practice. But perhaps the formatting could be improved
+	// to better address both concerns. For example, use relative file
+	// paths if they would be shorter, or rewrite file paths to contain
+	// "$GOROOT" (like objabi.AbsFile does) if tools can be taught how
+	// to reliably expand that again.
+ + fmt.Printf("export data desync: package %q, section %v, index %v, offset %v\n", r.common.pkgPath, r.k, r.Idx, pos) + + fmt.Printf("\nfound %v, written at:\n", mHave) + if len(writerPCs) == 0 { + fmt.Printf("\t[stack trace unavailable; recompile package %q with -d=syncframes]\n", r.common.pkgPath) + } + for _, pc := range writerPCs { + fmt.Printf("\t%s\n", r.common.StringIdx(r.rawReloc(RelocString, pc))) + } + + fmt.Printf("\nexpected %v, reading at:\n", mWant) + var readerPCs [32]uintptr // TODO(mdempsky): Dynamically size? + n := runtime.Callers(2, readerPCs[:]) + for _, pc := range fmtFrames(readerPCs[:n]...) { + fmt.Printf("\t%s\n", pc) + } + + // We already printed a stack trace for the reader, so now we can + // simply exit. Printing a second one with panic or base.Fatalf + // would just be noise. + os.Exit(1) +} + +// Bool decodes and returns a bool value from the element bitstream. +func (r *Decoder) Bool() bool { + r.Sync(SyncBool) + x, err := r.Data.ReadByte() + r.checkErr(err) + assert(x < 2) + return x != 0 +} + +// Int64 decodes and returns an int64 value from the element bitstream. +func (r *Decoder) Int64() int64 { + r.Sync(SyncInt64) + return r.rawVarint() +} + +// Uint64 decodes and returns a uint64 value from the element bitstream. +func (r *Decoder) Uint64() uint64 { + r.Sync(SyncUint64) + return r.rawUvarint() +} + +// Len decodes and returns a non-negative int value from the element bitstream. +func (r *Decoder) Len() int { x := r.Uint64(); v := int(x); assert(uint64(v) == x); return v } + +// Int decodes and returns an int value from the element bitstream. +func (r *Decoder) Int() int { x := r.Int64(); v := int(x); assert(int64(v) == x); return v } + +// Uint decodes and returns a uint value from the element bitstream. +func (r *Decoder) Uint() uint { x := r.Uint64(); v := uint(x); assert(uint64(v) == x); return v } + +// Code decodes a Code value from the element bitstream and returns +// its ordinal value. It's the caller's responsibility to convert the +// result to an appropriate Code type. +// +// TODO(mdempsky): Ideally this method would have signature "Code[T +// Code] T" instead, but we don't allow generic methods and the +// compiler can't depend on generics yet anyway. +func (r *Decoder) Code(mark SyncMarker) int { + r.Sync(mark) + return r.Len() +} + +// Reloc decodes a relocation of expected section k from the element +// bitstream and returns an index to the referenced element. +func (r *Decoder) Reloc(k RelocKind) Index { + r.Sync(SyncUseReloc) + return r.rawReloc(k, r.Len()) +} + +// String decodes and returns a string value from the element +// bitstream. +func (r *Decoder) String() string { + r.Sync(SyncString) + return r.common.StringIdx(r.Reloc(RelocString)) +} + +// Strings decodes and returns a variable-length slice of strings from +// the element bitstream. +func (r *Decoder) Strings() []string { + res := make([]string, r.Len()) + for i := range res { + res[i] = r.String() + } + return res +} + +// Value decodes and returns a constant.Value from the element +// bitstream. 
+func (r *Decoder) Value() constant.Value { + r.Sync(SyncValue) + isComplex := r.Bool() + val := r.scalar() + if isComplex { + val = constant.BinaryOp(val, token.ADD, constant.MakeImag(r.scalar())) + } + return val +} + +func (r *Decoder) scalar() constant.Value { + switch tag := CodeVal(r.Code(SyncVal)); tag { + default: + panic(fmt.Errorf("unexpected scalar tag: %v", tag)) + + case ValBool: + return constant.MakeBool(r.Bool()) + case ValString: + return constant.MakeString(r.String()) + case ValInt64: + return constant.MakeInt64(r.Int64()) + case ValBigInt: + return constant.Make(r.bigInt()) + case ValBigRat: + num := r.bigInt() + denom := r.bigInt() + return constant.Make(new(big.Rat).SetFrac(num, denom)) + case ValBigFloat: + return constant.Make(r.bigFloat()) + } +} + +func (r *Decoder) bigInt() *big.Int { + v := new(big.Int).SetBytes([]byte(r.String())) + if r.Bool() { + v.Neg(v) + } + return v +} + +func (r *Decoder) bigFloat() *big.Float { + v := new(big.Float).SetPrec(512) + assert(v.UnmarshalText([]byte(r.String())) == nil) + return v +} + +// @@@ Helpers + +// TODO(mdempsky): These should probably be removed. I think they're a +// smell that the export data format is not yet quite right. + +// PeekPkgPath returns the package path for the specified package +// index. +func (pr *PkgDecoder) PeekPkgPath(idx Index) string { + var path string + { + r := pr.TempDecoder(RelocPkg, idx, SyncPkgDef) + path = r.String() + pr.RetireDecoder(&r) + } + if path == "" { + path = pr.pkgPath + } + return path +} + +// PeekObj returns the package path, object name, and CodeObj for the +// specified object index. +func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) { + var ridx Index + var name string + var rcode int + { + r := pr.TempDecoder(RelocName, idx, SyncObject1) + r.Sync(SyncSym) + r.Sync(SyncPkg) + ridx = r.Reloc(RelocPkg) + name = r.String() + rcode = r.Code(SyncCodeObj) + pr.RetireDecoder(&r) + } + + path := pr.PeekPkgPath(ridx) + assert(name != "") + + tag := CodeObj(rcode) + + return path, name, tag +} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/doc.go b/vendor/golang.org/x/tools/internal/pkgbits/doc.go new file mode 100644 index 0000000000..c8a2796b5e --- /dev/null +++ b/vendor/golang.org/x/tools/internal/pkgbits/doc.go @@ -0,0 +1,32 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pkgbits implements low-level coding abstractions for +// Unified IR's export data format. +// +// At a low-level, a package is a collection of bitstream elements. +// Each element has a "kind" and a dense, non-negative index. +// Elements can be randomly accessed given their kind and index. +// +// Individual elements are sequences of variable-length values (e.g., +// integers, booleans, strings, go/constant values, cross-references +// to other elements). Package pkgbits provides APIs for encoding and +// decoding these low-level values, but the details of mapping +// higher-level Go constructs into elements is left to higher-level +// abstractions. +// +// Elements may cross-reference each other with "relocations." For +// example, an element representing a pointer type has a relocation +// referring to the element type. +// +// Go constructs may be composed as a constellation of multiple +// elements. 
For example, a declared function may have one element to +// describe the object (e.g., its name, type, position), and a +// separate element to describe its function body. This allows readers +// some flexibility in efficiently seeking or re-reading data (e.g., +// inlining requires re-reading the function body for each inlined +// call, without needing to re-read the object-level details). +// +// This is a copy of internal/pkgbits in the Go implementation. +package pkgbits diff --git a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go new file mode 100644 index 0000000000..6482617a4f --- /dev/null +++ b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go @@ -0,0 +1,383 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import ( + "bytes" + "crypto/md5" + "encoding/binary" + "go/constant" + "io" + "math/big" + "runtime" +) + +// currentVersion is the current version number. +// +// - v0: initial prototype +// +// - v1: adds the flags uint32 word +const currentVersion uint32 = 1 + +// A PkgEncoder provides methods for encoding a package's Unified IR +// export data. +type PkgEncoder struct { + // elems holds the bitstream for previously encoded elements. + elems [numRelocs][]string + + // stringsIdx maps previously encoded strings to their index within + // the RelocString section, to allow deduplication. That is, + // elems[RelocString][stringsIdx[s]] == s (if present). + stringsIdx map[string]Index + + // syncFrames is the number of frames to write at each sync + // marker. A negative value means sync markers are omitted. + syncFrames int +} + +// SyncMarkers reports whether pw uses sync markers. +func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 } + +// NewPkgEncoder returns an initialized PkgEncoder. +// +// syncFrames is the number of caller frames that should be serialized +// at Sync points. Serializing additional frames results in larger +// export data files, but can help diagnosing desync errors in +// higher-level Unified IR reader/writer code. If syncFrames is +// negative, then sync markers are omitted entirely. +func NewPkgEncoder(syncFrames int) PkgEncoder { + return PkgEncoder{ + stringsIdx: make(map[string]Index), + syncFrames: syncFrames, + } +} + +// DumpTo writes the package's encoded data to out0 and returns the +// package fingerprint. +func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) { + h := md5.New() + out := io.MultiWriter(out0, h) + + writeUint32 := func(x uint32) { + assert(binary.Write(out, binary.LittleEndian, x) == nil) + } + + writeUint32(currentVersion) + + var flags uint32 + if pw.SyncMarkers() { + flags |= flagSyncMarkers + } + writeUint32(flags) + + // Write elemEndsEnds. + var sum uint32 + for _, elems := range &pw.elems { + sum += uint32(len(elems)) + writeUint32(sum) + } + + // Write elemEnds. + sum = 0 + for _, elems := range &pw.elems { + for _, elem := range elems { + sum += uint32(len(elem)) + writeUint32(sum) + } + } + + // Write elemData. + for _, elems := range &pw.elems { + for _, elem := range elems { + _, err := io.WriteString(out, elem) + assert(err == nil) + } + } + + // Write fingerprint. + copy(fingerprint[:], h.Sum(nil)) + _, err := out0.Write(fingerprint[:]) + assert(err == nil) + + return +} + +// StringIdx adds a string value to the strings section, if not +// already present, and returns its index. 
+func (pw *PkgEncoder) StringIdx(s string) Index { + if idx, ok := pw.stringsIdx[s]; ok { + assert(pw.elems[RelocString][idx] == s) + return idx + } + + idx := Index(len(pw.elems[RelocString])) + pw.elems[RelocString] = append(pw.elems[RelocString], s) + pw.stringsIdx[s] = idx + return idx +} + +// NewEncoder returns an Encoder for a new element within the given +// section, and encodes the given SyncMarker as the start of the +// element bitstream. +func (pw *PkgEncoder) NewEncoder(k RelocKind, marker SyncMarker) Encoder { + e := pw.NewEncoderRaw(k) + e.Sync(marker) + return e +} + +// NewEncoderRaw returns an Encoder for a new element within the given +// section. +// +// Most callers should use NewEncoder instead. +func (pw *PkgEncoder) NewEncoderRaw(k RelocKind) Encoder { + idx := Index(len(pw.elems[k])) + pw.elems[k] = append(pw.elems[k], "") // placeholder + + return Encoder{ + p: pw, + k: k, + Idx: idx, + } +} + +// An Encoder provides methods for encoding an individual element's +// bitstream data. +type Encoder struct { + p *PkgEncoder + + Relocs []RelocEnt + RelocMap map[RelocEnt]uint32 + Data bytes.Buffer // accumulated element bitstream data + + encodingRelocHeader bool + + k RelocKind + Idx Index // index within relocation section +} + +// Flush finalizes the element's bitstream and returns its Index. +func (w *Encoder) Flush() Index { + var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved + + // Backup the data so we write the relocations at the front. + var tmp bytes.Buffer + io.Copy(&tmp, &w.Data) + + // TODO(mdempsky): Consider writing these out separately so they're + // easier to strip, along with function bodies, so that we can prune + // down to just the data that's relevant to go/types. + if w.encodingRelocHeader { + panic("encodingRelocHeader already true; recursive flush?") + } + w.encodingRelocHeader = true + w.Sync(SyncRelocs) + w.Len(len(w.Relocs)) + for _, rEnt := range w.Relocs { + w.Sync(SyncReloc) + w.Len(int(rEnt.Kind)) + w.Len(int(rEnt.Idx)) + } + + io.Copy(&sb, &w.Data) + io.Copy(&sb, &tmp) + w.p.elems[w.k][w.Idx] = sb.String() + + return w.Idx +} + +func (w *Encoder) checkErr(err error) { + if err != nil { + errorf("unexpected encoding error: %v", err) + } +} + +func (w *Encoder) rawUvarint(x uint64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutUvarint(buf[:], x) + _, err := w.Data.Write(buf[:n]) + w.checkErr(err) +} + +func (w *Encoder) rawVarint(x int64) { + // Zig-zag encode. + ux := uint64(x) << 1 + if x < 0 { + ux = ^ux + } + + w.rawUvarint(ux) +} + +func (w *Encoder) rawReloc(r RelocKind, idx Index) int { + e := RelocEnt{r, idx} + if w.RelocMap != nil { + if i, ok := w.RelocMap[e]; ok { + return int(i) + } + } else { + w.RelocMap = make(map[RelocEnt]uint32) + } + + i := len(w.Relocs) + w.RelocMap[e] = uint32(i) + w.Relocs = append(w.Relocs, e) + return i +} + +func (w *Encoder) Sync(m SyncMarker) { + if !w.p.SyncMarkers() { + return + } + + // Writing out stack frame string references requires working + // relocations, but writing out the relocations themselves involves + // sync markers. To prevent infinite recursion, we simply trim the + // stack frame for sync markers within the relocation header. + var frames []string + if !w.encodingRelocHeader && w.p.syncFrames > 0 { + pcs := make([]uintptr, w.p.syncFrames) + n := runtime.Callers(2, pcs) + frames = fmtFrames(pcs[:n]...) + } + + // TODO(mdempsky): Save space by writing out stack frames as a + // linked list so we can share common stack frames. 
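+	// The wire format written below (and read back by Decoder.Sync) is: one
+	// uvarint for the marker's ordinal, one uvarint for the frame count, then
+	// one uvarint per frame holding a RelocString relocation index. For
+	// example, SyncBool (ordinal 2) with no frames recorded (syncFrames == 0)
+	// comes out as:
+	//
+	//	w.rawUvarint(2) // marker ordinal
+	//	w.rawUvarint(0) // zero stack frames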
+ w.rawUvarint(uint64(m)) + w.rawUvarint(uint64(len(frames))) + for _, frame := range frames { + w.rawUvarint(uint64(w.rawReloc(RelocString, w.p.StringIdx(frame)))) + } +} + +// Bool encodes and writes a bool value into the element bitstream, +// and then returns the bool value. +// +// For simple, 2-alternative encodings, the idiomatic way to call Bool +// is something like: +// +// if w.Bool(x != 0) { +// // alternative #1 +// } else { +// // alternative #2 +// } +// +// For multi-alternative encodings, use Code instead. +func (w *Encoder) Bool(b bool) bool { + w.Sync(SyncBool) + var x byte + if b { + x = 1 + } + err := w.Data.WriteByte(x) + w.checkErr(err) + return b +} + +// Int64 encodes and writes an int64 value into the element bitstream. +func (w *Encoder) Int64(x int64) { + w.Sync(SyncInt64) + w.rawVarint(x) +} + +// Uint64 encodes and writes a uint64 value into the element bitstream. +func (w *Encoder) Uint64(x uint64) { + w.Sync(SyncUint64) + w.rawUvarint(x) +} + +// Len encodes and writes a non-negative int value into the element bitstream. +func (w *Encoder) Len(x int) { assert(x >= 0); w.Uint64(uint64(x)) } + +// Int encodes and writes an int value into the element bitstream. +func (w *Encoder) Int(x int) { w.Int64(int64(x)) } + +// Uint encodes and writes a uint value into the element bitstream. +func (w *Encoder) Uint(x uint) { w.Uint64(uint64(x)) } + +// Reloc encodes and writes a relocation for the given (section, +// index) pair into the element bitstream. +// +// Note: Only the index is formally written into the element +// bitstream, so bitstream decoders must know from context which +// section an encoded relocation refers to. +func (w *Encoder) Reloc(r RelocKind, idx Index) { + w.Sync(SyncUseReloc) + w.Len(w.rawReloc(r, idx)) +} + +// Code encodes and writes a Code value into the element bitstream. +func (w *Encoder) Code(c Code) { + w.Sync(c.Marker()) + w.Len(c.Value()) +} + +// String encodes and writes a string value into the element +// bitstream. +// +// Internally, strings are deduplicated by adding them to the strings +// section (if not already present), and then writing a relocation +// into the element bitstream. +func (w *Encoder) String(s string) { + w.Sync(SyncString) + w.Reloc(RelocString, w.p.StringIdx(s)) +} + +// Strings encodes and writes a variable-length slice of strings into +// the element bitstream. +func (w *Encoder) Strings(ss []string) { + w.Len(len(ss)) + for _, s := range ss { + w.String(s) + } +} + +// Value encodes and writes a constant.Value into the element +// bitstream. +func (w *Encoder) Value(val constant.Value) { + w.Sync(SyncValue) + if w.Bool(val.Kind() == constant.Complex) { + w.scalar(constant.Real(val)) + w.scalar(constant.Imag(val)) + } else { + w.scalar(val) + } +} + +func (w *Encoder) scalar(val constant.Value) { + switch v := constant.Val(val).(type) { + default: + errorf("unhandled %v (%v)", val, val.Kind()) + case bool: + w.Code(ValBool) + w.Bool(v) + case string: + w.Code(ValString) + w.String(v) + case int64: + w.Code(ValInt64) + w.Int64(v) + case *big.Int: + w.Code(ValBigInt) + w.bigInt(v) + case *big.Rat: + w.Code(ValBigRat) + w.bigInt(v.Num()) + w.bigInt(v.Denom()) + case *big.Float: + w.Code(ValBigFloat) + w.bigFloat(v) + } +} + +func (w *Encoder) bigInt(v *big.Int) { + b := v.Bytes() + w.String(string(b)) // TODO: More efficient encoding. + w.Bool(v.Sign() < 0) +} + +func (w *Encoder) bigFloat(v *big.Float) { + b := v.Append(nil, 'p', -1) + w.String(string(b)) // TODO: More efficient encoding. 
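+	// The 'p' verb with prec -1 is an exact binary representation (1.5
+	// serializes as "0x.cp+01"), which is what lets the decoder rebuild the
+	// value via UnmarshalText. A small round-trip sketch of that assumption,
+	// independent of this package:
+	//
+	//	src := big.NewFloat(1.5)
+	//	text := src.Append(nil, 'p', -1) // lossless textual form
+	//	dst := new(big.Float).SetPrec(512)
+	//	_ = dst.UnmarshalText(text) // dst.Cmp(src) == 0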
+} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/flags.go b/vendor/golang.org/x/tools/internal/pkgbits/flags.go new file mode 100644 index 0000000000..654222745f --- /dev/null +++ b/vendor/golang.org/x/tools/internal/pkgbits/flags.go @@ -0,0 +1,9 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +const ( + flagSyncMarkers = 1 << iota // file format contains sync markers +) diff --git a/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go b/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go new file mode 100644 index 0000000000..5294f6a63e --- /dev/null +++ b/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go @@ -0,0 +1,21 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.7 +// +build !go1.7 + +// TODO(mdempsky): Remove after #44505 is resolved + +package pkgbits + +import "runtime" + +func walkFrames(pcs []uintptr, visit frameVisitor) { + for _, pc := range pcs { + fn := runtime.FuncForPC(pc) + file, line := fn.FileLine(pc) + + visit(file, line, fn.Name(), pc-fn.Entry()) + } +} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go b/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go new file mode 100644 index 0000000000..2324ae7adf --- /dev/null +++ b/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go @@ -0,0 +1,28 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.7 +// +build go1.7 + +package pkgbits + +import "runtime" + +// walkFrames calls visit for each call frame represented by pcs. +// +// pcs should be a slice of PCs, as returned by runtime.Callers. +func walkFrames(pcs []uintptr, visit frameVisitor) { + if len(pcs) == 0 { + return + } + + frames := runtime.CallersFrames(pcs) + for { + frame, more := frames.Next() + visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry) + if !more { + return + } + } +} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/reloc.go b/vendor/golang.org/x/tools/internal/pkgbits/reloc.go new file mode 100644 index 0000000000..fcdfb97ca9 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/pkgbits/reloc.go @@ -0,0 +1,42 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +// A RelocKind indicates a particular section within a unified IR export. +type RelocKind int32 + +// An Index represents a bitstream element index within a particular +// section. +type Index int32 + +// A relocEnt (relocation entry) is an entry in an element's local +// reference table. +// +// TODO(mdempsky): Rename this too. +type RelocEnt struct { + Kind RelocKind + Idx Index +} + +// Reserved indices within the meta relocation section. 
+const ( + PublicRootIdx Index = 0 + PrivateRootIdx Index = 1 +) + +const ( + RelocString RelocKind = iota + RelocMeta + RelocPosBase + RelocPkg + RelocName + RelocType + RelocObj + RelocObjExt + RelocObjDict + RelocBody + + numRelocs = iota +) diff --git a/vendor/golang.org/x/tools/internal/pkgbits/support.go b/vendor/golang.org/x/tools/internal/pkgbits/support.go new file mode 100644 index 0000000000..ad26d3b28c --- /dev/null +++ b/vendor/golang.org/x/tools/internal/pkgbits/support.go @@ -0,0 +1,17 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import "fmt" + +func assert(b bool) { + if !b { + panic("assertion failed") + } +} + +func errorf(format string, args ...interface{}) { + panic(fmt.Errorf(format, args...)) +} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/sync.go b/vendor/golang.org/x/tools/internal/pkgbits/sync.go new file mode 100644 index 0000000000..5bd51ef717 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/pkgbits/sync.go @@ -0,0 +1,113 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import ( + "fmt" + "strings" +) + +// fmtFrames formats a backtrace for reporting reader/writer desyncs. +func fmtFrames(pcs ...uintptr) []string { + res := make([]string, 0, len(pcs)) + walkFrames(pcs, func(file string, line int, name string, offset uintptr) { + // Trim package from function name. It's just redundant noise. + name = strings.TrimPrefix(name, "cmd/compile/internal/noder.") + + res = append(res, fmt.Sprintf("%s:%v: %s +0x%v", file, line, name, offset)) + }) + return res +} + +type frameVisitor func(file string, line int, name string, offset uintptr) + +// SyncMarker is an enum type that represents markers that may be +// written to export data to ensure the reader and writer stay +// synchronized. +type SyncMarker int + +//go:generate stringer -type=SyncMarker -trimprefix=Sync + +const ( + _ SyncMarker = iota + + // Public markers (known to go/types importers). + + // Low-level coding markers. + SyncEOF + SyncBool + SyncInt64 + SyncUint64 + SyncString + SyncValue + SyncVal + SyncRelocs + SyncReloc + SyncUseReloc + + // Higher-level object and type markers. + SyncPublic + SyncPos + SyncPosBase + SyncObject + SyncObject1 + SyncPkg + SyncPkgDef + SyncMethod + SyncType + SyncTypeIdx + SyncTypeParamNames + SyncSignature + SyncParams + SyncParam + SyncCodeObj + SyncSym + SyncLocalIdent + SyncSelector + + // Private markers (only known to cmd/compile). 
+ SyncPrivate + + SyncFuncExt + SyncVarExt + SyncTypeExt + SyncPragma + + SyncExprList + SyncExprs + SyncExpr + SyncExprType + SyncAssign + SyncOp + SyncFuncLit + SyncCompLit + + SyncDecl + SyncFuncBody + SyncOpenScope + SyncCloseScope + SyncCloseAnotherScope + SyncDeclNames + SyncDeclName + + SyncStmts + SyncBlockStmt + SyncIfStmt + SyncForStmt + SyncSwitchStmt + SyncRangeStmt + SyncCaseClause + SyncCommClause + SyncSelectStmt + SyncDecls + SyncLabeledStmt + SyncUseObjLocal + SyncAddLocal + SyncLinkname + SyncStmt1 + SyncStmtsEnd + SyncLabel + SyncOptLabel +) diff --git a/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go b/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go new file mode 100644 index 0000000000..4a5b0ca5f2 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go @@ -0,0 +1,89 @@ +// Code generated by "stringer -type=SyncMarker -trimprefix=Sync"; DO NOT EDIT. + +package pkgbits + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[SyncEOF-1] + _ = x[SyncBool-2] + _ = x[SyncInt64-3] + _ = x[SyncUint64-4] + _ = x[SyncString-5] + _ = x[SyncValue-6] + _ = x[SyncVal-7] + _ = x[SyncRelocs-8] + _ = x[SyncReloc-9] + _ = x[SyncUseReloc-10] + _ = x[SyncPublic-11] + _ = x[SyncPos-12] + _ = x[SyncPosBase-13] + _ = x[SyncObject-14] + _ = x[SyncObject1-15] + _ = x[SyncPkg-16] + _ = x[SyncPkgDef-17] + _ = x[SyncMethod-18] + _ = x[SyncType-19] + _ = x[SyncTypeIdx-20] + _ = x[SyncTypeParamNames-21] + _ = x[SyncSignature-22] + _ = x[SyncParams-23] + _ = x[SyncParam-24] + _ = x[SyncCodeObj-25] + _ = x[SyncSym-26] + _ = x[SyncLocalIdent-27] + _ = x[SyncSelector-28] + _ = x[SyncPrivate-29] + _ = x[SyncFuncExt-30] + _ = x[SyncVarExt-31] + _ = x[SyncTypeExt-32] + _ = x[SyncPragma-33] + _ = x[SyncExprList-34] + _ = x[SyncExprs-35] + _ = x[SyncExpr-36] + _ = x[SyncExprType-37] + _ = x[SyncAssign-38] + _ = x[SyncOp-39] + _ = x[SyncFuncLit-40] + _ = x[SyncCompLit-41] + _ = x[SyncDecl-42] + _ = x[SyncFuncBody-43] + _ = x[SyncOpenScope-44] + _ = x[SyncCloseScope-45] + _ = x[SyncCloseAnotherScope-46] + _ = x[SyncDeclNames-47] + _ = x[SyncDeclName-48] + _ = x[SyncStmts-49] + _ = x[SyncBlockStmt-50] + _ = x[SyncIfStmt-51] + _ = x[SyncForStmt-52] + _ = x[SyncSwitchStmt-53] + _ = x[SyncRangeStmt-54] + _ = x[SyncCaseClause-55] + _ = x[SyncCommClause-56] + _ = x[SyncSelectStmt-57] + _ = x[SyncDecls-58] + _ = x[SyncLabeledStmt-59] + _ = x[SyncUseObjLocal-60] + _ = x[SyncAddLocal-61] + _ = x[SyncLinkname-62] + _ = x[SyncStmt1-63] + _ = x[SyncStmtsEnd-64] + _ = x[SyncLabel-65] + _ = x[SyncOptLabel-66] +} + +const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabel" + +var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 
432, 437, 445, 450, 458} + +func (i SyncMarker) String() string { + i -= 1 + if i < 0 || i >= SyncMarker(len(_SyncMarker_index)-1) { + return "SyncMarker(" + strconv.FormatInt(int64(i+1), 10) + ")" + } + return _SyncMarker_name[_SyncMarker_index[i]:_SyncMarker_index[i+1]] +} diff --git a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go new file mode 100644 index 0000000000..7e638ec24f --- /dev/null +++ b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go @@ -0,0 +1,151 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// package tokeninternal provides access to some internal features of the token +// package. +package tokeninternal + +import ( + "fmt" + "go/token" + "sort" + "sync" + "unsafe" +) + +// GetLines returns the table of line-start offsets from a token.File. +func GetLines(file *token.File) []int { + // token.File has a Lines method on Go 1.21 and later. + if file, ok := (interface{})(file).(interface{ Lines() []int }); ok { + return file.Lines() + } + + // This declaration must match that of token.File. + // This creates a risk of dependency skew. + // For now we check that the size of the two + // declarations is the same, on the (fragile) assumption + // that future changes would add fields. + type tokenFile119 struct { + _ string + _ int + _ int + mu sync.Mutex // we're not complete monsters + lines []int + _ []struct{} + } + type tokenFile118 struct { + _ *token.FileSet // deleted in go1.19 + tokenFile119 + } + + type uP = unsafe.Pointer + switch unsafe.Sizeof(*file) { + case unsafe.Sizeof(tokenFile118{}): + var ptr *tokenFile118 + *(*uP)(uP(&ptr)) = uP(file) + ptr.mu.Lock() + defer ptr.mu.Unlock() + return ptr.lines + + case unsafe.Sizeof(tokenFile119{}): + var ptr *tokenFile119 + *(*uP)(uP(&ptr)) = uP(file) + ptr.mu.Lock() + defer ptr.mu.Unlock() + return ptr.lines + + default: + panic("unexpected token.File size") + } +} + +// AddExistingFiles adds the specified files to the FileSet if they +// are not already present. It panics if any pair of files in the +// resulting FileSet would overlap. +func AddExistingFiles(fset *token.FileSet, files []*token.File) { + // Punch through the FileSet encapsulation. + type tokenFileSet struct { + // This type remained essentially consistent from go1.16 to go1.21. + mutex sync.RWMutex + base int + files []*token.File + _ *token.File // changed to atomic.Pointer[token.File] in go1.19 + } + + // If the size of token.FileSet changes, this will fail to compile. + const delta = int64(unsafe.Sizeof(tokenFileSet{})) - int64(unsafe.Sizeof(token.FileSet{})) + var _ [-delta * delta]int + + type uP = unsafe.Pointer + var ptr *tokenFileSet + *(*uP)(uP(&ptr)) = uP(fset) + ptr.mutex.Lock() + defer ptr.mutex.Unlock() + + // Merge and sort. + newFiles := append(ptr.files, files...) + sort.Slice(newFiles, func(i, j int) bool { + return newFiles[i].Base() < newFiles[j].Base() + }) + + // Reject overlapping files. + // Discard adjacent identical files. 
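+	// Worked example of the invariant checked below (values invented for
+	// illustration): a file with Base()==100 and Size()==49 covers token.Pos
+	// values 100 through 149, with 149 acting as its end position, so the
+	// next file must satisfy Base() >= prev.Base()+prev.Size()+1 == 150; a
+	// Base() of 149 would trip the overlap panic.
+	//
+	//	fset := token.NewFileSet()
+	//	_ = fset.AddFile("a.go", 100, 49) // owns Pos 100..149; next base must be >= 150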
+ out := newFiles[:0] + for i, file := range newFiles { + if i > 0 { + prev := newFiles[i-1] + if file == prev { + continue + } + if prev.Base()+prev.Size()+1 > file.Base() { + panic(fmt.Sprintf("file %s (%d-%d) overlaps with file %s (%d-%d)", + prev.Name(), prev.Base(), prev.Base()+prev.Size(), + file.Name(), file.Base(), file.Base()+file.Size())) + } + } + out = append(out, file) + } + newFiles = out + + ptr.files = newFiles + + // Advance FileSet.Base(). + if len(newFiles) > 0 { + last := newFiles[len(newFiles)-1] + newBase := last.Base() + last.Size() + 1 + if ptr.base < newBase { + ptr.base = newBase + } + } +} + +// FileSetFor returns a new FileSet containing a sequence of new Files with +// the same base, size, and line as the input files, for use in APIs that +// require a FileSet. +// +// Precondition: the input files must be non-overlapping, and sorted in order +// of their Base. +func FileSetFor(files ...*token.File) *token.FileSet { + fset := token.NewFileSet() + for _, f := range files { + f2 := fset.AddFile(f.Name(), f.Base(), f.Size()) + lines := GetLines(f) + f2.SetLines(lines) + } + return fset +} + +// CloneFileSet creates a new FileSet holding all files in fset. It does not +// create copies of the token.Files in fset: they are added to the resulting +// FileSet unmodified. +func CloneFileSet(fset *token.FileSet) *token.FileSet { + var files []*token.File + fset.Iterate(func(f *token.File) bool { + files = append(files, f) + return true + }) + newFileSet := token.NewFileSet() + AddExistingFiles(newFileSet, files) + return newFileSet +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go new file mode 100644 index 0000000000..d0d0649fe2 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/common.go @@ -0,0 +1,204 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package typeparams contains common utilities for writing tools that interact +// with generic Go code, as introduced with Go 1.18. +// +// Many of the types and functions in this package are proxies for the new APIs +// introduced in the standard library with Go 1.18. For example, the +// typeparams.Union type is an alias for go/types.Union, and the ForTypeSpec +// function returns the value of the go/ast.TypeSpec.TypeParams field. At Go +// versions older than 1.18 these helpers are implemented as stubs, allowing +// users of this package to write code that handles generic constructs inline, +// even if the Go version being used to compile does not support generics. +// +// Additionally, this package contains common utilities for working with the +// new generic constructs, to supplement the standard library APIs. Notably, +// the StructuralTerms API computes a minimal representation of the structural +// restrictions on a type parameter. +// +// An external version of these APIs is available in the +// golang.org/x/exp/typeparams module. +package typeparams + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" +) + +// UnpackIndexExpr extracts data from AST nodes that represent index +// expressions. +// +// For an ast.IndexExpr, the resulting indices slice will contain exactly one +// index expression. For an ast.IndexListExpr (go1.18+), it may have a variable +// number of index expressions. 
+//
+// For nodes that don't represent index expressions, the first return value of
+// UnpackIndexExpr will be nil.
+func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) {
+	switch e := n.(type) {
+	case *ast.IndexExpr:
+		return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack
+	case *IndexListExpr:
+		return e.X, e.Lbrack, e.Indices, e.Rbrack
+	}
+	return nil, token.NoPos, nil, token.NoPos
+}
+
+// PackIndexExpr returns an *ast.IndexExpr or *ast.IndexListExpr, depending on
+// the cardinality of indices. Calling PackIndexExpr with len(indices) == 0
+// will panic.
+func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) ast.Expr {
+	switch len(indices) {
+	case 0:
+		panic("empty indices")
+	case 1:
+		return &ast.IndexExpr{
+			X:      x,
+			Lbrack: lbrack,
+			Index:  indices[0],
+			Rbrack: rbrack,
+		}
+	default:
+		return &IndexListExpr{
+			X:       x,
+			Lbrack:  lbrack,
+			Indices: indices,
+			Rbrack:  rbrack,
+		}
+	}
+}
+
+// IsTypeParam reports whether t is a type parameter.
+func IsTypeParam(t types.Type) bool {
+	_, ok := t.(*TypeParam)
+	return ok
+}
+
+// OriginMethod returns the origin method associated with the method fn.
+// For methods on a non-generic receiver base type, this is just
+// fn. However, for methods with a generic receiver, OriginMethod returns the
+// corresponding method in the method set of the origin type.
+//
+// As a special case, if fn is not a method (has no receiver), OriginMethod
+// returns fn.
+func OriginMethod(fn *types.Func) *types.Func {
+	recv := fn.Type().(*types.Signature).Recv()
+	if recv == nil {
+		return fn
+	}
+	base := recv.Type()
+	p, isPtr := base.(*types.Pointer)
+	if isPtr {
+		base = p.Elem()
+	}
+	named, isNamed := base.(*types.Named)
+	if !isNamed {
+		// Receiver is a *types.Interface.
+		return fn
+	}
+	if ForNamed(named).Len() == 0 {
+		// Receiver base has no type parameters, so we can avoid the lookup below.
+		return fn
+	}
+	orig := NamedTypeOrigin(named)
+	gfn, _, _ := types.LookupFieldOrMethod(orig, true, fn.Pkg(), fn.Name())
+
+	// This is a fix for a gopls crash (#60628) due to a go/types bug (#60634). In:
+	// 	package p
+	// 	type T *int
+	// 	func (*T) f() {}
+	// LookupFieldOrMethod(T, true, p, f)=nil, but NewMethodSet(*T)={(*T).f}.
+	// Here we make them consistent by force.
+	// (The go/types bug is general, but this workaround is reached only
+	// for generic T thanks to the early return above.)
+	if gfn == nil {
+		mset := types.NewMethodSet(types.NewPointer(orig))
+		for i := 0; i < mset.Len(); i++ {
+			m := mset.At(i)
+			if m.Obj().Id() == fn.Id() {
+				gfn = m.Obj()
+				break
+			}
+		}
+	}
+
+	// In golang/go#61196, we observe another crash, this time inexplicable.
+	if gfn == nil {
+		panic(fmt.Sprintf("missing origin method for %s.%s; named == origin: %t, named.NumMethods(): %d, origin.NumMethods(): %d", named, fn, named == orig, named.NumMethods(), orig.NumMethods()))
+	}
+
+	return gfn.(*types.Func)
+}
+
+// GenericAssignableTo is a generalization of types.AssignableTo that
+// implements the following rule for uninstantiated generic types:
+//
+// If V and T are generic named types, then V is considered assignable to T if,
+// for every possible instantiation of V[A_1, ..., A_N], the instantiation
+// T[A_1, ..., A_N] is valid and V[A_1, ..., A_N] implements T[A_1, ..., A_N].
+//
+// If T has structural constraints, they must be satisfied by V.
+//
+// For example, consider the following type declarations:
+//
+// type Interface[T any] interface {
+// Accept(T)
+// }
+//
+// type Container[T any] struct {
+// Element T
+// }
+//
+// func (c Container[T]) Accept(t T) { c.Element = t }
+//
+// In this case, GenericAssignableTo reports that instantiations of Container
+// are assignable to the corresponding instantiation of Interface.
+func GenericAssignableTo(ctxt *Context, V, T types.Type) bool {
+ // If V and T are not both named, or do not have matching non-empty type
+ // parameter lists, fall back on types.AssignableTo.
+
+ VN, Vnamed := V.(*types.Named)
+ TN, Tnamed := T.(*types.Named)
+ if !Vnamed || !Tnamed {
+ return types.AssignableTo(V, T)
+ }
+
+ vtparams := ForNamed(VN)
+ ttparams := ForNamed(TN)
+ if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || NamedTypeArgs(VN).Len() != 0 || NamedTypeArgs(TN).Len() != 0 {
+ return types.AssignableTo(V, T)
+ }
+
+ // V and T have the same (non-zero) number of type params. Instantiate both
+ // with the type parameters of V. This must always succeed for V, and will
+ // succeed for T if and only if the type set of each type parameter of V is a
+ // subset of the type set of the corresponding type parameter of T, meaning
+ // that every instantiation of V corresponds to a valid instantiation of T.
+
+ // Minor optimization: ensure we share a context across the two
+ // instantiations below.
+ if ctxt == nil {
+ ctxt = NewContext()
+ }
+
+ var targs []types.Type
+ for i := 0; i < vtparams.Len(); i++ {
+ targs = append(targs, vtparams.At(i))
+ }
+
+ vinst, err := Instantiate(ctxt, V, targs, true)
+ if err != nil {
+ panic("type parameters should satisfy their own constraints")
+ }
+
+ tinst, err := Instantiate(ctxt, T, targs, true)
+ if err != nil {
+ return false
+ }
+
+ return types.AssignableTo(vinst, tinst)
+}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go
new file mode 100644
index 0000000000..71248209ee
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/coretype.go
@@ -0,0 +1,122 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeparams
+
+import (
+ "go/types"
+)
+
+// CoreType returns the core type of T or nil if T does not have a core type.
+//
+// See https://go.dev/ref/spec#Core_types for the definition of a core type.
+func CoreType(T types.Type) types.Type {
+ U := T.Underlying()
+ if _, ok := U.(*types.Interface); !ok {
+ return U // for non-interface types.
+ }
+
+ terms, err := _NormalTerms(U)
+ if len(terms) == 0 || err != nil {
+ // len(terms) == 0 => the interface has an empty type set.
+ // err != nil => U is invalid, exceeds complexity bounds, or has an empty type set.
+ return nil // no core type.
+ }
+
+ U = terms[0].Type().Underlying()
+ var identical int // i in [0,identical) => Identical(U, terms[i].Type().Underlying())
+ for identical = 1; identical < len(terms); identical++ {
+ if !types.Identical(U, terms[identical].Type().Underlying()) {
+ break
+ }
+ }
+
+ if identical == len(terms) {
+ // https://go.dev/ref/spec#Core_types
+ // "There is a single type U which is the underlying type of all types in the type set of T"
+ return U
+ }
+ ch, ok := U.(*types.Chan)
+ if !ok {
+ return nil // no core type as identical < len(terms) and U is not a channel.
+ } + // https://go.dev/ref/spec#Core_types + // "the type chan E if T contains only bidirectional channels, or the type chan<- E or + // <-chan E depending on the direction of the directional channels present." + for chans := identical; chans < len(terms); chans++ { + curr, ok := terms[chans].Type().Underlying().(*types.Chan) + if !ok { + return nil + } + if !types.Identical(ch.Elem(), curr.Elem()) { + return nil // channel elements are not identical. + } + if ch.Dir() == types.SendRecv { + // ch is bidirectional. We can safely always use curr's direction. + ch = curr + } else if curr.Dir() != types.SendRecv && ch.Dir() != curr.Dir() { + // ch and curr are not bidirectional and not the same direction. + return nil + } + } + return ch +} + +// _NormalTerms returns a slice of terms representing the normalized structural +// type restrictions of a type, if any. +// +// For all types other than *types.TypeParam, *types.Interface, and +// *types.Union, this is just a single term with Tilde() == false and +// Type() == typ. For *types.TypeParam, *types.Interface, and *types.Union, see +// below. +// +// Structural type restrictions of a type parameter are created via +// non-interface types embedded in its constraint interface (directly, or via a +// chain of interface embeddings). For example, in the declaration type +// T[P interface{~int; m()}] int the structural restriction of the type +// parameter P is ~int. +// +// With interface embedding and unions, the specification of structural type +// restrictions may be arbitrarily complex. For example, consider the +// following: +// +// type A interface{ ~string|~[]byte } +// +// type B interface{ int|string } +// +// type C interface { ~string|~int } +// +// type T[P interface{ A|B; C }] int +// +// In this example, the structural type restriction of P is ~string|int: A|B +// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, +// which when intersected with C (~string|~int) yields ~string|int. +// +// _NormalTerms computes these expansions and reductions, producing a +// "normalized" form of the embeddings. A structural restriction is normalized +// if it is a single union containing no interface terms, and is minimal in the +// sense that removing any term changes the set of types satisfying the +// constraint. It is left as a proof for the reader that, modulo sorting, there +// is exactly one such normalized form. +// +// Because the minimal representation always takes this form, _NormalTerms +// returns a slice of tilde terms corresponding to the terms of the union in +// the normalized structural restriction. An error is returned if the type is +// invalid, exceeds complexity bounds, or has an empty type set. In the latter +// case, _NormalTerms returns ErrEmptyTypeSet. +// +// _NormalTerms makes no guarantees about the order of terms, except that it +// is deterministic. +func _NormalTerms(typ types.Type) ([]*Term, error) { + switch typ := typ.(type) { + case *TypeParam: + return StructuralTerms(typ) + case *Union: + return UnionTermSet(typ) + case *types.Interface: + return InterfaceTermSet(typ) + default: + return []*Term{NewTerm(false, typ)}, nil + } +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go b/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go new file mode 100644 index 0000000000..18212390e1 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go @@ -0,0 +1,12 @@ +// Copyright 2021 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.18 +// +build !go1.18 + +package typeparams + +// Enabled reports whether type parameters are enabled in the current build +// environment. +const Enabled = false diff --git a/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go b/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go new file mode 100644 index 0000000000..d67148823c --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go @@ -0,0 +1,15 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 +// +build go1.18 + +package typeparams + +// Note: this constant is in a separate file as this is the only acceptable +// diff between the <1.18 API of this package and the 1.18 API. + +// Enabled reports whether type parameters are enabled in the current build +// environment. +const Enabled = true diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go new file mode 100644 index 0000000000..9c631b6512 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/normalize.go @@ -0,0 +1,218 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeparams + +import ( + "errors" + "fmt" + "go/types" + "os" + "strings" +) + +//go:generate go run copytermlist.go + +const debug = false + +var ErrEmptyTypeSet = errors.New("empty type set") + +// StructuralTerms returns a slice of terms representing the normalized +// structural type restrictions of a type parameter, if any. +// +// Structural type restrictions of a type parameter are created via +// non-interface types embedded in its constraint interface (directly, or via a +// chain of interface embeddings). For example, in the declaration +// +// type T[P interface{~int; m()}] int +// +// the structural restriction of the type parameter P is ~int. +// +// With interface embedding and unions, the specification of structural type +// restrictions may be arbitrarily complex. For example, consider the +// following: +// +// type A interface{ ~string|~[]byte } +// +// type B interface{ int|string } +// +// type C interface { ~string|~int } +// +// type T[P interface{ A|B; C }] int +// +// In this example, the structural type restriction of P is ~string|int: A|B +// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, +// which when intersected with C (~string|~int) yields ~string|int. +// +// StructuralTerms computes these expansions and reductions, producing a +// "normalized" form of the embeddings. A structural restriction is normalized +// if it is a single union containing no interface terms, and is minimal in the +// sense that removing any term changes the set of types satisfying the +// constraint. It is left as a proof for the reader that, modulo sorting, there +// is exactly one such normalized form. +// +// Because the minimal representation always takes this form, StructuralTerms +// returns a slice of tilde terms corresponding to the terms of the union in +// the normalized structural restriction. An error is returned if the +// constraint interface is invalid, exceeds complexity bounds, or has an empty +// type set. 
In the latter case, StructuralTerms returns ErrEmptyTypeSet. +// +// StructuralTerms makes no guarantees about the order of terms, except that it +// is deterministic. +func StructuralTerms(tparam *TypeParam) ([]*Term, error) { + constraint := tparam.Constraint() + if constraint == nil { + return nil, fmt.Errorf("%s has nil constraint", tparam) + } + iface, _ := constraint.Underlying().(*types.Interface) + if iface == nil { + return nil, fmt.Errorf("constraint is %T, not *types.Interface", constraint.Underlying()) + } + return InterfaceTermSet(iface) +} + +// InterfaceTermSet computes the normalized terms for a constraint interface, +// returning an error if the term set cannot be computed or is empty. In the +// latter case, the error will be ErrEmptyTypeSet. +// +// See the documentation of StructuralTerms for more information on +// normalization. +func InterfaceTermSet(iface *types.Interface) ([]*Term, error) { + return computeTermSet(iface) +} + +// UnionTermSet computes the normalized terms for a union, returning an error +// if the term set cannot be computed or is empty. In the latter case, the +// error will be ErrEmptyTypeSet. +// +// See the documentation of StructuralTerms for more information on +// normalization. +func UnionTermSet(union *Union) ([]*Term, error) { + return computeTermSet(union) +} + +func computeTermSet(typ types.Type) ([]*Term, error) { + tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0) + if err != nil { + return nil, err + } + if tset.terms.isEmpty() { + return nil, ErrEmptyTypeSet + } + if tset.terms.isAll() { + return nil, nil + } + var terms []*Term + for _, term := range tset.terms { + terms = append(terms, NewTerm(term.tilde, term.typ)) + } + return terms, nil +} + +// A termSet holds the normalized set of terms for a given type. +// +// The name termSet is intentionally distinct from 'type set': a type set is +// all types that implement a type (and includes method restrictions), whereas +// a term set just represents the structural restrictions on a type. +type termSet struct { + complete bool + terms termlist +} + +func indentf(depth int, format string, args ...interface{}) { + fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...) +} + +func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth int) (res *termSet, err error) { + if t == nil { + panic("nil type") + } + + if debug { + indentf(depth, "%s", t.String()) + defer func() { + if err != nil { + indentf(depth, "=> %s", err) + } else { + indentf(depth, "=> %s", res.terms.String()) + } + }() + } + + const maxTermCount = 100 + if tset, ok := seen[t]; ok { + if !tset.complete { + return nil, fmt.Errorf("cycle detected in the declaration of %s", t) + } + return tset, nil + } + + // Mark the current type as seen to avoid infinite recursion. + tset := new(termSet) + defer func() { + tset.complete = true + }() + seen[t] = tset + + switch u := t.Underlying().(type) { + case *types.Interface: + // The term set of an interface is the intersection of the term sets of its + // embedded types. 
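+ // For example (an illustrative note, reusing the A/B/C declarations from
+ // the StructuralTerms documentation above): for interface{ A|B; C }, the
+ // term set of A|B, ~string|~[]byte|int|string, reduces to
+ // ~string|~[]byte|int, and intersecting it with C's ~string|~int yields
+ // ~string|int.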
+ tset.terms = allTermlist
+ for i := 0; i < u.NumEmbeddeds(); i++ {
+ embedded := u.EmbeddedType(i)
+ if _, ok := embedded.Underlying().(*TypeParam); ok {
+ return nil, fmt.Errorf("invalid embedded type %T", embedded)
+ }
+ tset2, err := computeTermSetInternal(embedded, seen, depth+1)
+ if err != nil {
+ return nil, err
+ }
+ tset.terms = tset.terms.intersect(tset2.terms)
+ }
+ case *Union:
+ // The term set of a union is the union of term sets of its terms.
+ tset.terms = nil
+ for i := 0; i < u.Len(); i++ {
+ t := u.Term(i)
+ var terms termlist
+ switch t.Type().Underlying().(type) {
+ case *types.Interface:
+ tset2, err := computeTermSetInternal(t.Type(), seen, depth+1)
+ if err != nil {
+ return nil, err
+ }
+ terms = tset2.terms
+ case *TypeParam, *Union:
+ // A stand-alone type parameter or union is not permitted as union
+ // term.
+ return nil, fmt.Errorf("invalid union term %T", t)
+ default:
+ if t.Type() == types.Typ[types.Invalid] {
+ continue
+ }
+ terms = termlist{{t.Tilde(), t.Type()}}
+ }
+ tset.terms = tset.terms.union(terms)
+ if len(tset.terms) > maxTermCount {
+ return nil, fmt.Errorf("exceeded max term count %d", maxTermCount)
+ }
+ }
+ case *TypeParam:
+ panic("unreachable")
+ default:
+ // For all other types, the term set is just a single non-tilde term
+ // holding the type itself.
+ if u != types.Typ[types.Invalid] {
+ tset.terms = termlist{{false, t}}
+ }
+ }
+ return tset, nil
+}
+
+// under is a facade for the go/types internal function of the same name. It is
+// used by typeterm.go.
+func under(t types.Type) types.Type {
+ return t.Underlying()
+}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/vendor/golang.org/x/tools/internal/typeparams/termlist.go
new file mode 100644
index 0000000000..cbd12f8013
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/termlist.go
@@ -0,0 +1,163 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by copytermlist.go DO NOT EDIT.
+
+package typeparams
+
+import (
+ "bytes"
+ "go/types"
+)
+
+// A termlist represents the type set represented by the union
+// t1 ∪ t2 ∪ ... ∪ tn of the type sets of the terms t1 to tn.
+// A termlist is in normal form if all terms are disjoint.
+// termlist operations don't require the operands to be in
+// normal form.
+type termlist []*term
+
+// allTermlist represents the set of all types.
+// It is in normal form.
+var allTermlist = termlist{new(term)}
+
+// String prints the termlist exactly (without normalization).
+func (xl termlist) String() string {
+ if len(xl) == 0 {
+ return "∅"
+ }
+ var buf bytes.Buffer
+ for i, x := range xl {
+ if i > 0 {
+ buf.WriteString(" | ")
+ }
+ buf.WriteString(x.String())
+ }
+ return buf.String()
+}
+
+// isEmpty reports whether the termlist xl represents the empty set of types.
+func (xl termlist) isEmpty() bool {
+ // If there's a non-nil term, the entire list is not empty.
+ // If the termlist is in normal form, this requires at most
+ // one iteration.
+ for _, x := range xl {
+ if x != nil {
+ return false
+ }
+ }
+ return true
+}
+
+// isAll reports whether the termlist xl represents the set of all types.
+func (xl termlist) isAll() bool {
+ // If there's a 𝓤 term, the entire list is 𝓤.
+ // If the termlist is in normal form, this requires at most
+ // one iteration.
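+ // (Clarifying note, not in the upstream comment: a 𝓤 term is a non-nil
+ // *term whose typ field is nil; see the term documentation in typeterm.go.)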
+ for _, x := range xl {
+ if x != nil && x.typ == nil {
+ return true
+ }
+ }
+ return false
+}
+
+// norm returns the normal form of xl.
+func (xl termlist) norm() termlist {
+ // Quadratic algorithm, but good enough for now.
+ // TODO(gri) fix asymptotic performance
+ used := make([]bool, len(xl))
+ var rl termlist
+ for i, xi := range xl {
+ if xi == nil || used[i] {
+ continue
+ }
+ for j := i + 1; j < len(xl); j++ {
+ xj := xl[j]
+ if xj == nil || used[j] {
+ continue
+ }
+ if u1, u2 := xi.union(xj); u2 == nil {
+ // If we encounter a 𝓤 term, the entire list is 𝓤.
+ // Exit early.
+ // (Note that this is not just an optimization;
+ // if we continue, we may end up with a 𝓤 term
+ // and other terms and the result would not be
+ // in normal form.)
+ if u1.typ == nil {
+ return allTermlist
+ }
+ xi = u1
+ used[j] = true // xj is now unioned into xi - ignore it in future iterations
+ }
+ }
+ rl = append(rl, xi)
+ }
+ return rl
+}
+
+// union returns the union xl ∪ yl.
+func (xl termlist) union(yl termlist) termlist {
+ return append(xl, yl...).norm()
+}
+
+// intersect returns the intersection xl ∩ yl.
+func (xl termlist) intersect(yl termlist) termlist {
+ if xl.isEmpty() || yl.isEmpty() {
+ return nil
+ }
+
+ // Quadratic algorithm, but good enough for now.
+ // TODO(gri) fix asymptotic performance
+ var rl termlist
+ for _, x := range xl {
+ for _, y := range yl {
+ if r := x.intersect(y); r != nil {
+ rl = append(rl, r)
+ }
+ }
+ }
+ return rl.norm()
+}
+
+// equal reports whether xl and yl represent the same type set.
+func (xl termlist) equal(yl termlist) bool {
+ // TODO(gri) this should be more efficient
+ return xl.subsetOf(yl) && yl.subsetOf(xl)
+}
+
+// includes reports whether t ∈ xl.
+func (xl termlist) includes(t types.Type) bool {
+ for _, x := range xl {
+ if x.includes(t) {
+ return true
+ }
+ }
+ return false
+}
+
+// supersetOf reports whether y ⊆ xl.
+func (xl termlist) supersetOf(y *term) bool {
+ for _, x := range xl {
+ if y.subsetOf(x) {
+ return true
+ }
+ }
+ return false
+}
+
+// subsetOf reports whether xl ⊆ yl.
+func (xl termlist) subsetOf(yl termlist) bool {
+ if yl.isEmpty() {
+ return xl.isEmpty()
+ }
+
+ // each term x of xl must be a subset of yl
+ for _, x := range xl {
+ if !yl.supersetOf(x) {
+ return false // x is not a subset of yl
+ }
+ }
+ return true
+}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go
new file mode 100644
index 0000000000..7ed86e1711
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go
@@ -0,0 +1,197 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.18
+// +build !go1.18
+
+package typeparams
+
+import (
+ "go/ast"
+ "go/token"
+ "go/types"
+)
+
+func unsupported() {
+ panic("type parameters are unsupported at this go version")
+}
+
+// IndexListExpr is a placeholder type, as type parameters are not supported at
+// this Go version. Its methods panic on use.
+type IndexListExpr struct {
+ ast.Expr
+ X ast.Expr // expression
+ Lbrack token.Pos // position of "["
+ Indices []ast.Expr // index expressions
+ Rbrack token.Pos // position of "]"
+}
+
+// ForTypeSpec returns an empty field list, as type parameters are not
+// supported at this Go version.
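+// (Illustrative note, not in the upstream comment: the nil result is safe
+// for most callers, since *ast.FieldList methods such as NumFields treat a
+// nil receiver as an empty list.)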
+func ForTypeSpec(*ast.TypeSpec) *ast.FieldList { + return nil +} + +// ForFuncType returns an empty field list, as type parameters are not +// supported at this Go version. +func ForFuncType(*ast.FuncType) *ast.FieldList { + return nil +} + +// TypeParam is a placeholder type, as type parameters are not supported at +// this Go version. Its methods panic on use. +type TypeParam struct{ types.Type } + +func (*TypeParam) Index() int { unsupported(); return 0 } +func (*TypeParam) Constraint() types.Type { unsupported(); return nil } +func (*TypeParam) Obj() *types.TypeName { unsupported(); return nil } + +// TypeParamList is a placeholder for an empty type parameter list. +type TypeParamList struct{} + +func (*TypeParamList) Len() int { return 0 } +func (*TypeParamList) At(int) *TypeParam { unsupported(); return nil } + +// TypeList is a placeholder for an empty type list. +type TypeList struct{} + +func (*TypeList) Len() int { return 0 } +func (*TypeList) At(int) types.Type { unsupported(); return nil } + +// NewTypeParam is unsupported at this Go version, and panics. +func NewTypeParam(name *types.TypeName, constraint types.Type) *TypeParam { + unsupported() + return nil +} + +// SetTypeParamConstraint is unsupported at this Go version, and panics. +func SetTypeParamConstraint(tparam *TypeParam, constraint types.Type) { + unsupported() +} + +// NewSignatureType calls types.NewSignature, panicking if recvTypeParams or +// typeParams is non-empty. +func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature { + if len(recvTypeParams) != 0 || len(typeParams) != 0 { + panic("signatures cannot have type parameters at this Go version") + } + return types.NewSignature(recv, params, results, variadic) +} + +// ForSignature returns an empty slice. +func ForSignature(*types.Signature) *TypeParamList { + return nil +} + +// RecvTypeParams returns a nil slice. +func RecvTypeParams(sig *types.Signature) *TypeParamList { + return nil +} + +// IsComparable returns false, as no interfaces are type-restricted at this Go +// version. +func IsComparable(*types.Interface) bool { + return false +} + +// IsMethodSet returns true, as no interfaces are type-restricted at this Go +// version. +func IsMethodSet(*types.Interface) bool { + return true +} + +// IsImplicit returns false, as no interfaces are implicit at this Go version. +func IsImplicit(*types.Interface) bool { + return false +} + +// MarkImplicit does nothing, because this Go version does not have implicit +// interfaces. +func MarkImplicit(*types.Interface) {} + +// ForNamed returns an empty type parameter list, as type parameters are not +// supported at this Go version. +func ForNamed(*types.Named) *TypeParamList { + return nil +} + +// SetForNamed panics if tparams is non-empty. +func SetForNamed(_ *types.Named, tparams []*TypeParam) { + if len(tparams) > 0 { + unsupported() + } +} + +// NamedTypeArgs returns nil. +func NamedTypeArgs(*types.Named) *TypeList { + return nil +} + +// NamedTypeOrigin is the identity method at this Go version. +func NamedTypeOrigin(named *types.Named) *types.Named { + return named +} + +// Term holds information about a structural type restriction. 
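+// It mirrors the go1.18 API, where Term is instead an alias for types.Term
+// (see typeparams_go118.go).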
+type Term struct {
+ tilde bool
+ typ types.Type
+}
+
+func (m *Term) Tilde() bool { return m.tilde }
+func (m *Term) Type() types.Type { return m.typ }
+func (m *Term) String() string {
+ pre := ""
+ if m.tilde {
+ pre = "~"
+ }
+ return pre + m.typ.String()
+}
+
+// NewTerm returns a placeholder Term with the given tilde flag and type.
+func NewTerm(tilde bool, typ types.Type) *Term {
+ return &Term{tilde, typ}
+}
+
+// Union is a placeholder type, as type parameters are not supported at this Go
+// version. Its methods panic on use.
+type Union struct{ types.Type }
+
+func (*Union) Len() int { return 0 }
+func (*Union) Term(i int) *Term { unsupported(); return nil }
+
+// NewUnion is unsupported at this Go version, and panics.
+func NewUnion(terms []*Term) *Union {
+ unsupported()
+ return nil
+}
+
+// InitInstanceInfo is a noop at this Go version.
+func InitInstanceInfo(*types.Info) {}
+
+// Instance is a placeholder type, as type parameters are not supported at this
+// Go version.
+type Instance struct {
+ TypeArgs *TypeList
+ Type types.Type
+}
+
+// GetInstances returns a nil map, as type parameters are not supported at this
+// Go version.
+func GetInstances(info *types.Info) map[*ast.Ident]Instance { return nil }
+
+// Context is a placeholder type, as type parameters are not supported at
+// this Go version.
+type Context struct{}
+
+// NewContext returns a placeholder Context instance.
+func NewContext() *Context {
+ return &Context{}
+}
+
+// Instantiate is unsupported at this Go version, and panics.
+func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) {
+ unsupported()
+ return nil, nil
+}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go
new file mode 100644
index 0000000000..cf301af1db
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go
@@ -0,0 +1,151 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+package typeparams
+
+import (
+ "go/ast"
+ "go/types"
+)
+
+// IndexListExpr is an alias for ast.IndexListExpr.
+type IndexListExpr = ast.IndexListExpr
+
+// ForTypeSpec returns n.TypeParams.
+func ForTypeSpec(n *ast.TypeSpec) *ast.FieldList {
+ if n == nil {
+ return nil
+ }
+ return n.TypeParams
+}
+
+// ForFuncType returns n.TypeParams.
+func ForFuncType(n *ast.FuncType) *ast.FieldList {
+ if n == nil {
+ return nil
+ }
+ return n.TypeParams
+}
+
+// TypeParam is an alias for types.TypeParam.
+type TypeParam = types.TypeParam
+
+// TypeParamList is an alias for types.TypeParamList.
+type TypeParamList = types.TypeParamList
+
+// TypeList is an alias for types.TypeList.
+type TypeList = types.TypeList
+
+// NewTypeParam calls types.NewTypeParam.
+func NewTypeParam(name *types.TypeName, constraint types.Type) *TypeParam {
+ return types.NewTypeParam(name, constraint)
+}
+
+// SetTypeParamConstraint calls tparam.SetConstraint(constraint).
+func SetTypeParamConstraint(tparam *TypeParam, constraint types.Type) {
+ tparam.SetConstraint(constraint)
+}
+
+// NewSignatureType calls types.NewSignatureType.
+func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature {
+ return types.NewSignatureType(recv, recvTypeParams, typeParams, params, results, variadic)
+}
+
+// ForSignature returns sig.TypeParams().
+func ForSignature(sig *types.Signature) *TypeParamList {
+ return sig.TypeParams()
+}
+
+// RecvTypeParams returns sig.RecvTypeParams().
+func RecvTypeParams(sig *types.Signature) *TypeParamList {
+ return sig.RecvTypeParams()
+}
+
+// IsComparable calls iface.IsComparable().
+func IsComparable(iface *types.Interface) bool {
+ return iface.IsComparable()
+}
+
+// IsMethodSet calls iface.IsMethodSet().
+func IsMethodSet(iface *types.Interface) bool {
+ return iface.IsMethodSet()
+}
+
+// IsImplicit calls iface.IsImplicit().
+func IsImplicit(iface *types.Interface) bool {
+ return iface.IsImplicit()
+}
+
+// MarkImplicit calls iface.MarkImplicit().
+func MarkImplicit(iface *types.Interface) {
+ iface.MarkImplicit()
+}
+
+// ForNamed extracts the (possibly empty) type parameter object list from
+// named.
+func ForNamed(named *types.Named) *TypeParamList {
+ return named.TypeParams()
+}
+
+// SetForNamed sets the type params tparams on n. Each tparam must be of
+// dynamic type *types.TypeParam.
+func SetForNamed(n *types.Named, tparams []*TypeParam) {
+ n.SetTypeParams(tparams)
+}
+
+// NamedTypeArgs returns named.TypeArgs().
+func NamedTypeArgs(named *types.Named) *TypeList {
+ return named.TypeArgs()
+}
+
+// NamedTypeOrigin returns named.Origin().
+func NamedTypeOrigin(named *types.Named) *types.Named {
+ return named.Origin()
+}
+
+// Term is an alias for types.Term.
+type Term = types.Term
+
+// NewTerm calls types.NewTerm.
+func NewTerm(tilde bool, typ types.Type) *Term {
+ return types.NewTerm(tilde, typ)
+}
+
+// Union is an alias for types.Union.
+type Union = types.Union
+
+// NewUnion calls types.NewUnion.
+func NewUnion(terms []*Term) *Union {
+ return types.NewUnion(terms)
+}
+
+// InitInstanceInfo initializes info to record information about type and
+// function instances.
+func InitInstanceInfo(info *types.Info) {
+ info.Instances = make(map[*ast.Ident]types.Instance)
+}
+
+// Instance is an alias for types.Instance.
+type Instance = types.Instance
+
+// GetInstances returns info.Instances.
+func GetInstances(info *types.Info) map[*ast.Ident]Instance {
+ return info.Instances
+}
+
+// Context is an alias for types.Context.
+type Context = types.Context
+
+// NewContext calls types.NewContext.
+func NewContext() *Context {
+ return types.NewContext()
+}
+
+// Instantiate calls types.Instantiate.
+func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) {
+ return types.Instantiate(ctxt, typ, targs, validate)
+}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
new file mode 100644
index 0000000000..7350bb702a
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
@@ -0,0 +1,169 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by copytermlist.go DO NOT EDIT.
+
+package typeparams
+
+import "go/types"
+
+// A term describes elementary type sets:
+//
+// ∅: (*term)(nil) == ∅ // set of no types (empty set)
+// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse)
+// T: &term{false, T} == {T} // set of type T
+// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t
+type term struct {
+ tilde bool // valid if typ != nil
+ typ types.Type
+}
+
+func (x *term) String() string {
+ switch {
+ case x == nil:
+ return "∅"
+ case x.typ == nil:
+ return "𝓤"
+ case x.tilde:
+ return "~" + x.typ.String()
+ default:
+ return x.typ.String()
+ }
+}
+
+// equal reports whether x and y represent the same type set.
+func (x *term) equal(y *term) bool {
+ // easy cases
+ switch {
+ case x == nil || y == nil:
+ return x == y
+ case x.typ == nil || y.typ == nil:
+ return x.typ == y.typ
+ }
+ // ∅ ⊂ x, y ⊂ 𝓤
+
+ return x.tilde == y.tilde && types.Identical(x.typ, y.typ)
+}
+
+// union returns the union x ∪ y: zero, one, or two non-nil terms.
+func (x *term) union(y *term) (_, _ *term) {
+ // easy cases
+ switch {
+ case x == nil && y == nil:
+ return nil, nil // ∅ ∪ ∅ == ∅
+ case x == nil:
+ return y, nil // ∅ ∪ y == y
+ case y == nil:
+ return x, nil // x ∪ ∅ == x
+ case x.typ == nil:
+ return x, nil // 𝓤 ∪ y == 𝓤
+ case y.typ == nil:
+ return y, nil // x ∪ 𝓤 == 𝓤
+ }
+ // ∅ ⊂ x, y ⊂ 𝓤
+
+ if x.disjoint(y) {
+ return x, y // x ∪ y == (x, y) if x ∩ y == ∅
+ }
+ // x.typ == y.typ
+
+ // ~t ∪ ~t == ~t
+ // ~t ∪ T == ~t
+ // T ∪ ~t == ~t
+ // T ∪ T == T
+ if x.tilde || !y.tilde {
+ return x, nil
+ }
+ return y, nil
+}
+
+// intersect returns the intersection x ∩ y.
+func (x *term) intersect(y *term) *term {
+ // easy cases
+ switch {
+ case x == nil || y == nil:
+ return nil // ∅ ∩ y == ∅ and x ∩ ∅ == ∅
+ case x.typ == nil:
+ return y // 𝓤 ∩ y == y
+ case y.typ == nil:
+ return x // x ∩ 𝓤 == x
+ }
+ // ∅ ⊂ x, y ⊂ 𝓤
+
+ if x.disjoint(y) {
+ return nil // x ∩ y == ∅ if x ∩ y == ∅
+ }
+ // x.typ == y.typ
+
+ // ~t ∩ ~t == ~t
+ // ~t ∩ T == T
+ // T ∩ ~t == T
+ // T ∩ T == T
+ if !x.tilde || y.tilde {
+ return x
+ }
+ return y
+}
+
+// includes reports whether t ∈ x.
+func (x *term) includes(t types.Type) bool {
+ // easy cases
+ switch {
+ case x == nil:
+ return false // t ∈ ∅ == false
+ case x.typ == nil:
+ return true // t ∈ 𝓤 == true
+ }
+ // ∅ ⊂ x ⊂ 𝓤
+
+ u := t
+ if x.tilde {
+ u = under(u)
+ }
+ return types.Identical(x.typ, u)
+}
+
+// subsetOf reports whether x ⊆ y.
+func (x *term) subsetOf(y *term) bool {
+ // easy cases
+ switch {
+ case x == nil:
+ return true // ∅ ⊆ y == true
+ case y == nil:
+ return false // x ⊆ ∅ == false since x != ∅
+ case y.typ == nil:
+ return true // x ⊆ 𝓤 == true
+ case x.typ == nil:
+ return false // 𝓤 ⊆ y == false since y != 𝓤
+ }
+ // ∅ ⊂ x, y ⊂ 𝓤
+
+ if x.disjoint(y) {
+ return false // x ⊆ y == false if x ∩ y == ∅
+ }
+ // x.typ == y.typ
+
+ // ~t ⊆ ~t == true
+ // ~t ⊆ T == false
+ // T ⊆ ~t == true
+ // T ⊆ T == true
+ return !x.tilde || y.tilde
+}
+
+// disjoint reports whether x ∩ y == ∅.
+// x.typ and y.typ must not be nil.
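+// For example (an illustrative note, not part of the upstream comment):
+// ~int and int are not disjoint, since int's underlying type is int, while
+// int and string are disjoint.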
+func (x *term) disjoint(y *term) bool { + if debug && (x.typ == nil || y.typ == nil) { + panic("invalid argument(s)") + } + ux := x.typ + if y.tilde { + ux = under(ux) + } + uy := y.typ + if x.tilde { + uy = under(uy) + } + return !types.Identical(ux, uy) +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go new file mode 100644 index 0000000000..07484073a5 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go @@ -0,0 +1,1560 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +//go:generate stringer -type=ErrorCode + +type ErrorCode int + +// This file defines the error codes that can be produced during type-checking. +// Collectively, these codes provide an identifier that may be used to +// implement special handling for certain types of errors. +// +// Error codes should be fine-grained enough that the exact nature of the error +// can be easily determined, but coarse enough that they are not an +// implementation detail of the type checking algorithm. As a rule-of-thumb, +// errors should be considered equivalent if there is a theoretical refactoring +// of the type checker in which they are emitted in exactly one place. For +// example, the type checker emits different error messages for "too many +// arguments" and "too few arguments", but one can imagine an alternative type +// checker where this check instead just emits a single "wrong number of +// arguments", so these errors should have the same code. +// +// Error code names should be as brief as possible while retaining accuracy and +// distinctiveness. In most cases names should start with an adjective +// describing the nature of the error (e.g. "invalid", "unused", "misplaced"), +// and end with a noun identifying the relevant language object. For example, +// "DuplicateDecl" or "InvalidSliceExpr". For brevity, naming follows the +// convention that "bad" implies a problem with syntax, and "invalid" implies a +// problem with types. + +const ( + // InvalidSyntaxTree occurs if an invalid syntax tree is provided + // to the type checker. It should never happen. + InvalidSyntaxTree ErrorCode = -1 +) + +const ( + _ ErrorCode = iota + + // Test is reserved for errors that only apply while in self-test mode. + Test + + /* package names */ + + // BlankPkgName occurs when a package name is the blank identifier "_". + // + // Per the spec: + // "The PackageName must not be the blank identifier." + BlankPkgName + + // MismatchedPkgName occurs when a file's package name doesn't match the + // package name already established by other files. + MismatchedPkgName + + // InvalidPkgUse occurs when a package identifier is used outside of a + // selector expression. + // + // Example: + // import "fmt" + // + // var _ = fmt + InvalidPkgUse + + /* imports */ + + // BadImportPath occurs when an import path is not valid. + BadImportPath + + // BrokenImport occurs when importing a package fails. + // + // Example: + // import "amissingpackage" + BrokenImport + + // ImportCRenamed occurs when the special import "C" is renamed. "C" is a + // pseudo-package, and must not be renamed. + // + // Example: + // import _ "C" + ImportCRenamed + + // UnusedImport occurs when an import is unused. 
+ //
+ // Example:
+ // import "fmt"
+ //
+ // func main() {}
+ UnusedImport
+
+ /* initialization */
+
+ // InvalidInitCycle occurs when an invalid cycle is detected within the
+ // initialization graph.
+ //
+ // Example:
+ // var x int = f()
+ //
+ // func f() int { return x }
+ InvalidInitCycle
+
+ /* decls */
+
+ // DuplicateDecl occurs when an identifier is declared multiple times.
+ //
+ // Example:
+ // var x = 1
+ // var x = 2
+ DuplicateDecl
+
+ // InvalidDeclCycle occurs when a declaration cycle is not valid.
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // type T struct {
+ // a [n]int
+ // }
+ //
+ // var n = unsafe.Sizeof(T{})
+ InvalidDeclCycle
+
+ // InvalidTypeCycle occurs when a cycle in type definitions results in a
+ // type that is not well-defined.
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // type T [unsafe.Sizeof(T{})]int
+ InvalidTypeCycle
+
+ /* decls > const */
+
+ // InvalidConstInit occurs when a const declaration has a non-constant
+ // initializer.
+ //
+ // Example:
+ // var x int
+ // const _ = x
+ InvalidConstInit
+
+ // InvalidConstVal occurs when a const value cannot be converted to its
+ // target type.
+ //
+ // TODO(findleyr): this error code and example are not very clear. Consider
+ // removing it.
+ //
+ // Example:
+ // const _ = 1 << "hello"
+ InvalidConstVal
+
+ // InvalidConstType occurs when the underlying type in a const declaration
+ // is not a valid constant type.
+ //
+ // Example:
+ // const c *int = 4
+ InvalidConstType
+
+ /* decls > var (+ other variable assignment codes) */
+
+ // UntypedNilUse occurs when the predeclared (untyped) value nil is used to
+ // initialize a variable declared without an explicit type.
+ //
+ // Example:
+ // var x = nil
+ UntypedNilUse
+
+ // WrongAssignCount occurs when the number of values on the right-hand side
+ // of an assignment or initialization expression does not match the number
+ // of variables on the left-hand side.
+ //
+ // Example:
+ // var x = 1, 2
+ WrongAssignCount
+
+ // UnassignableOperand occurs when the left-hand side of an assignment is
+ // not assignable.
+ //
+ // Example:
+ // func f() {
+ // const c = 1
+ // c = 2
+ // }
+ UnassignableOperand
+
+ // NoNewVar occurs when a short variable declaration (':=') does not declare
+ // new variables.
+ //
+ // Example:
+ // func f() {
+ // x := 1
+ // x := 2
+ // }
+ NoNewVar
+
+ // MultiValAssignOp occurs when an assignment operation (+=, *=, etc) does
+ // not have single-valued left-hand or right-hand side.
+ //
+ // Per the spec:
+ // "In assignment operations, both the left- and right-hand expression lists
+ // must contain exactly one single-valued expression"
+ //
+ // Example:
+ // func f() int {
+ // x, y := 1, 2
+ // x, y += 1
+ // return x + y
+ // }
+ MultiValAssignOp
+
+ // InvalidIfaceAssign occurs when a value of type T is used as an
+ // interface, but T does not implement a method of the expected interface.
+ //
+ // Example:
+ // type I interface {
+ // f()
+ // }
+ //
+ // type T int
+ //
+ // var x I = T(1)
+ InvalidIfaceAssign
+
+ // InvalidChanAssign occurs when a chan assignment is invalid.
+ //
+ // Per the spec, a value x is assignable to a channel type T if:
+ // "x is a bidirectional channel value, T is a channel type, x's type V and
+ // T have identical element types, and at least one of V or T is not a
+ // defined type."
+ //
+ // Example:
+ // type T1 chan int
+ // type T2 chan int
+ //
+ // var x T1
+ // // Invalid assignment because both types are named
+ // var _ T2 = x
+ InvalidChanAssign
+
+ // IncompatibleAssign occurs when the type of the right-hand side expression
+ // in an assignment cannot be assigned to the type of the variable being
+ // assigned.
+ //
+ // Example:
+ // var x []int
+ // var _ int = x
+ IncompatibleAssign
+
+ // UnaddressableFieldAssign occurs when trying to assign to a struct field
+ // in a map value.
+ //
+ // Example:
+ // func f() {
+ // m := make(map[string]struct{i int})
+ // m["foo"].i = 42
+ // }
+ UnaddressableFieldAssign
+
+ /* decls > type (+ other type expression codes) */
+
+ // NotAType occurs when the identifier used as the underlying type in a type
+ // declaration or the right-hand side of a type alias does not denote a type.
+ //
+ // Example:
+ // var S = 2
+ //
+ // type T S
+ NotAType
+
+ // InvalidArrayLen occurs when an array length is not a constant value.
+ //
+ // Example:
+ // var n = 3
+ // var _ = [n]int{}
+ InvalidArrayLen
+
+ // BlankIfaceMethod occurs when a method name is '_'.
+ //
+ // Per the spec:
+ // "The name of each explicitly specified method must be unique and not
+ // blank."
+ //
+ // Example:
+ // type T interface {
+ // _(int)
+ // }
+ BlankIfaceMethod
+
+ // IncomparableMapKey occurs when a map key type does not support the == and
+ // != operators.
+ //
+ // Per the spec:
+ // "The comparison operators == and != must be fully defined for operands of
+ // the key type; thus the key type must not be a function, map, or slice."
+ //
+ // Example:
+ // var x map[T]int
+ //
+ // type T []int
+ IncomparableMapKey
+
+ // InvalidIfaceEmbed occurs when a non-interface type is embedded in an
+ // interface.
+ //
+ // Example:
+ // type T struct {}
+ //
+ // func (T) m()
+ //
+ // type I interface {
+ // T
+ // }
+ InvalidIfaceEmbed
+
+ // InvalidPtrEmbed occurs when an embedded field is of the pointer form *T,
+ // and T is itself a pointer, an unsafe.Pointer, or an interface.
+ //
+ // Per the spec:
+ // "An embedded field must be specified as a type name T or as a pointer to
+ // a non-interface type name *T, and T itself may not be a pointer type."
+ //
+ // Example:
+ // type T *int
+ //
+ // type S struct {
+ // *T
+ // }
+ InvalidPtrEmbed
+
+ /* decls > func and method */
+
+ // BadRecv occurs when a method declaration does not have exactly one
+ // receiver parameter.
+ //
+ // Example:
+ // func () _() {}
+ BadRecv
+
+ // InvalidRecv occurs when a receiver type expression is not of the form T
+ // or *T, or T is a pointer type.
+ //
+ // Example:
+ // type T struct {}
+ //
+ // func (**T) m() {}
+ InvalidRecv
+
+ // DuplicateFieldAndMethod occurs when an identifier appears as both a field
+ // and method name.
+ //
+ // Example:
+ // type T struct {
+ // m int
+ // }
+ //
+ // func (T) m() {}
+ DuplicateFieldAndMethod
+
+ // DuplicateMethod occurs when two methods on the same receiver type have
+ // the same name.
+ //
+ // Example:
+ // type T struct {}
+ // func (T) m() {}
+ // func (T) m(i int) int { return i }
+ DuplicateMethod
+
+ /* decls > special */
+
+ // InvalidBlank occurs when a blank identifier is used as a value or type.
+ //
+ // Per the spec:
+ // "The blank identifier may appear as an operand only on the left-hand side
+ // of an assignment."
+ //
+ // Example:
+ // var x = _
+ InvalidBlank
+
+ // InvalidIota occurs when the predeclared identifier iota is used outside
+ // of a constant declaration.
+ // + // Example: + // var x = iota + InvalidIota + + // MissingInitBody occurs when an init function is missing its body. + // + // Example: + // func init() + MissingInitBody + + // InvalidInitSig occurs when an init function declares parameters or + // results. + // + // Example: + // func init() int { return 1 } + InvalidInitSig + + // InvalidInitDecl occurs when init is declared as anything other than a + // function. + // + // Example: + // var init = 1 + InvalidInitDecl + + // InvalidMainDecl occurs when main is declared as anything other than a + // function, in a main package. + InvalidMainDecl + + /* exprs */ + + // TooManyValues occurs when a function returns too many values for the + // expression context in which it is used. + // + // Example: + // func ReturnTwo() (int, int) { + // return 1, 2 + // } + // + // var x = ReturnTwo() + TooManyValues + + // NotAnExpr occurs when a type expression is used where a value expression + // is expected. + // + // Example: + // type T struct {} + // + // func f() { + // T + // } + NotAnExpr + + /* exprs > const */ + + // TruncatedFloat occurs when a float constant is truncated to an integer + // value. + // + // Example: + // var _ int = 98.6 + TruncatedFloat + + // NumericOverflow occurs when a numeric constant overflows its target type. + // + // Example: + // var x int8 = 1000 + NumericOverflow + + /* exprs > operation */ + + // UndefinedOp occurs when an operator is not defined for the type(s) used + // in an operation. + // + // Example: + // var c = "a" - "b" + UndefinedOp + + // MismatchedTypes occurs when operand types are incompatible in a binary + // operation. + // + // Example: + // var a = "hello" + // var b = 1 + // var c = a - b + MismatchedTypes + + // DivByZero occurs when a division operation is provable at compile + // time to be a division by zero. + // + // Example: + // const divisor = 0 + // var x int = 1/divisor + DivByZero + + // NonNumericIncDec occurs when an increment or decrement operator is + // applied to a non-numeric value. + // + // Example: + // func f() { + // var c = "c" + // c++ + // } + NonNumericIncDec + + /* exprs > ptr */ + + // UnaddressableOperand occurs when the & operator is applied to an + // unaddressable expression. + // + // Example: + // var x = &1 + UnaddressableOperand + + // InvalidIndirection occurs when a non-pointer value is indirected via the + // '*' operator. + // + // Example: + // var x int + // var y = *x + InvalidIndirection + + /* exprs > [] */ + + // NonIndexableOperand occurs when an index operation is applied to a value + // that cannot be indexed. + // + // Example: + // var x = 1 + // var y = x[1] + NonIndexableOperand + + // InvalidIndex occurs when an index argument is not of integer type, + // negative, or out-of-bounds. + // + // Example: + // var s = [...]int{1,2,3} + // var x = s[5] + // + // Example: + // var s = []int{1,2,3} + // var _ = s[-1] + // + // Example: + // var s = []int{1,2,3} + // var i string + // var _ = s[i] + InvalidIndex + + // SwappedSliceIndices occurs when constant indices in a slice expression + // are decreasing in value. + // + // Example: + // var _ = []int{1,2,3}[2:1] + SwappedSliceIndices + + /* operators > slice */ + + // NonSliceableOperand occurs when a slice operation is applied to a value + // whose type is not sliceable, or is unaddressable. 
+ // + // Example: + // var x = [...]int{1, 2, 3}[:1] + // + // Example: + // var x = 1 + // var y = 1[:1] + NonSliceableOperand + + // InvalidSliceExpr occurs when a three-index slice expression (a[x:y:z]) is + // applied to a string. + // + // Example: + // var s = "hello" + // var x = s[1:2:3] + InvalidSliceExpr + + /* exprs > shift */ + + // InvalidShiftCount occurs when the right-hand side of a shift operation is + // either non-integer, negative, or too large. + // + // Example: + // var ( + // x string + // y int = 1 << x + // ) + InvalidShiftCount + + // InvalidShiftOperand occurs when the shifted operand is not an integer. + // + // Example: + // var s = "hello" + // var x = s << 2 + InvalidShiftOperand + + /* exprs > chan */ + + // InvalidReceive occurs when there is a channel receive from a value that + // is either not a channel, or is a send-only channel. + // + // Example: + // func f() { + // var x = 1 + // <-x + // } + InvalidReceive + + // InvalidSend occurs when there is a channel send to a value that is not a + // channel, or is a receive-only channel. + // + // Example: + // func f() { + // var x = 1 + // x <- "hello!" + // } + InvalidSend + + /* exprs > literal */ + + // DuplicateLitKey occurs when an index is duplicated in a slice, array, or + // map literal. + // + // Example: + // var _ = []int{0:1, 0:2} + // + // Example: + // var _ = map[string]int{"a": 1, "a": 2} + DuplicateLitKey + + // MissingLitKey occurs when a map literal is missing a key expression. + // + // Example: + // var _ = map[string]int{1} + MissingLitKey + + // InvalidLitIndex occurs when the key in a key-value element of a slice or + // array literal is not an integer constant. + // + // Example: + // var i = 0 + // var x = []string{i: "world"} + InvalidLitIndex + + // OversizeArrayLit occurs when an array literal exceeds its length. + // + // Example: + // var _ = [2]int{1,2,3} + OversizeArrayLit + + // MixedStructLit occurs when a struct literal contains a mix of positional + // and named elements. + // + // Example: + // var _ = struct{i, j int}{i: 1, 2} + MixedStructLit + + // InvalidStructLit occurs when a positional struct literal has an incorrect + // number of values. + // + // Example: + // var _ = struct{i, j int}{1,2,3} + InvalidStructLit + + // MissingLitField occurs when a struct literal refers to a field that does + // not exist on the struct type. + // + // Example: + // var _ = struct{i int}{j: 2} + MissingLitField + + // DuplicateLitField occurs when a struct literal contains duplicated + // fields. + // + // Example: + // var _ = struct{i int}{i: 1, i: 2} + DuplicateLitField + + // UnexportedLitField occurs when a positional struct literal implicitly + // assigns an unexported field of an imported type. + UnexportedLitField + + // InvalidLitField occurs when a field name is not a valid identifier. + // + // Example: + // var _ = struct{i int}{1: 1} + InvalidLitField + + // UntypedLit occurs when a composite literal omits a required type + // identifier. + // + // Example: + // type outer struct{ + // inner struct { i int } + // } + // + // var _ = outer{inner: {1}} + UntypedLit + + // InvalidLit occurs when a composite literal expression does not match its + // type. + // + // Example: + // type P *struct{ + // x int + // } + // var _ = P {} + InvalidLit + + /* exprs > selector */ + + // AmbiguousSelector occurs when a selector is ambiguous. 
+ //
+ // Example:
+ // type E1 struct { i int }
+ // type E2 struct { i int }
+ // type T struct { E1; E2 }
+ //
+ // var x T
+ // var _ = x.i
+ AmbiguousSelector
+
+ // UndeclaredImportedName occurs when a package-qualified identifier is
+ // undeclared by the imported package.
+ //
+ // Example:
+ // import "go/types"
+ //
+ // var _ = types.NotAnActualIdentifier
+ UndeclaredImportedName
+
+ // UnexportedName occurs when a selector refers to an unexported identifier
+ // of an imported package.
+ //
+ // Example:
+ // import "reflect"
+ //
+ // type _ reflect.flag
+ UnexportedName
+
+ // UndeclaredName occurs when an identifier is not declared in the current
+ // scope.
+ //
+ // Example:
+ // var x T
+ UndeclaredName
+
+ // MissingFieldOrMethod occurs when a selector references a field or method
+ // that does not exist.
+ //
+ // Example:
+ // type T struct {}
+ //
+ // var x = T{}.f
+ MissingFieldOrMethod
+
+ /* exprs > ... */
+
+ // BadDotDotDotSyntax occurs when a "..." occurs in a context where it is
+ // not valid.
+ //
+ // Example:
+ // var _ = map[int][...]int{0: {}}
+ BadDotDotDotSyntax
+
+ // NonVariadicDotDotDot occurs when a "..." is used on the final argument to
+ // a non-variadic function.
+ //
+ // Example:
+ // func printArgs(s []string) {
+ // for _, a := range s {
+ // println(a)
+ // }
+ // }
+ //
+ // func f() {
+ // s := []string{"a", "b", "c"}
+ // printArgs(s...)
+ // }
+ NonVariadicDotDotDot
+
+ // MisplacedDotDotDot occurs when a "..." is used somewhere other than the
+ // final argument to a function call.
+ //
+ // Example:
+ // func printArgs(args ...int) {
+ // for _, a := range args {
+ // println(a)
+ // }
+ // }
+ //
+ // func f() {
+ // a := []int{1,2,3}
+ // printArgs(0, a...)
+ // }
+ MisplacedDotDotDot
+
+ // InvalidDotDotDotOperand occurs when a "..." operator is applied to a
+ // single-valued operand.
+ //
+ // Example:
+ // func printArgs(args ...int) {
+ // for _, a := range args {
+ // println(a)
+ // }
+ // }
+ //
+ // func f() {
+ // a := 1
+ // printArgs(a...)
+ // }
+ //
+ // Example:
+ // func args() (int, int) {
+ // return 1, 2
+ // }
+ //
+ // func printArgs(args ...int) {
+ // for _, a := range args {
+ // println(a)
+ // }
+ // }
+ //
+ // func g() {
+ // printArgs(args()...)
+ // }
+ InvalidDotDotDotOperand
+
+ // InvalidDotDotDot occurs when a "..." is used in a non-variadic built-in
+ // function.
+ //
+ // Example:
+ // var s = []int{1, 2, 3}
+ // var l = len(s...)
+ InvalidDotDotDot
+
+ /* exprs > built-in */
+
+ // UncalledBuiltin occurs when a built-in function is used as a
+ // function-valued expression, instead of being called.
+ //
+ // Per the spec:
+ // "The built-in functions do not have standard Go types, so they can only
+ // appear in call expressions; they cannot be used as function values."
+ //
+ // Example:
+ // var _ = copy
+ UncalledBuiltin
+
+ // InvalidAppend occurs when append is called with a first argument that is
+ // not a slice.
+ //
+ // Example:
+ // var _ = append(1, 2)
+ InvalidAppend
+
+ // InvalidCap occurs when an argument to the cap built-in function is not of
+ // supported type.
+ //
+ // See https://golang.org/ref/spec#Length_and_capacity for information on
+ // which underlying types are supported as arguments to cap and len.
+ //
+ // Example:
+ // var s = 2
+ // var x = cap(s)
+ InvalidCap
+
+ // InvalidClose occurs when close(...) is called with an argument that is
+ // not of channel type, or that is a receive-only channel.
+ //
+ // Example:
+ // func f() {
+ // var x int
+ // close(x)
+ // }
+ InvalidClose
+
+ // InvalidCopy occurs when the arguments are not of slice type or do not
+ // have compatible type.
+ //
+ // See https://golang.org/ref/spec#Appending_and_copying_slices for more
+ // information on the type requirements for the copy built-in.
+ //
+ // Example:
+ // func f() {
+ // var x []int
+ // y := []int64{1,2,3}
+ // copy(x, y)
+ // }
+ InvalidCopy
+
+ // InvalidComplex occurs when the complex built-in function is called with
+ // arguments with incompatible types.
+ //
+ // Example:
+ // var _ = complex(float32(1), float64(2))
+ InvalidComplex
+
+ // InvalidDelete occurs when the delete built-in function is called with a
+ // first argument that is not a map.
+ //
+ // Example:
+ // func f() {
+ // m := "hello"
+ // delete(m, "e")
+ // }
+ InvalidDelete
+
+ // InvalidImag occurs when the imag built-in function is called with an
+ // argument that does not have complex type.
+ //
+ // Example:
+ // var _ = imag(int(1))
+ InvalidImag
+
+ // InvalidLen occurs when an argument to the len built-in function is not of
+ // supported type.
+ //
+ // See https://golang.org/ref/spec#Length_and_capacity for information on
+ // which underlying types are supported as arguments to cap and len.
+ //
+ // Example:
+ // var s = 2
+ // var x = len(s)
+ InvalidLen
+
+ // SwappedMakeArgs occurs when make is called with three arguments, and its
+ // length argument is larger than its capacity argument.
+ //
+ // Example:
+ // var x = make([]int, 3, 2)
+ SwappedMakeArgs
+
+ // InvalidMake occurs when make is called with an unsupported type argument.
+ //
+ // See https://golang.org/ref/spec#Making_slices_maps_and_channels for
+ // information on the types that may be created using make.
+ //
+ // Example:
+ // var x = make(int)
+ InvalidMake
+
+ // InvalidReal occurs when the real built-in function is called with an
+ // argument that does not have complex type.
+ //
+ // Example:
+ // var _ = real(int(1))
+ InvalidReal
+
+ /* exprs > assertion */
+
+ // InvalidAssert occurs when a type assertion is applied to a
+ // value that is not of interface type.
+ //
+ // Example:
+ // var x = 1
+ // var _ = x.(float64)
+ InvalidAssert
+
+ // ImpossibleAssert occurs for a type assertion x.(T) when the value x of
+ // interface cannot have dynamic type T, due to a missing or mismatching
+ // method on T.
+ //
+ // Example:
+ // type T int
+ //
+ // func (t *T) m() int { return int(*t) }
+ //
+ // type I interface { m() int }
+ //
+ // var x I
+ // var _ = x.(T)
+ ImpossibleAssert
+
+ /* exprs > conversion */
+
+ // InvalidConversion occurs when the argument type cannot be converted to the
+ // target.
+ //
+ // See https://golang.org/ref/spec#Conversions for the rules of
+ // convertibility.
+ //
+ // Example:
+ // var x float64
+ // var _ = string(x)
+ InvalidConversion
+
+ // InvalidUntypedConversion occurs when there is no valid implicit
+ // conversion from an untyped value satisfying the type constraints of the
+ // context in which it is used.
+ //
+ // Example:
+ // var _ = 1 + ""
+ InvalidUntypedConversion
+
+ /* offsetof */
+
+ // BadOffsetofSyntax occurs when unsafe.Offsetof is called with an argument
+ // that is not a selector expression.
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var x int
+ // var _ = unsafe.Offsetof(x)
+ BadOffsetofSyntax
+
+ // InvalidOffsetof occurs when unsafe.Offsetof is called with a method
+ // selector, rather than a field selector, or when the field is embedded via
+ // a pointer.
+ //
+ // Per the spec:
+ //
+ // "If f is an embedded field, it must be reachable without pointer
+ // indirections through fields of the struct."
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // type T struct { f int }
+ // type S struct { *T }
+ // var s S
+ // var _ = unsafe.Offsetof(s.f)
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // type S struct{}
+ //
+ // func (S) m() {}
+ //
+ // var s S
+ // var _ = unsafe.Offsetof(s.m)
+ InvalidOffsetof
+
+ /* control flow > scope */
+
+ // UnusedExpr occurs when a side-effect free expression is used as a
+ // statement. Such a statement has no effect.
+ //
+ // Example:
+ // func f(i int) {
+ // i*i
+ // }
+ UnusedExpr
+
+ // UnusedVar occurs when a variable is declared but unused.
+ //
+ // Example:
+ // func f() {
+ // x := 1
+ // }
+ UnusedVar
+
+ // MissingReturn occurs when a function with results is missing a return
+ // statement.
+ //
+ // Example:
+ // func f() int {}
+ MissingReturn
+
+ // WrongResultCount occurs when a return statement returns an incorrect
+ // number of values.
+ //
+ // Example:
+ // func ReturnOne() int {
+ // return 1, 2
+ // }
+ WrongResultCount
+
+ // OutOfScopeResult occurs when the name of a value implicitly returned by
+ // an empty return statement is shadowed in a nested scope.
+ //
+ // Example:
+ // func factor(n int) (i int) {
+ // for i := 2; i < n; i++ {
+ // if n%i == 0 {
+ // return
+ // }
+ // }
+ // return 0
+ // }
+ OutOfScopeResult
+
+ /* control flow > if */
+
+ // InvalidCond occurs when an if condition is not a boolean expression.
+ //
+ // Example:
+ // func checkReturn(i int) {
+ // if i {
+ // panic("non-zero return")
+ // }
+ // }
+ InvalidCond
+
+ /* control flow > for */
+
+ // InvalidPostDecl occurs when there is a declaration in a for-loop post
+ // statement.
+ //
+ // Example:
+ // func f() {
+ // for i := 0; i < 10; j := 0 {}
+ // }
+ InvalidPostDecl
+
+ // InvalidChanRange occurs when a send-only channel is used in a range
+ // expression.
+ //
+ // Example:
+ // func sum(c chan<- int) {
+ // s := 0
+ // for i := range c {
+ // s += i
+ // }
+ // }
+ InvalidChanRange
+
+ // InvalidIterVar occurs when two iteration variables are used while ranging
+ // over a channel.
+ //
+ // Example:
+ // func f(c chan int) {
+ // for k, v := range c {
+ // println(k, v)
+ // }
+ // }
+ InvalidIterVar
+
+ // InvalidRangeExpr occurs when the type of a range expression is not array,
+ // slice, string, map, or channel.
+ //
+ // Example:
+ // func f(i int) {
+ // for j := range i {
+ // println(j)
+ // }
+ // }
+ InvalidRangeExpr
+
+ /* control flow > switch */
+
+ // MisplacedBreak occurs when a break statement is not within a for, switch,
+ // or select statement of the innermost function definition.
+ //
+ // Example:
+ // func f() {
+ // break
+ // }
+ MisplacedBreak
+
+ // MisplacedContinue occurs when a continue statement is not within a for
+ // loop of the innermost function definition.
+ // + // Example: + // func sumeven(n int) int { + // proceed := func() { + // continue + // } + // sum := 0 + // for i := 1; i <= n; i++ { + // if i % 2 != 0 { + // proceed() + // } + // sum += i + // } + // return sum + // } + MisplacedContinue + + // MisplacedFallthrough occurs when a fallthrough statement is not within an + // expression switch. + // + // Example: + // func typename(i interface{}) string { + // switch i.(type) { + // case int64: + // fallthrough + // case int: + // return "int" + // } + // return "unsupported" + // } + MisplacedFallthrough + + // DuplicateCase occurs when a type or expression switch has duplicate + // cases. + // + // Example: + // func printInt(i int) { + // switch i { + // case 1: + // println("one") + // case 1: + // println("One") + // } + // } + DuplicateCase + + // DuplicateDefault occurs when a type or expression switch has multiple + // default clauses. + // + // Example: + // func printInt(i int) { + // switch i { + // case 1: + // println("one") + // default: + // println("One") + // default: + // println("1") + // } + // } + DuplicateDefault + + // BadTypeKeyword occurs when a .(type) expression is used anywhere other + // than a type switch. + // + // Example: + // type I interface { + // m() + // } + // var t I + // var _ = t.(type) + BadTypeKeyword + + // InvalidTypeSwitch occurs when .(type) is used on an expression that is + // not of interface type. + // + // Example: + // func f(i int) { + // switch x := i.(type) {} + // } + InvalidTypeSwitch + + // InvalidExprSwitch occurs when a switch expression is not comparable. + // + // Example: + // func _() { + // var a struct{ _ func() } + // switch a /* ERROR cannot switch on a */ { + // } + // } + InvalidExprSwitch + + /* control flow > select */ + + // InvalidSelectCase occurs when a select case is not a channel send or + // receive. + // + // Example: + // func checkChan(c <-chan int) bool { + // select { + // case c: + // return true + // default: + // return false + // } + // } + InvalidSelectCase + + /* control flow > labels and jumps */ + + // UndeclaredLabel occurs when an undeclared label is jumped to. + // + // Example: + // func f() { + // goto L + // } + UndeclaredLabel + + // DuplicateLabel occurs when a label is declared more than once. + // + // Example: + // func f() int { + // L: + // L: + // return 1 + // } + DuplicateLabel + + // MisplacedLabel occurs when a break or continue label is not on a for, + // switch, or select statement. + // + // Example: + // func f() { + // L: + // a := []int{1,2,3} + // for _, e := range a { + // if e > 10 { + // break L + // } + // println(a) + // } + // } + MisplacedLabel + + // UnusedLabel occurs when a label is declared but not used. + // + // Example: + // func f() { + // L: + // } + UnusedLabel + + // JumpOverDecl occurs when a label jumps over a variable declaration. + // + // Example: + // func f() int { + // goto L + // x := 2 + // L: + // x++ + // return x + // } + JumpOverDecl + + // JumpIntoBlock occurs when a forward jump goes to a label inside a nested + // block. + // + // Example: + // func f(x int) { + // goto L + // if x > 0 { + // L: + // print("inside block") + // } + // } + JumpIntoBlock + + /* control flow > calls */ + + // InvalidMethodExpr occurs when a pointer method is called but the argument + // is not addressable. 
+ //
+ // Example:
+ // type T struct {}
+ //
+ // func (*T) m() int { return 1 }
+ //
+ // var _ = T.m(T{})
+ InvalidMethodExpr
+
+ // WrongArgCount occurs when too few or too many arguments are passed in a
+ // function call.
+ //
+ // Example:
+ // func f(i int) {}
+ // var x = f()
+ WrongArgCount
+
+ // InvalidCall occurs when an expression is called that is not of function
+ // type.
+ //
+ // Example:
+ // var x = "x"
+ // var y = x()
+ InvalidCall
+
+ /* control flow > suspended */
+
+ // UnusedResults occurs when a restricted expression-only built-in function
+ // is suspended via go or defer. Such a suspension discards the results of
+ // these side-effect free built-in functions, and therefore is ineffectual.
+ //
+ // Example:
+ // func f(a []int) int {
+ // defer len(a)
+ // return 0
+ // }
+ UnusedResults
+
+ // InvalidDefer occurs when a deferred expression is not a function call,
+ // for example if the expression is a type conversion.
+ //
+ // Example:
+ // func f(i int) int {
+ // defer int32(i)
+ // return i
+ // }
+ InvalidDefer
+
+ // InvalidGo occurs when a go expression is not a function call, for example
+ // if the expression is a type conversion.
+ //
+ // Example:
+ // func f(i int) int {
+ // go int32(i)
+ // return i
+ // }
+ InvalidGo
+
+ // All codes below were added in Go 1.17.
+
+ /* decl */
+
+ // BadDecl occurs when a declaration has invalid syntax.
+ BadDecl
+
+ // RepeatedDecl occurs when an identifier occurs more than once on the left
+ // hand side of a short variable declaration.
+ //
+ // Example:
+ // func _() {
+ // x, y, y := 1, 2, 3
+ // }
+ RepeatedDecl
+
+ /* unsafe */
+
+ // InvalidUnsafeAdd occurs when unsafe.Add is called with a
+ // length argument that is not of integer type.
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var p unsafe.Pointer
+ // var _ = unsafe.Add(p, float64(1))
+ InvalidUnsafeAdd
+
+ // InvalidUnsafeSlice occurs when unsafe.Slice is called with a
+ // pointer argument that is not of pointer type or a length argument
+ // that is not of integer type, negative, or out of bounds.
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var x int
+ // var _ = unsafe.Slice(x, 1)
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var x int
+ // var _ = unsafe.Slice(&x, float64(1))
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var x int
+ // var _ = unsafe.Slice(&x, -1)
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var x int
+ // var _ = unsafe.Slice(&x, uint64(1) << 63)
+ InvalidUnsafeSlice
+
+ // All codes below were added in Go 1.18.
+
+ /* features */
+
+ // UnsupportedFeature occurs when a language feature is used that is not
+ // supported at this Go version.
+ UnsupportedFeature
+
+ /* type params */
+
+ // NotAGenericType occurs when a non-generic type is used where a generic
+ // type is expected: in type or function instantiation.
+ //
+ // Example:
+ // type T int
+ //
+ // var _ T[int]
+ NotAGenericType
+
+ // WrongTypeArgCount occurs when a type or function is instantiated with an
+ // incorrect number of type arguments, including when a generic type or
+ // function is used without instantiation.
+ //
+ // Errors involving failed type inference are assigned other error codes.
+ //
+ // Example:
+ // type T[p any] int
+ //
+ // var _ T[int, string]
+ //
+ // Example:
+ // func f[T any]() {}
+ //
+ // var x = f
+ WrongTypeArgCount
+
+ // CannotInferTypeArgs occurs when type or function type argument inference
+ // fails to infer all type arguments.
+ // + // Example: + // func f[T any]() {} + // + // func _() { + // f() + // } + // + // Example: + // type N[P, Q any] struct{} + // + // var _ N[int] + CannotInferTypeArgs + + // InvalidTypeArg occurs when a type argument does not satisfy its + // corresponding type parameter constraints. + // + // Example: + // type T[P ~int] struct{} + // + // var _ T[string] + InvalidTypeArg // arguments? InferenceFailed + + // InvalidInstanceCycle occurs when an invalid cycle is detected + // within the instantiation graph. + // + // Example: + // func f[T any]() { f[*T]() } + InvalidInstanceCycle + + // InvalidUnion occurs when an embedded union or approximation element is + // not valid. + // + // Example: + // type _ interface { + // ~int | interface{ m() } + // } + InvalidUnion + + // MisplacedConstraintIface occurs when a constraint-type interface is used + // outside of constraint position. + // + // Example: + // type I interface { ~int } + // + // var _ I + MisplacedConstraintIface + + // InvalidMethodTypeParams occurs when methods have type parameters. + // + // It cannot be encountered with an AST parsed using go/parser. + InvalidMethodTypeParams + + // MisplacedTypeParam occurs when a type parameter is used in a place where + // it is not permitted. + // + // Example: + // type T[P any] P + // + // Example: + // type T[P any] struct{ *P } + MisplacedTypeParam + + // InvalidUnsafeSliceData occurs when unsafe.SliceData is called with + // an argument that is not of slice type. It also occurs if it is used + // in a package compiled for a language version before go1.20. + // + // Example: + // import "unsafe" + // + // var x int + // var _ = unsafe.SliceData(x) + InvalidUnsafeSliceData + + // InvalidUnsafeString occurs when unsafe.String is called with + // a length argument that is not of integer type, negative, or + // out of bounds. It also occurs if it is used in a package + // compiled for a language version before go1.20. + // + // Example: + // import "unsafe" + // + // var b [10]byte + // var _ = unsafe.String(&b[0], -1) + InvalidUnsafeString + + // InvalidUnsafeStringData occurs if it is used in a package + // compiled for a language version before go1.20. + _ // not used anymore + +) diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go new file mode 100644 index 0000000000..15ecf7c5de --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go @@ -0,0 +1,179 @@ +// Code generated by "stringer -type=ErrorCode"; DO NOT EDIT. + +package typesinternal + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
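+ // (The mechanism: x has length one, so each constant index expression
+ // below is a valid index, i.e. equal to 0, only while the named constant
+ // still has the listed value; any renumbering makes some index negative
+ // or out of range, and the file no longer compiles.)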
+ var x [1]struct{} + _ = x[InvalidSyntaxTree - -1] + _ = x[Test-1] + _ = x[BlankPkgName-2] + _ = x[MismatchedPkgName-3] + _ = x[InvalidPkgUse-4] + _ = x[BadImportPath-5] + _ = x[BrokenImport-6] + _ = x[ImportCRenamed-7] + _ = x[UnusedImport-8] + _ = x[InvalidInitCycle-9] + _ = x[DuplicateDecl-10] + _ = x[InvalidDeclCycle-11] + _ = x[InvalidTypeCycle-12] + _ = x[InvalidConstInit-13] + _ = x[InvalidConstVal-14] + _ = x[InvalidConstType-15] + _ = x[UntypedNilUse-16] + _ = x[WrongAssignCount-17] + _ = x[UnassignableOperand-18] + _ = x[NoNewVar-19] + _ = x[MultiValAssignOp-20] + _ = x[InvalidIfaceAssign-21] + _ = x[InvalidChanAssign-22] + _ = x[IncompatibleAssign-23] + _ = x[UnaddressableFieldAssign-24] + _ = x[NotAType-25] + _ = x[InvalidArrayLen-26] + _ = x[BlankIfaceMethod-27] + _ = x[IncomparableMapKey-28] + _ = x[InvalidIfaceEmbed-29] + _ = x[InvalidPtrEmbed-30] + _ = x[BadRecv-31] + _ = x[InvalidRecv-32] + _ = x[DuplicateFieldAndMethod-33] + _ = x[DuplicateMethod-34] + _ = x[InvalidBlank-35] + _ = x[InvalidIota-36] + _ = x[MissingInitBody-37] + _ = x[InvalidInitSig-38] + _ = x[InvalidInitDecl-39] + _ = x[InvalidMainDecl-40] + _ = x[TooManyValues-41] + _ = x[NotAnExpr-42] + _ = x[TruncatedFloat-43] + _ = x[NumericOverflow-44] + _ = x[UndefinedOp-45] + _ = x[MismatchedTypes-46] + _ = x[DivByZero-47] + _ = x[NonNumericIncDec-48] + _ = x[UnaddressableOperand-49] + _ = x[InvalidIndirection-50] + _ = x[NonIndexableOperand-51] + _ = x[InvalidIndex-52] + _ = x[SwappedSliceIndices-53] + _ = x[NonSliceableOperand-54] + _ = x[InvalidSliceExpr-55] + _ = x[InvalidShiftCount-56] + _ = x[InvalidShiftOperand-57] + _ = x[InvalidReceive-58] + _ = x[InvalidSend-59] + _ = x[DuplicateLitKey-60] + _ = x[MissingLitKey-61] + _ = x[InvalidLitIndex-62] + _ = x[OversizeArrayLit-63] + _ = x[MixedStructLit-64] + _ = x[InvalidStructLit-65] + _ = x[MissingLitField-66] + _ = x[DuplicateLitField-67] + _ = x[UnexportedLitField-68] + _ = x[InvalidLitField-69] + _ = x[UntypedLit-70] + _ = x[InvalidLit-71] + _ = x[AmbiguousSelector-72] + _ = x[UndeclaredImportedName-73] + _ = x[UnexportedName-74] + _ = x[UndeclaredName-75] + _ = x[MissingFieldOrMethod-76] + _ = x[BadDotDotDotSyntax-77] + _ = x[NonVariadicDotDotDot-78] + _ = x[MisplacedDotDotDot-79] + _ = x[InvalidDotDotDotOperand-80] + _ = x[InvalidDotDotDot-81] + _ = x[UncalledBuiltin-82] + _ = x[InvalidAppend-83] + _ = x[InvalidCap-84] + _ = x[InvalidClose-85] + _ = x[InvalidCopy-86] + _ = x[InvalidComplex-87] + _ = x[InvalidDelete-88] + _ = x[InvalidImag-89] + _ = x[InvalidLen-90] + _ = x[SwappedMakeArgs-91] + _ = x[InvalidMake-92] + _ = x[InvalidReal-93] + _ = x[InvalidAssert-94] + _ = x[ImpossibleAssert-95] + _ = x[InvalidConversion-96] + _ = x[InvalidUntypedConversion-97] + _ = x[BadOffsetofSyntax-98] + _ = x[InvalidOffsetof-99] + _ = x[UnusedExpr-100] + _ = x[UnusedVar-101] + _ = x[MissingReturn-102] + _ = x[WrongResultCount-103] + _ = x[OutOfScopeResult-104] + _ = x[InvalidCond-105] + _ = x[InvalidPostDecl-106] + _ = x[InvalidChanRange-107] + _ = x[InvalidIterVar-108] + _ = x[InvalidRangeExpr-109] + _ = x[MisplacedBreak-110] + _ = x[MisplacedContinue-111] + _ = x[MisplacedFallthrough-112] + _ = x[DuplicateCase-113] + _ = x[DuplicateDefault-114] + _ = x[BadTypeKeyword-115] + _ = x[InvalidTypeSwitch-116] + _ = x[InvalidExprSwitch-117] + _ = x[InvalidSelectCase-118] + _ = x[UndeclaredLabel-119] + _ = x[DuplicateLabel-120] + _ = x[MisplacedLabel-121] + _ = x[UnusedLabel-122] + _ = x[JumpOverDecl-123] + _ = x[JumpIntoBlock-124] + _ = x[InvalidMethodExpr-125] + _ = 
x[WrongArgCount-126] + _ = x[InvalidCall-127] + _ = x[UnusedResults-128] + _ = x[InvalidDefer-129] + _ = x[InvalidGo-130] + _ = x[BadDecl-131] + _ = x[RepeatedDecl-132] + _ = x[InvalidUnsafeAdd-133] + _ = x[InvalidUnsafeSlice-134] + _ = x[UnsupportedFeature-135] + _ = x[NotAGenericType-136] + _ = x[WrongTypeArgCount-137] + _ = x[CannotInferTypeArgs-138] + _ = x[InvalidTypeArg-139] + _ = x[InvalidInstanceCycle-140] + _ = x[InvalidUnion-141] + _ = x[MisplacedConstraintIface-142] + _ = x[InvalidMethodTypeParams-143] + _ = x[MisplacedTypeParam-144] + _ = x[InvalidUnsafeSliceData-145] + _ = x[InvalidUnsafeString-146] +} + +const ( + _ErrorCode_name_0 = "InvalidSyntaxTree" + _ErrorCode_name_1 = "TestBlankPkgNameMismatchedPkgNameInvalidPkgUseBadImportPathBrokenImportImportCRenamedUnusedImportInvalidInitCycleDuplicateDeclInvalidDeclCycleInvalidTypeCycleInvalidConstInitInvalidConstValInvalidConstTypeUntypedNilUseWrongAssignCountUnassignableOperandNoNewVarMultiValAssignOpInvalidIfaceAssignInvalidChanAssignIncompatibleAssignUnaddressableFieldAssignNotATypeInvalidArrayLenBlankIfaceMethodIncomparableMapKeyInvalidIfaceEmbedInvalidPtrEmbedBadRecvInvalidRecvDuplicateFieldAndMethodDuplicateMethodInvalidBlankInvalidIotaMissingInitBodyInvalidInitSigInvalidInitDeclInvalidMainDeclTooManyValuesNotAnExprTruncatedFloatNumericOverflowUndefinedOpMismatchedTypesDivByZeroNonNumericIncDecUnaddressableOperandInvalidIndirectionNonIndexableOperandInvalidIndexSwappedSliceIndicesNonSliceableOperandInvalidSliceExprInvalidShiftCountInvalidShiftOperandInvalidReceiveInvalidSendDuplicateLitKeyMissingLitKeyInvalidLitIndexOversizeArrayLitMixedStructLitInvalidStructLitMissingLitFieldDuplicateLitFieldUnexportedLitFieldInvalidLitFieldUntypedLitInvalidLitAmbiguousSelectorUndeclaredImportedNameUnexportedNameUndeclaredNameMissingFieldOrMethodBadDotDotDotSyntaxNonVariadicDotDotDotMisplacedDotDotDotInvalidDotDotDotOperandInvalidDotDotDotUncalledBuiltinInvalidAppendInvalidCapInvalidCloseInvalidCopyInvalidComplexInvalidDeleteInvalidImagInvalidLenSwappedMakeArgsInvalidMakeInvalidRealInvalidAssertImpossibleAssertInvalidConversionInvalidUntypedConversionBadOffsetofSyntaxInvalidOffsetofUnusedExprUnusedVarMissingReturnWrongResultCountOutOfScopeResultInvalidCondInvalidPostDeclInvalidChanRangeInvalidIterVarInvalidRangeExprMisplacedBreakMisplacedContinueMisplacedFallthroughDuplicateCaseDuplicateDefaultBadTypeKeywordInvalidTypeSwitchInvalidExprSwitchInvalidSelectCaseUndeclaredLabelDuplicateLabelMisplacedLabelUnusedLabelJumpOverDeclJumpIntoBlockInvalidMethodExprWrongArgCountInvalidCallUnusedResultsInvalidDeferInvalidGoBadDeclRepeatedDeclInvalidUnsafeAddInvalidUnsafeSliceUnsupportedFeatureNotAGenericTypeWrongTypeArgCountCannotInferTypeArgsInvalidTypeArgInvalidInstanceCycleInvalidUnionMisplacedConstraintIfaceInvalidMethodTypeParamsMisplacedTypeParamInvalidUnsafeSliceDataInvalidUnsafeString" +) + +var ( + _ErrorCode_index_1 = [...]uint16{0, 4, 16, 33, 46, 59, 71, 85, 97, 113, 126, 142, 158, 174, 189, 205, 218, 234, 253, 261, 277, 295, 312, 330, 354, 362, 377, 393, 411, 428, 443, 450, 461, 484, 499, 511, 522, 537, 551, 566, 581, 594, 603, 617, 632, 643, 658, 667, 683, 703, 721, 740, 752, 771, 790, 806, 823, 842, 856, 867, 882, 895, 910, 926, 940, 956, 971, 988, 1006, 1021, 1031, 1041, 1058, 1080, 1094, 1108, 1128, 1146, 1166, 1184, 1207, 1223, 1238, 1251, 1261, 1273, 1284, 1298, 1311, 1322, 1332, 1347, 1358, 1369, 1382, 1398, 1415, 1439, 1456, 1471, 1481, 1490, 1503, 1519, 1535, 1546, 1561, 1577, 1591, 1607, 1621, 1638, 1658, 1671, 1687, 1701, 1718, 
1735, 1752, 1767, 1781, 1795, 1806, 1818, 1831, 1848, 1861, 1872, 1885, 1897, 1906, 1913, 1925, 1941, 1959, 1977, 1992, 2009, 2028, 2042, 2062, 2074, 2098, 2121, 2139, 2161, 2180} +) + +func (i ErrorCode) String() string { + switch { + case i == -1: + return _ErrorCode_name_0 + case 1 <= i && i <= 146: + i -= 1 + return _ErrorCode_name_1[_ErrorCode_index_1[i]:_ErrorCode_index_1[i+1]] + default: + return "ErrorCode(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/objectpath.go b/vendor/golang.org/x/tools/internal/typesinternal/objectpath.go new file mode 100644 index 0000000000..5e96e89557 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/objectpath.go @@ -0,0 +1,24 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +import "go/types" + +// This file contains back doors that allow gopls to avoid method sorting when +// using the objectpath package. +// +// This is performance-critical in certain repositories, but changing the +// behavior of the objectpath package is still being discussed in +// golang/go#61443. If we decide to remove the sorting in objectpath we can +// simply delete these back doors. Otherwise, we should add a new API to +// objectpath that allows controlling the sorting. + +// SkipEncoderMethodSorting marks enc (which must be an *objectpath.Encoder) as +// not requiring sorted methods. +var SkipEncoderMethodSorting func(enc interface{}) + +// ObjectpathObject is like objectpath.Object, but allows suppressing method +// sorting. +var ObjectpathObject func(pkg *types.Package, p string, skipMethodSorting bool) (types.Object, error) diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go new file mode 100644 index 0000000000..ce7d4351b2 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -0,0 +1,52 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package typesinternal provides access to internal go/types APIs that are not +// yet exported. +package typesinternal + +import ( + "go/token" + "go/types" + "reflect" + "unsafe" +) + +func SetUsesCgo(conf *types.Config) bool { + v := reflect.ValueOf(conf).Elem() + + f := v.FieldByName("go115UsesCgo") + if !f.IsValid() { + f = v.FieldByName("UsesCgo") + if !f.IsValid() { + return false + } + } + + addr := unsafe.Pointer(f.UnsafeAddr()) + *(*bool)(addr) = true + + return true +} + +// ReadGo116ErrorData extracts additional information from types.Error values +// generated by Go version 1.16 and later: the error code, start position, and +// end position. If all positions are valid, start <= err.Pos <= end. +// +// If the data could not be read, the final result parameter will be false. +func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos, ok bool) { + var data [3]int + // By coincidence all of these fields are ints, which simplifies things. 
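+ // Note: go116code, go116start and go116end are unexported fields of
+ // types.Error, so they must be read via reflection. Reading (unlike
+ // setting) unexported fields through reflect is permitted; if a future
+ // Go release renames or removes one of them, FieldByName returns the
+ // zero Value and we return ok == false below instead of panicking.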
+ v := reflect.ValueOf(err) + for i, name := range []string{"go116code", "go116start", "go116end"} { + f := v.FieldByName(name) + if !f.IsValid() { + return 0, 0, 0, false + } + data[i] = int(f.Int()) + } + return ErrorCode(data[0]), token.Pos(data[1]), token.Pos(data[2]), true +} + +var SetGoVersion = func(conf *types.Config, version string) bool { return false } diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types_118.go b/vendor/golang.org/x/tools/internal/typesinternal/types_118.go new file mode 100644 index 0000000000..a42b072a67 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/types_118.go @@ -0,0 +1,19 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 +// +build go1.18 + +package typesinternal + +import ( + "go/types" +) + +func init() { + SetGoVersion = func(conf *types.Config, version string) bool { + conf.GoVersion = version + return true + } +} diff --git a/vendor/modules.txt b/vendor/modules.txt index aa147bc7d3..da68826d6a 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -22,6 +22,9 @@ cloud.google.com/go/storage cloud.google.com/go/storage/internal cloud.google.com/go/storage/internal/apiv2 cloud.google.com/go/storage/internal/apiv2/storagepb +# dario.cat/mergo v1.0.0 +## explicit; go 1.13 +dario.cat/mergo # github.com/Azure/azure-sdk-for-go v68.0.0+incompatible ## explicit github.com/Azure/azure-sdk-for-go/profiles/2019-03-01/compute/mgmt/compute @@ -110,6 +113,42 @@ github.com/Azure/go-autorest/tracing ## explicit; go 1.16 github.com/BurntSushi/toml github.com/BurntSushi/toml/internal +# github.com/Microsoft/go-winio v0.6.1 +## explicit; go 1.17 +github.com/Microsoft/go-winio +github.com/Microsoft/go-winio/backuptar +github.com/Microsoft/go-winio/internal/fs +github.com/Microsoft/go-winio/internal/socket +github.com/Microsoft/go-winio/internal/stringbuffer +github.com/Microsoft/go-winio/pkg/guid +github.com/Microsoft/go-winio/vhd +# github.com/Microsoft/hcsshim v0.12.0-rc.1 +## explicit; go 1.18 +github.com/Microsoft/hcsshim +github.com/Microsoft/hcsshim/computestorage +github.com/Microsoft/hcsshim/internal/cow +github.com/Microsoft/hcsshim/internal/hcs +github.com/Microsoft/hcsshim/internal/hcs/schema1 +github.com/Microsoft/hcsshim/internal/hcs/schema2 +github.com/Microsoft/hcsshim/internal/hcserror +github.com/Microsoft/hcsshim/internal/hns +github.com/Microsoft/hcsshim/internal/interop +github.com/Microsoft/hcsshim/internal/jobobject +github.com/Microsoft/hcsshim/internal/log +github.com/Microsoft/hcsshim/internal/logfields +github.com/Microsoft/hcsshim/internal/longpath +github.com/Microsoft/hcsshim/internal/memory +github.com/Microsoft/hcsshim/internal/mergemaps +github.com/Microsoft/hcsshim/internal/oc +github.com/Microsoft/hcsshim/internal/protocol/guestrequest +github.com/Microsoft/hcsshim/internal/queue +github.com/Microsoft/hcsshim/internal/safefile +github.com/Microsoft/hcsshim/internal/security +github.com/Microsoft/hcsshim/internal/timeout +github.com/Microsoft/hcsshim/internal/vmcompute +github.com/Microsoft/hcsshim/internal/wclayer +github.com/Microsoft/hcsshim/internal/winapi 
+github.com/Microsoft/hcsshim/osversion # github.com/VividCortex/ewma v1.2.0 ## explicit; go 1.12 github.com/VividCortex/ewma @@ -176,15 +215,27 @@ github.com/aws/aws-sdk-go/service/sso/ssoiface github.com/aws/aws-sdk-go/service/ssooidc github.com/aws/aws-sdk-go/service/sts github.com/aws/aws-sdk-go/service/sts/stsiface +# github.com/containerd/cgroups/v3 v3.0.2 +## explicit; go 1.18 +github.com/containerd/cgroups/v3/cgroup1/stats +# github.com/containerd/containerd v1.7.9 +## explicit; go 1.19 +github.com/containerd/containerd/errdefs +# github.com/containerd/stargz-snapshotter/estargz v0.15.1 +## explicit; go 1.19 +github.com/containerd/stargz-snapshotter/estargz +github.com/containerd/stargz-snapshotter/estargz/errorutil # github.com/containers/common v0.57.0 ## explicit; go 1.18 github.com/containers/common/pkg/retry # github.com/containers/image/v5 v5.29.0 ## explicit; go 1.19 github.com/containers/image/v5/copy +github.com/containers/image/v5/directory github.com/containers/image/v5/directory/explicitfilepath github.com/containers/image/v5/docker github.com/containers/image/v5/docker/archive +github.com/containers/image/v5/docker/daemon github.com/containers/image/v5/docker/internal/tarfile github.com/containers/image/v5/docker/policyconfiguration github.com/containers/image/v5/docker/reference @@ -214,6 +265,8 @@ github.com/containers/image/v5/manifest github.com/containers/image/v5/oci/archive github.com/containers/image/v5/oci/internal github.com/containers/image/v5/oci/layout +github.com/containers/image/v5/openshift +github.com/containers/image/v5/ostree github.com/containers/image/v5/pkg/blobinfocache github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize github.com/containers/image/v5/pkg/blobinfocache/memory @@ -226,13 +279,17 @@ github.com/containers/image/v5/pkg/docker/config github.com/containers/image/v5/pkg/strslice github.com/containers/image/v5/pkg/sysregistriesv2 github.com/containers/image/v5/pkg/tlsclientconfig +github.com/containers/image/v5/sif github.com/containers/image/v5/signature github.com/containers/image/v5/signature/internal github.com/containers/image/v5/signature/signer github.com/containers/image/v5/signature/sigstore github.com/containers/image/v5/signature/sigstore/internal github.com/containers/image/v5/signature/simplesigning +github.com/containers/image/v5/storage +github.com/containers/image/v5/tarball github.com/containers/image/v5/transports +github.com/containers/image/v5/transports/alltransports github.com/containers/image/v5/types github.com/containers/image/v5/version # github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 @@ -256,25 +313,59 @@ github.com/containers/ocicrypt/utils github.com/containers/ocicrypt/utils/keyprovider # github.com/containers/storage v1.51.0 ## explicit; go 1.19 +github.com/containers/storage +github.com/containers/storage/drivers +github.com/containers/storage/drivers/aufs +github.com/containers/storage/drivers/btrfs +github.com/containers/storage/drivers/copy 
+github.com/containers/storage/drivers/devmapper +github.com/containers/storage/drivers/overlay +github.com/containers/storage/drivers/overlayutils +github.com/containers/storage/drivers/quota +github.com/containers/storage/drivers/register +github.com/containers/storage/drivers/vfs +github.com/containers/storage/drivers/windows +github.com/containers/storage/drivers/zfs github.com/containers/storage/pkg/archive +github.com/containers/storage/pkg/chrootarchive +github.com/containers/storage/pkg/chunked github.com/containers/storage/pkg/chunked/compressor +github.com/containers/storage/pkg/chunked/dump github.com/containers/storage/pkg/chunked/internal +github.com/containers/storage/pkg/config +github.com/containers/storage/pkg/devicemapper +github.com/containers/storage/pkg/directory +github.com/containers/storage/pkg/dmesg github.com/containers/storage/pkg/fileutils +github.com/containers/storage/pkg/fsutils github.com/containers/storage/pkg/homedir +github.com/containers/storage/pkg/idmap github.com/containers/storage/pkg/idtools github.com/containers/storage/pkg/ioutils +github.com/containers/storage/pkg/locker github.com/containers/storage/pkg/lockfile github.com/containers/storage/pkg/longpath +github.com/containers/storage/pkg/loopback github.com/containers/storage/pkg/mount +github.com/containers/storage/pkg/parsers +github.com/containers/storage/pkg/parsers/kernel github.com/containers/storage/pkg/pools github.com/containers/storage/pkg/promise github.com/containers/storage/pkg/reexec github.com/containers/storage/pkg/regexp +github.com/containers/storage/pkg/stringid +github.com/containers/storage/pkg/stringutils github.com/containers/storage/pkg/system +github.com/containers/storage/pkg/tarlog +github.com/containers/storage/pkg/truncindex github.com/containers/storage/pkg/unshare +github.com/containers/storage/types # github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 ## explicit github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer +# github.com/cyphar/filepath-securejoin v0.2.4 +## explicit; go 1.13 +github.com/cyphar/filepath-securejoin # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew @@ -286,18 +377,38 @@ github.com/dimchansky/utfbom github.com/distribution/reference # github.com/docker/distribution v2.8.3+incompatible ## explicit +github.com/docker/distribution/reference github.com/docker/distribution/registry/api/errcode github.com/docker/distribution/registry/api/v2 github.com/docker/distribution/registry/client/auth/challenge # github.com/docker/docker v24.0.7+incompatible ## explicit +github.com/docker/docker/api +github.com/docker/docker/api/types +github.com/docker/docker/api/types/blkiodev +github.com/docker/docker/api/types/container +github.com/docker/docker/api/types/events +github.com/docker/docker/api/types/filters +github.com/docker/docker/api/types/image +github.com/docker/docker/api/types/mount +github.com/docker/docker/api/types/network 
+github.com/docker/docker/api/types/registry +github.com/docker/docker/api/types/strslice +github.com/docker/docker/api/types/swarm +github.com/docker/docker/api/types/swarm/runtime +github.com/docker/docker/api/types/time github.com/docker/docker/api/types/versions +github.com/docker/docker/api/types/volume +github.com/docker/docker/client +github.com/docker/docker/errdefs # github.com/docker/docker-credential-helpers v0.8.0 ## explicit; go 1.19 github.com/docker/docker-credential-helpers/client github.com/docker/docker-credential-helpers/credentials # github.com/docker/go-connections v0.4.0 ## explicit +github.com/docker/go-connections/nat +github.com/docker/go-connections/sockets github.com/docker/go-connections/tlsconfig # github.com/docker/go-units v0.5.0 ## explicit @@ -357,6 +468,9 @@ github.com/gobwas/glob/syntax/ast github.com/gobwas/glob/syntax/lexer github.com/gobwas/glob/util/runes github.com/gobwas/glob/util/strings +# github.com/gogo/protobuf v1.3.2 +## explicit; go 1.15 +github.com/gogo/protobuf/proto # github.com/golang-jwt/jwt/v4 v4.5.0 ## explicit; go 1.16 github.com/golang-jwt/jwt/v4 @@ -381,6 +495,11 @@ github.com/google/go-cmp/cmp/internal/value # github.com/google/go-containerregistry v0.16.1 ## explicit; go 1.18 github.com/google/go-containerregistry/pkg/name +github.com/google/go-containerregistry/pkg/v1 +github.com/google/go-containerregistry/pkg/v1/types +# github.com/google/go-intervals v0.0.2 +## explicit; go 1.12 +github.com/google/go-intervals/intervalset # github.com/google/s2a-go v0.1.7 ## explicit; go 1.19 github.com/google/s2a-go @@ -499,12 +618,18 @@ github.com/mailru/easyjson/jwriter # github.com/mattn/go-runewidth v0.0.15 ## explicit; go 1.9 github.com/mattn/go-runewidth +# github.com/mattn/go-shellwords v1.0.12 +## explicit; go 1.13 +github.com/mattn/go-shellwords # github.com/mattn/go-sqlite3 v1.14.18 ## explicit; go 1.16 github.com/mattn/go-sqlite3 # github.com/miekg/pkcs11 v1.1.1 ## explicit; go 1.12 github.com/miekg/pkcs11 +# github.com/mistifyio/go-zfs/v3 v3.0.1 +## explicit; go 1.14 +github.com/mistifyio/go-zfs/v3 # github.com/mitchellh/go-homedir v1.1.0 ## explicit github.com/mitchellh/go-homedir @@ -526,6 +651,7 @@ github.com/oklog/ulid # github.com/opencontainers/go-digest v1.0.0 ## explicit; go 1.13 github.com/opencontainers/go-digest +github.com/opencontainers/go-digest/digestset # github.com/opencontainers/image-spec v1.1.0-rc5 ## explicit; go 1.18 github.com/opencontainers/image-spec/specs-go @@ -536,6 +662,12 @@ github.com/opencontainers/runc/libcontainer/user # github.com/opencontainers/runtime-spec v1.1.0 ## explicit github.com/opencontainers/runtime-spec/specs-go +# github.com/opencontainers/selinux v1.11.0 +## explicit; go 1.19 +github.com/opencontainers/selinux/go-selinux +github.com/opencontainers/selinux/go-selinux/label +github.com/opencontainers/selinux/pkg/pwalk +github.com/opencontainers/selinux/pkg/pwalkdir # github.com/oracle/oci-go-sdk/v54 v54.0.0 ## explicit; go 1.13 github.com/oracle/oci-go-sdk/v54/common @@ -545,6 +677,10 @@ 
github.com/oracle/oci-go-sdk/v54/identity github.com/oracle/oci-go-sdk/v54/objectstorage github.com/oracle/oci-go-sdk/v54/objectstorage/transfer github.com/oracle/oci-go-sdk/v54/workrequests +# github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f +## explicit +github.com/ostreedev/ostree-go/pkg/glibobject +github.com/ostreedev/ostree-go/pkg/otbuiltin # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors @@ -591,9 +727,15 @@ github.com/stefanberger/go-pkcs11uri ## explicit; go 1.20 github.com/stretchr/testify/assert github.com/stretchr/testify/require +# github.com/sylabs/sif/v2 v2.15.0 +## explicit; go 1.20 +github.com/sylabs/sif/v2/pkg/sif # github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 ## explicit github.com/syndtr/gocapability/capability +# github.com/tchap/go-patricia/v2 v2.3.1 +## explicit; go 1.16 +github.com/tchap/go-patricia/v2/patricia # github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 ## explicit github.com/titanous/rocacheck @@ -726,13 +868,18 @@ golang.org/x/crypto/sha3 golang.org/x/exp/constraints golang.org/x/exp/maps golang.org/x/exp/slices +# golang.org/x/mod v0.13.0 +## explicit; go 1.18 +golang.org/x/mod/semver # golang.org/x/net v0.19.0 ## explicit; go 1.18 golang.org/x/net/http/httpguts golang.org/x/net/http2 golang.org/x/net/http2/hpack golang.org/x/net/idna +golang.org/x/net/internal/socks golang.org/x/net/internal/timeseries +golang.org/x/net/proxy golang.org/x/net/trace # golang.org/x/oauth2 v0.15.0 ## explicit; go 1.18 @@ -747,13 +894,16 @@ golang.org/x/oauth2/jws golang.org/x/oauth2/jwt # golang.org/x/sync v0.5.0 ## explicit; go 1.18 +golang.org/x/sync/errgroup golang.org/x/sync/semaphore # golang.org/x/sys v0.15.0 ## explicit; go 1.18 golang.org/x/sys/cpu +golang.org/x/sys/execabs golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows +golang.org/x/sys/windows/registry # golang.org/x/term v0.15.0 ## explicit; go 1.18 golang.org/x/term @@ -766,6 +916,25 @@ golang.org/x/text/unicode/norm # golang.org/x/time v0.5.0 ## explicit; go 1.18 golang.org/x/time/rate +# golang.org/x/tools v0.14.0 +## explicit; go 1.18 +golang.org/x/tools/cmd/stringer +golang.org/x/tools/go/gcexportdata +golang.org/x/tools/go/internal/packagesdriver +golang.org/x/tools/go/packages +golang.org/x/tools/go/types/objectpath +golang.org/x/tools/internal/event +golang.org/x/tools/internal/event/core +golang.org/x/tools/internal/event/keys +golang.org/x/tools/internal/event/label +golang.org/x/tools/internal/event/tag +golang.org/x/tools/internal/gcimporter +golang.org/x/tools/internal/gocommand +golang.org/x/tools/internal/packagesinternal +golang.org/x/tools/internal/pkgbits +golang.org/x/tools/internal/tokeninternal +golang.org/x/tools/internal/typeparams +golang.org/x/tools/internal/typesinternal # golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 ## explicit; go 1.17 golang.org/x/xerrors