From 97550537d3b208ff30cac69553b9408eddbf107f Mon Sep 17 00:00:00 2001
From: Yoan Blanc
Date: Tue, 11 Jun 2019 10:56:52 +0200
Subject: [PATCH 1/2] vendor: docker v18.09.6

Signed-off-by: Yoan Blanc
---
 vendor/github.com/Microsoft/hcsshim/LICENSE | 21 +
 .../hcsshim/osversion/osversion_windows.go | 57 +
 .../hcsshim/osversion/windowsbuilds.go | 23 +
 .../github.com/containerd/continuity/LICENSE | 18 +-
 .../continuity/pathdriver/path_driver.go | 16 +
 vendor/github.com/containerd/fifo/readme.md | 2 +-
 .../docker/distribution/CHANGELOG.md | 4 +-
 .../docker/distribution/RELEASE-CHECKLIST.md | 3 +-
 .../github.com/docker/distribution/ROADMAP.md | 15 +-
 .../distribution/registry/api/v2/errors.go | 2 +-
 vendor/github.com/docker/docker/LICENSE | 2 +-
 .../docker/api/types/container/host_config.go | 1 -
 .../docker/docker/api/types/filters/parse.go | 16 -
 .../docker/docker/api/types/mount/mount.go | 3 +-
 .../docker/docker/api/types/stats.go | 2 +-
 .../docker/api/types/swarm/container.go | 1 -
 .../docker/docker/api/types/swarm/swarm.go | 2 -
 .../docker/docker/api/types/types.go | 1 -
 .../docker/docker/pkg/archive/README.md | 1 -
 .../docker/docker/pkg/archive/archive.go | 1284 -----------------
 .../docker/pkg/archive/archive_linux.go | 92 --
 .../docker/pkg/archive/archive_other.go | 7 -
 .../docker/docker/pkg/archive/archive_unix.go | 114 --
 .../docker/pkg/archive/archive_windows.go | 67 -
 .../docker/docker/pkg/archive/changes.go | 445 ------
 .../docker/pkg/archive/changes_linux.go | 286 ----
 .../docker/pkg/archive/changes_other.go | 97 --
 .../docker/docker/pkg/archive/changes_unix.go | 43 -
 .../docker/pkg/archive/changes_windows.go | 34 -
 .../docker/docker/pkg/archive/copy.go | 472 ------
 .../docker/docker/pkg/archive/copy_unix.go | 11 -
 .../docker/docker/pkg/archive/copy_windows.go | 9 -
 .../docker/docker/pkg/archive/diff.go | 260 ----
 .../docker/docker/pkg/archive/time_linux.go | 16 -
 .../docker/pkg/archive/time_unsupported.go | 16 -
 .../docker/docker/pkg/archive/whiteouts.go | 23 -
 .../docker/docker/pkg/archive/wrap.go | 59 -
 .../docker/docker/pkg/fileutils/fileutils.go | 2 +-
 .../docker/docker/registry/registry.go | 2 +-
 .../docker/docker/registry/service_v2.go | 4 +-
 .../govendor/github.com/docker/engine/LICENSE | 191 +++
 .../govendor/github.com/docker/engine/NOTICE | 19 +
 .../docker/volume/mounts/linux_parser.go | 3 -
 .../docker/go-connections/nat/nat.go | 2 +-
 .../docker/go-connections/tlsconfig/config.go | 6 +-
 vendor/github.com/docker/libnetwork/LICENSE | 1 +
 .../docker/libnetwork/ipamutils/utils.go | 89 +-
 vendor/github.com/fatih/color/README.md | 11 +-
 vendor/github.com/fatih/color/doc.go | 2 +-
 vendor/github.com/godbus/dbus/README.markdown | 2 +-
 .../github.com/gorilla/mux/ISSUE_TEMPLATE.md | 1 +
 vendor/github.com/gorilla/mux/README.md | 14 +-
 vendor/github.com/gorilla/mux/go.mod | 1 +
 vendor/github.com/hashicorp/go-version/go.mod | 1 +
 .../github.com/hashicorp/memberlist/state.go | 2 +-
 vendor/github.com/hashicorp/yamux/go.mod | 1 +
 vendor/github.com/miekg/dns/labels.go | 2 +-
 vendor/github.com/morikuni/aec/LICENSE | 21 +
 vendor/github.com/morikuni/aec/README.md | 178 +++
 vendor/github.com/morikuni/aec/aec.go | 137 ++
 vendor/github.com/morikuni/aec/ansi.go | 59 +
 vendor/github.com/morikuni/aec/builder.go | 388 +++++
 vendor/github.com/morikuni/aec/sample.gif | Bin 0 -> 12548 bytes
 vendor/github.com/morikuni/aec/sgr.go | 202 +++
 .../seccomp/libseccomp-golang/README | 1 +
 .../golang.org/x/net/internal/socks/client.go | 168 +++
 .../golang.org/x/net/internal/socks/socks.go | 317 ++++
 vendor/golang.org/x/net/proxy/dial.go | 54 +
 vendor/golang.org/x/net/proxy/direct.go | 15 +-
 vendor/golang.org/x/net/proxy/per_host.go | 15 +
 vendor/golang.org/x/net/proxy/proxy.go | 33 +-
 vendor/golang.org/x/net/proxy/socks5.go | 222 +--
 vendor/vendor.json | 81 +-
 73 files changed, 2087 insertions(+), 3685 deletions(-)
 create mode 100644 vendor/github.com/Microsoft/hcsshim/LICENSE
 create mode 100644 vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go
 create mode 100644 vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/archive/README.md
 delete mode 100644 vendor/github.com/docker/docker/pkg/archive/archive.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_linux.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_other.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/archive/changes.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_linux.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_other.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/archive/copy.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/archive/copy_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/archive/copy_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/archive/diff.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/archive/time_linux.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/archive/time_unsupported.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/archive/whiteouts.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/archive/wrap.go
 create mode 100644 vendor/github.com/docker/docker/volume/mounts/home/yoan/go/.cache/govendor/github.com/docker/engine/LICENSE
 create mode 100644 vendor/github.com/docker/docker/volume/mounts/home/yoan/go/.cache/govendor/github.com/docker/engine/NOTICE
 create mode 100644 vendor/github.com/gorilla/mux/go.mod
 create mode 100644 vendor/github.com/hashicorp/go-version/go.mod
 create mode 100644 vendor/github.com/hashicorp/yamux/go.mod
 create mode 100644 vendor/github.com/morikuni/aec/LICENSE
 create mode 100644 vendor/github.com/morikuni/aec/README.md
 create mode 100644 vendor/github.com/morikuni/aec/aec.go
 create mode 100644 vendor/github.com/morikuni/aec/ansi.go
 create mode 100644 vendor/github.com/morikuni/aec/builder.go
 create mode 100644 vendor/github.com/morikuni/aec/sample.gif
 create mode 100644 vendor/github.com/morikuni/aec/sgr.go
 create mode 100644 vendor/golang.org/x/net/internal/socks/client.go
 create mode 100644 vendor/golang.org/x/net/internal/socks/socks.go
 create mode 100644 vendor/golang.org/x/net/proxy/dial.go

diff --git a/vendor/github.com/Microsoft/hcsshim/LICENSE b/vendor/github.com/Microsoft/hcsshim/LICENSE
new file mode 100644
index 00000000000..49d21669aee
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Microsoft
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go b/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go
new file mode 100644
index 00000000000..477fe70783d
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go
@@ -0,0 +1,57 @@
+package osversion
+
+import (
+	"fmt"
+
+	"golang.org/x/sys/windows"
+)
+
+// OSVersion is a wrapper for Windows version information
+// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx
+type OSVersion struct {
+	Version      uint32
+	MajorVersion uint8
+	MinorVersion uint8
+	Build        uint16
+}
+
+// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx
+type osVersionInfoEx struct {
+	OSVersionInfoSize uint32
+	MajorVersion      uint32
+	MinorVersion      uint32
+	BuildNumber       uint32
+	PlatformID        uint32
+	CSDVersion        [128]uint16
+	ServicePackMajor  uint16
+	ServicePackMinor  uint16
+	SuiteMask         uint16
+	ProductType       byte
+	Reserve           byte
+}
+
+// Get gets the operating system version on Windows.
+// The calling application must be manifested to get the correct version information.
+func Get() OSVersion {
+	var err error
+	osv := OSVersion{}
+	osv.Version, err = windows.GetVersion()
+	if err != nil {
+		// GetVersion never fails.
+		panic(err)
+	}
+	osv.MajorVersion = uint8(osv.Version & 0xFF)
+	osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF)
+	osv.Build = uint16(osv.Version >> 16)
+	return osv
+}
+
+// Build gets the build-number on Windows
+// The calling application must be manifested to get the correct version information.
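Reviewer note: the bit layout Get unpacks from the GetVersion DWORD can be checked with plain Go. A minimal standalone sketch, using a hypothetical packed value for 10.0.17763 (this snippet is not part of the vendored code):

package main

import "fmt"

func main() {
	// build<<16 | minor<<8 | major, mirroring the shifts in Get above.
	const packed uint32 = 17763<<16 | 0<<8 | 10
	major := uint8(packed & 0xFF)
	minor := uint8(packed >> 8 & 0xFF)
	build := uint16(packed >> 16)
	fmt.Printf("%d.%d.%d\n", major, minor, build) // prints 10.0.17763
}
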
+func Build() uint16 { + return Get().Build +} + +func (osv OSVersion) ToString() string { + return fmt.Sprintf("%d.%d.%d", osv.MajorVersion, osv.MinorVersion, osv.Build) +} diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go new file mode 100644 index 00000000000..c26d35ac573 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go @@ -0,0 +1,23 @@ +package osversion + +const ( + // RS1 (version 1607, codename "Redstone 1") corresponds to Windows Server + // 2016 (ltsc2016) and Windows 10 (Anniversary Update). + RS1 = 14393 + + // RS2 (version 1703, codename "Redstone 2") was a client-only update, and + // corresponds to Windows 10 (Creators Update). + RS2 = 15063 + + // RS3 (version 1709, codename "Redstone 3") corresponds to Windows Server + // 1709 (Semi-Annual Channel (SAC)), and Windows 10 (Fall Creators Update). + RS3 = 16299 + + // RS4 (version 1803, codename "Redstone 4") corresponds to Windows Server + // 1809 (Semi-Annual Channel (SAC)), and Windows 10 (April 2018 Update). + RS4 = 17134 + + // RS5 (version 1809, codename "Redstone 5") corresponds to Windows Server + // 2019 (ltsc2019), and Windows 10 (October 2018 Update). + RS5 = 17763 +) diff --git a/vendor/github.com/containerd/continuity/LICENSE b/vendor/github.com/containerd/continuity/LICENSE index 8dada3edaf5..584149b6ee2 100644 --- a/vendor/github.com/containerd/continuity/LICENSE +++ b/vendor/github.com/containerd/continuity/LICENSE @@ -1,6 +1,7 @@ + Apache License Version 2.0, January 2004 - http://www.apache.org/licenses/ + https://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION @@ -175,24 +176,13 @@ END OF TERMS AND CONDITIONS - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} + Copyright The containerd Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/vendor/github.com/containerd/continuity/pathdriver/path_driver.go b/vendor/github.com/containerd/continuity/pathdriver/path_driver.go index b43d55fe959..b0d5a6b5670 100644 --- a/vendor/github.com/containerd/continuity/pathdriver/path_driver.go +++ b/vendor/github.com/containerd/continuity/pathdriver/path_driver.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package pathdriver import ( diff --git a/vendor/github.com/containerd/fifo/readme.md b/vendor/github.com/containerd/fifo/readme.md index 393830f9219..2b41b3b1ca3 100644 --- a/vendor/github.com/containerd/fifo/readme.md +++ b/vendor/github.com/containerd/fifo/readme.md @@ -28,5 +28,5 @@ func (f *fifo) Write(b []byte) (int, error) // Close the fifo. Next reads/writes will error. This method can also be used // before open(2) has returned and fifo was never opened. -func (f *fifo) Close() error +func (f *fifo) Close() error ``` diff --git a/vendor/github.com/docker/distribution/CHANGELOG.md b/vendor/github.com/docker/distribution/CHANGELOG.md index 9e46af37f25..e7b16b3c258 100644 --- a/vendor/github.com/docker/distribution/CHANGELOG.md +++ b/vendor/github.com/docker/distribution/CHANGELOG.md @@ -39,7 +39,7 @@ - Changes the client Tags `All()` method to follow links - Allow registry clients to connect via HTTP2 - Better handling of OAuth errors in client - + #### Spec - Manifest: clarify relationship between urls and foreign layers - Authorization: add support for repository classes @@ -104,3 +104,5 @@ The registry is go 1.7 compliant, and passes newer, more restrictive `lint` and #### Docker Image - Use Alpine Linux as base image + + diff --git a/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md b/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md index e959c2a4189..73eba5a8716 100644 --- a/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md +++ b/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md @@ -25,7 +25,7 @@ check comment and correct commit hash. 50. Push the signed tag -60. Create a new [release](https://github.com/docker/distribution/releases). In the case of a release candidate, tick the `pre-release` checkbox. +60. Create a new [release](https://github.com/docker/distribution/releases). In the case of a release candidate, tick the `pre-release` checkbox. 70. Update the registry binary in [distribution library image repo](https://github.com/docker/distribution-library-image) by running the update script and opening a pull request. @@ -41,3 +41,4 @@ e.g. to release `2.3.1` `2.3 -> 2.3.1` 90. Build a new distribution/registry image on [Docker hub](https://hub.docker.com/u/distribution/dashboard) by adding a new automated build with the new tag and re-building the images. + diff --git a/vendor/github.com/docker/distribution/ROADMAP.md b/vendor/github.com/docker/distribution/ROADMAP.md index aa3bd3bff07..701127afec6 100644 --- a/vendor/github.com/docker/distribution/ROADMAP.md +++ b/vendor/github.com/docker/distribution/ROADMAP.md @@ -27,7 +27,7 @@ considerations made in respect of the future of the project. Components of the Distribution Project are managed via github [milestones](https://github.com/docker/distribution/milestones). Upcoming features and bugfixes for a component will be added to the relevant milestone. 
If a feature or bugfix is not part of a milestone, it is currently unscheduled for -implementation. +implementation. * [Registry](#registry) * [Distribution Package](#distribution-package) @@ -40,7 +40,7 @@ The new Docker registry is the main portion of the distribution repository. Registry 2.0 is the first release of the next-generation registry. This was primarily focused on implementing the [new registry API](https://github.com/docker/distribution/blob/master/docs/spec/api.md), -with a focus on security and performance. +with a focus on security and performance. Following from the Distribution project goals above, we have a set of goals for registry v2 that we would like to follow in the design. New features @@ -105,9 +105,9 @@ landing in the registry. ##### Proxying to other Registries -A _pull-through caching_ mode exists for the registry, but is restricted from +A _pull-through caching_ mode exists for the registry, but is restricted from within the docker client to only mirror the official Docker Hub. This functionality -can be expanded when image provenance has been specified and implemented in the +can be expanded when image provenance has been specified and implemented in the distribution project. ##### Metadata storage @@ -247,13 +247,13 @@ Please see the following issues for more detail: - https://github.com/docker/distribution/issues/461 - https://github.com/docker/distribution/issues/462 -### Distribution Package +### Distribution Package At its core, the Distribution Project is a set of Go packages that make up Distribution Components. At this time, most of these packages make up the -Registry implementation. +Registry implementation. -The package itself is considered unstable. If you're using it, please take care to vendor the dependent version. +The package itself is considered unstable. If you're using it, please take care to vendor the dependent version. For feature additions, please see the Registry section. In the future, we may break out a separate Roadmap for distribution-specific features that apply to more than @@ -264,3 +264,4 @@ just the registry. ### Project Planning An [Open-Source Planning Process](https://github.com/docker/distribution/wiki/Open-Source-Planning-Process) is used to define the Roadmap. [Project Pages](https://github.com/docker/distribution/wiki) define the goals for each Milestone and identify current progress. + diff --git a/vendor/github.com/docker/distribution/registry/api/v2/errors.go b/vendor/github.com/docker/distribution/registry/api/v2/errors.go index 2883c51e18c..97d6923aa03 100644 --- a/vendor/github.com/docker/distribution/registry/api/v2/errors.go +++ b/vendor/github.com/docker/distribution/registry/api/v2/errors.go @@ -98,7 +98,7 @@ var ( ErrorCodeManifestBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "MANIFEST_BLOB_UNKNOWN", Message: "blob unknown to registry", - Description: `This error may be returned when a manifest blob is + Description: `This error may be returned when a manifest blob is unknown to the registry.`, HTTPStatusCode: http.StatusBadRequest, }) diff --git a/vendor/github.com/docker/docker/LICENSE b/vendor/github.com/docker/docker/LICENSE index 6d8d58fb676..9c8e20ab85c 100644 --- a/vendor/github.com/docker/docker/LICENSE +++ b/vendor/github.com/docker/docker/LICENSE @@ -176,7 +176,7 @@ END OF TERMS AND CONDITIONS - Copyright 2013-2018 Docker, Inc. + Copyright 2013-2017 Docker, Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/docker/docker/api/types/container/host_config.go b/vendor/github.com/docker/docker/api/types/container/host_config.go index 44bdfec962b..4ef26fa6c87 100644 --- a/vendor/github.com/docker/docker/api/types/container/host_config.go +++ b/vendor/github.com/docker/docker/api/types/container/host_config.go @@ -329,7 +329,6 @@ type Resources struct { DeviceCgroupRules []string // List of rule to be added to the device cgroup DiskQuota int64 // Disk limit (in bytes) KernelMemory int64 // Kernel memory limit (in bytes) - KernelMemoryTCP int64 // Hard limit for kernel TCP buffer memory (in bytes) MemoryReservation int64 // Memory soft limit (in bytes) MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap MemorySwappiness *int64 // Tuning container memory swappiness behaviour diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go index d8f19ae227c..a41e3d8d96a 100644 --- a/vendor/github.com/docker/docker/api/types/filters/parse.go +++ b/vendor/github.com/docker/docker/api/types/filters/parse.go @@ -323,22 +323,6 @@ func (args Args) WalkValues(field string, op func(value string) error) error { return nil } -// Clone returns a copy of args. -func (args Args) Clone() (newArgs Args) { - newArgs.fields = make(map[string]map[string]bool, len(args.fields)) - for k, m := range args.fields { - var mm map[string]bool - if m != nil { - mm = make(map[string]bool, len(m)) - for kk, v := range m { - mm[kk] = v - } - } - newArgs.fields[k] = mm - } - return newArgs -} - func deprecatedArgs(d map[string][]string) map[string]map[string]bool { m := map[string]map[string]bool{} for k, v := range d { diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go index ab4446b38f6..3fef974df88 100644 --- a/vendor/github.com/docker/docker/api/types/mount/mount.go +++ b/vendor/github.com/docker/docker/api/types/mount/mount.go @@ -79,8 +79,7 @@ const ( // BindOptions defines options specific to mounts of type "bind". type BindOptions struct { - Propagation Propagation `json:",omitempty"` - NonRecursive bool `json:",omitempty"` + Propagation Propagation `json:",omitempty"` } // VolumeOptions represents the options for a mount of type volume. diff --git a/vendor/github.com/docker/docker/api/types/stats.go b/vendor/github.com/docker/docker/api/types/stats.go index 20daebed14b..9dc73431f3b 100644 --- a/vendor/github.com/docker/docker/api/types/stats.go +++ b/vendor/github.com/docker/docker/api/types/stats.go @@ -129,7 +129,7 @@ type NetworkStats struct { TxBytes uint64 `json:"tx_bytes"` // Packets sent. Windows and Linux. TxPackets uint64 `json:"tx_packets"` - // Sent errors. Not used on Windows. Note that we don't `omitempty` this + // Sent errors. Not used on Windows. Note that we dont `omitempty` this // field as it is expected in the >=v1.21 API stats structure. TxErrors uint64 `json:"tx_errors"` // Outgoing packets dropped. Windows and Linux. 
diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go index e12f09837fc..151211ff5a4 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/container.go +++ b/vendor/github.com/docker/docker/api/types/swarm/container.go @@ -71,5 +71,4 @@ type ContainerSpec struct { Secrets []*SecretReference `json:",omitempty"` Configs []*ConfigReference `json:",omitempty"` Isolation container.Isolation `json:",omitempty"` - Sysctls map[string]string `json:",omitempty"` } diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go index 484cd0be7f2..b742cf1bfbb 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/swarm.go +++ b/vendor/github.com/docker/docker/api/types/swarm/swarm.go @@ -14,7 +14,6 @@ type ClusterInfo struct { RootRotationInProgress bool DefaultAddrPool []string SubnetSize uint32 - DataPathPort uint32 } // Swarm represents a swarm. @@ -154,7 +153,6 @@ type InitRequest struct { ListenAddr string AdvertiseAddr string DataPathAddr string - DataPathPort uint32 ForceNewCluster bool Spec Spec AutoLockManagers bool diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go index 2accda9d077..a8fae3ba32d 100644 --- a/vendor/github.com/docker/docker/api/types/types.go +++ b/vendor/github.com/docker/docker/api/types/types.go @@ -158,7 +158,6 @@ type Info struct { MemoryLimit bool SwapLimit bool KernelMemory bool - KernelMemoryTCP bool CPUCfsPeriod bool `json:"CpuCfsPeriod"` CPUCfsQuota bool `json:"CpuCfsQuota"` CPUShares bool diff --git a/vendor/github.com/docker/docker/pkg/archive/README.md b/vendor/github.com/docker/docker/pkg/archive/README.md deleted file mode 100644 index 7307d9694f6..00000000000 --- a/vendor/github.com/docker/docker/pkg/archive/README.md +++ /dev/null @@ -1 +0,0 @@ -This code provides helper functions for dealing with archive files. diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go deleted file mode 100644 index 32132fcfed8..00000000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive.go +++ /dev/null @@ -1,1284 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "bufio" - "bytes" - "compress/bzip2" - "compress/gzip" - "context" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "runtime" - "strconv" - "strings" - "syscall" - "time" - - "github.com/docker/docker/pkg/fileutils" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/system" - "github.com/sirupsen/logrus" -) - -var unpigzPath string - -func init() { - if path, err := exec.LookPath("unpigz"); err != nil { - logrus.Debug("unpigz binary not found in PATH, falling back to go gzip library") - } else { - logrus.Debugf("Using unpigz binary found at path %s", path) - unpigzPath = path - } -} - -type ( - // Compression is the state represents if compressed or not. - Compression int - // WhiteoutFormat is the format of whiteouts unpacked - WhiteoutFormat int - - // TarOptions wraps the tar options. 
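Reviewer note: the init() in the deleted archive.go above implements an optional-binary probe. A minimal sketch of the same pattern, runnable on its own (the fallback message is illustrative, not part of the vendored code):

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// Prefer the external unpigz when it is on PATH; otherwise fall back
	// to the pure-Go compress/gzip implementation.
	if path, err := exec.LookPath("unpigz"); err != nil {
		fmt.Println("unpigz not found in PATH, would fall back to the Go gzip library")
	} else {
		fmt.Println("would decompress via", path)
	}
}
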
- TarOptions struct { - IncludeFiles []string - ExcludePatterns []string - Compression Compression - NoLchown bool - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap - ChownOpts *idtools.Identity - IncludeSourceDir bool - // WhiteoutFormat is the expected on disk format for whiteout files. - // This format will be converted to the standard format on pack - // and from the standard format on unpack. - WhiteoutFormat WhiteoutFormat - // When unpacking, specifies whether overwriting a directory with a - // non-directory is allowed and vice versa. - NoOverwriteDirNonDir bool - // For each include when creating an archive, the included name will be - // replaced with the matching name from this map. - RebaseNames map[string]string - InUserNS bool - } -) - -// Archiver implements the Archiver interface and allows the reuse of most utility functions of -// this package with a pluggable Untar function. Also, to facilitate the passing of specific id -// mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. -type Archiver struct { - Untar func(io.Reader, string, *TarOptions) error - IDMapping *idtools.IdentityMapping -} - -// NewDefaultArchiver returns a new Archiver without any IdentityMapping -func NewDefaultArchiver() *Archiver { - return &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}} -} - -// breakoutError is used to differentiate errors related to breaking out -// When testing archive breakout in the unit tests, this error is expected -// in order for the test to pass. -type breakoutError error - -const ( - // Uncompressed represents the uncompressed. - Uncompressed Compression = iota - // Bzip2 is bzip2 compression algorithm. - Bzip2 - // Gzip is gzip compression algorithm. - Gzip - // Xz is xz compression algorithm. - Xz -) - -const ( - // AUFSWhiteoutFormat is the default format for whiteouts - AUFSWhiteoutFormat WhiteoutFormat = iota - // OverlayWhiteoutFormat formats whiteout according to the overlay - // standard. - OverlayWhiteoutFormat -) - -const ( - modeISDIR = 040000 // Directory - modeISFIFO = 010000 // FIFO - modeISREG = 0100000 // Regular file - modeISLNK = 0120000 // Symbolic link - modeISBLK = 060000 // Block special file - modeISCHR = 020000 // Character special file - modeISSOCK = 0140000 // Socket -) - -// IsArchivePath checks if the (possibly compressed) file at the given path -// starts with a tar file header. -func IsArchivePath(path string) bool { - file, err := os.Open(path) - if err != nil { - return false - } - defer file.Close() - rdr, err := DecompressStream(file) - if err != nil { - return false - } - defer rdr.Close() - r := tar.NewReader(rdr) - _, err = r.Next() - return err == nil -} - -// DetectCompression detects the compression algorithm of the source. 
-func DetectCompression(source []byte) Compression { - for compression, m := range map[Compression][]byte{ - Bzip2: {0x42, 0x5A, 0x68}, - Gzip: {0x1F, 0x8B, 0x08}, - Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, - } { - if len(source) < len(m) { - logrus.Debug("Len too short") - continue - } - if bytes.Equal(m, source[:len(m)]) { - return compression - } - } - return Uncompressed -} - -func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) { - args := []string{"xz", "-d", "-c", "-q"} - - return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive) -} - -func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { - if unpigzPath == "" { - return gzip.NewReader(buf) - } - - disablePigzEnv := os.Getenv("MOBY_DISABLE_PIGZ") - if disablePigzEnv != "" { - if disablePigz, err := strconv.ParseBool(disablePigzEnv); err != nil { - return nil, err - } else if disablePigz { - return gzip.NewReader(buf) - } - } - - return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) -} - -func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser { - return ioutils.NewReadCloserWrapper(readBuf, func() error { - cancel() - return readBuf.Close() - }) -} - -// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. -func DecompressStream(archive io.Reader) (io.ReadCloser, error) { - p := pools.BufioReader32KPool - buf := p.Get(archive) - bs, err := buf.Peek(10) - if err != nil && err != io.EOF { - // Note: we'll ignore any io.EOF error because there are some odd - // cases where the layer.tar file will be empty (zero bytes) and - // that results in an io.EOF from the Peek() call. So, in those - // cases we'll just treat it as a non-compressed stream and - // that means just create an empty layer. - // See Issue 18170 - return nil, err - } - - compression := DetectCompression(bs) - switch compression { - case Uncompressed: - readBufWrapper := p.NewReadCloserWrapper(buf, buf) - return readBufWrapper, nil - case Gzip: - ctx, cancel := context.WithCancel(context.Background()) - - gzReader, err := gzDecompress(ctx, buf) - if err != nil { - cancel() - return nil, err - } - readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) - return wrapReadCloser(readBufWrapper, cancel), nil - case Bzip2: - bz2Reader := bzip2.NewReader(buf) - readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) - return readBufWrapper, nil - case Xz: - ctx, cancel := context.WithCancel(context.Background()) - - xzReader, err := xzDecompress(ctx, buf) - if err != nil { - cancel() - return nil, err - } - readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) - return wrapReadCloser(readBufWrapper, cancel), nil - default: - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - } -} - -// CompressStream compresses the dest with specified compression algorithm. 
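A standalone sketch of the magic-byte probe DetectCompression performs above, checked against a real gzip stream (not part of the vendored code):

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
)

func main() {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Write([]byte("hello"))
	zw.Close()

	// Gzip streams begin with 0x1F 0x8B 0x08, one of the signatures
	// DetectCompression compares against.
	fmt.Println(bytes.HasPrefix(buf.Bytes(), []byte{0x1F, 0x8B, 0x08})) // true
}
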
-func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { - p := pools.BufioWriter32KPool - buf := p.Get(dest) - switch compression { - case Uncompressed: - writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) - return writeBufWrapper, nil - case Gzip: - gzWriter := gzip.NewWriter(dest) - writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) - return writeBufWrapper, nil - case Bzip2, Xz: - // archive/bzip2 does not support writing, and there is no xz support at all - // However, this is not a problem as docker only currently generates gzipped tars - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - default: - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - } -} - -// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to -// modify the contents or header of an entry in the archive. If the file already -// exists in the archive the TarModifierFunc will be called with the Header and -// a reader which will return the files content. If the file does not exist both -// header and content will be nil. -type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) - -// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the -// tar stream are modified if they match any of the keys in mods. -func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { - pipeReader, pipeWriter := io.Pipe() - - go func() { - tarReader := tar.NewReader(inputTarStream) - tarWriter := tar.NewWriter(pipeWriter) - defer inputTarStream.Close() - defer tarWriter.Close() - - modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { - header, data, err := modifier(name, original, tarReader) - switch { - case err != nil: - return err - case header == nil: - return nil - } - - header.Name = name - header.Size = int64(len(data)) - if err := tarWriter.WriteHeader(header); err != nil { - return err - } - if len(data) != 0 { - if _, err := tarWriter.Write(data); err != nil { - return err - } - } - return nil - } - - var err error - var originalHeader *tar.Header - for { - originalHeader, err = tarReader.Next() - if err == io.EOF { - break - } - if err != nil { - pipeWriter.CloseWithError(err) - return - } - - modifier, ok := mods[originalHeader.Name] - if !ok { - // No modifiers for this file, copy the header and data - if err := tarWriter.WriteHeader(originalHeader); err != nil { - pipeWriter.CloseWithError(err) - return - } - if _, err := pools.Copy(tarWriter, tarReader); err != nil { - pipeWriter.CloseWithError(err) - return - } - continue - } - delete(mods, originalHeader.Name) - - if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { - pipeWriter.CloseWithError(err) - return - } - } - - // Apply the modifiers that haven't matched any files in the archive - for name, modifier := range mods { - if err := modify(name, nil, modifier, nil); err != nil { - pipeWriter.CloseWithError(err) - return - } - } - - pipeWriter.Close() - - }() - return pipeReader -} - -// Extension returns the extension of a file that uses the specified compression algorithm. 
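A sketch of how a caller would drive ReplaceFileTarWrapper above; the entry name is a placeholder, and the import path assumes the upstream github.com/docker/docker/pkg/archive package (mirrored here under github.com):

package main

import (
	"archive/tar"
	"io"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Rewrite one entry of the tar stream on stdin while copying it to stdout.
	mods := map[string]archive.TarModifierFunc{
		"etc/config": func(path string, hdr *tar.Header, content io.Reader) (*tar.Header, []byte, error) {
			// Returning a nil header drops the entry; here we replace its body.
			return hdr, []byte("redacted\n"), nil
		},
	}
	out := archive.ReplaceFileTarWrapper(os.Stdin, mods)
	defer out.Close()
	io.Copy(os.Stdout, out)
}
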
-func (compression *Compression) Extension() string { - switch *compression { - case Uncompressed: - return "tar" - case Bzip2: - return "tar.bz2" - case Gzip: - return "tar.gz" - case Xz: - return "tar.xz" - } - return "" -} - -// FileInfoHeader creates a populated Header from fi. -// Compared to archive pkg this function fills in more information. -// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), -// which have been deleted since Go 1.9 archive/tar. -func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { - hdr, err := tar.FileInfoHeader(fi, link) - if err != nil { - return nil, err - } - hdr.Format = tar.FormatPAX - hdr.ModTime = hdr.ModTime.Truncate(time.Second) - hdr.AccessTime = time.Time{} - hdr.ChangeTime = time.Time{} - hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) - hdr.Name = canonicalTarName(name, fi.IsDir()) - if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { - return nil, err - } - return hdr, nil -} - -// fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar -// https://github.com/golang/go/commit/66b5a2f -func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { - fm := fi.Mode() - switch { - case fm.IsRegular(): - mode |= modeISREG - case fi.IsDir(): - mode |= modeISDIR - case fm&os.ModeSymlink != 0: - mode |= modeISLNK - case fm&os.ModeDevice != 0: - if fm&os.ModeCharDevice != 0 { - mode |= modeISCHR - } else { - mode |= modeISBLK - } - case fm&os.ModeNamedPipe != 0: - mode |= modeISFIFO - case fm&os.ModeSocket != 0: - mode |= modeISSOCK - } - return mode -} - -// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem -// to a tar header -func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { - capability, _ := system.Lgetxattr(path, "security.capability") - if capability != nil { - hdr.Xattrs = make(map[string]string) - hdr.Xattrs["security.capability"] = string(capability) - } - return nil -} - -type tarWhiteoutConverter interface { - ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) - ConvertRead(*tar.Header, string) (bool, error) -} - -type tarAppender struct { - TarWriter *tar.Writer - Buffer *bufio.Writer - - // for hardlink mapping - SeenFiles map[uint64]string - IdentityMapping *idtools.IdentityMapping - ChownOpts *idtools.Identity - - // For packing and unpacking whiteout files in the - // non standard format. The whiteout files defined - // by the AUFS standard are used as the tar whiteout - // standard. - WhiteoutConverter tarWhiteoutConverter -} - -func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { - return &tarAppender{ - SeenFiles: make(map[uint64]string), - TarWriter: tar.NewWriter(writer), - Buffer: pools.BufioWriter32KPool.Get(nil), - IdentityMapping: idMapping, - ChownOpts: chownOpts, - } -} - -// canonicalTarName provides a platform-independent and consistent posix-style -//path for files and directories to be archived regardless of the platform. 
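A standalone sketch of the normalization FileInfoHeader above layers on top of archive/tar — PAX format, second-granularity mtime, cleared atime/ctime for reproducible output; the path is an arbitrary example:

package main

import (
	"archive/tar"
	"fmt"
	"os"
	"time"
)

func main() {
	fi, err := os.Lstat("go.mod") // any existing file
	if err != nil {
		panic(err)
	}
	hdr, err := tar.FileInfoHeader(fi, "")
	if err != nil {
		panic(err)
	}
	// The same canonicalisation steps performed by FileInfoHeader above.
	hdr.Format = tar.FormatPAX
	hdr.ModTime = hdr.ModTime.Truncate(time.Second)
	hdr.AccessTime = time.Time{}
	hdr.ChangeTime = time.Time{}
	fmt.Println(hdr.Name, hdr.ModTime.Unix())
}
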
-func canonicalTarName(name string, isDir bool) string { - name = CanonicalTarNameForPath(name) - - // suffix with '/' for directories - if isDir && !strings.HasSuffix(name, "/") { - name += "/" - } - return name -} - -// addTarFile adds to the tar archive a file from `path` as `name` -func (ta *tarAppender) addTarFile(path, name string) error { - fi, err := os.Lstat(path) - if err != nil { - return err - } - - var link string - if fi.Mode()&os.ModeSymlink != 0 { - var err error - link, err = os.Readlink(path) - if err != nil { - return err - } - } - - hdr, err := FileInfoHeader(name, fi, link) - if err != nil { - return err - } - if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { - return err - } - - // if it's not a directory and has more than 1 link, - // it's hard linked, so set the type flag accordingly - if !fi.IsDir() && hasHardlinks(fi) { - inode, err := getInodeFromStat(fi.Sys()) - if err != nil { - return err - } - // a link should have a name that it links too - // and that linked name should be first in the tar archive - if oldpath, ok := ta.SeenFiles[inode]; ok { - hdr.Typeflag = tar.TypeLink - hdr.Linkname = oldpath - hdr.Size = 0 // This Must be here for the writer math to add up! - } else { - ta.SeenFiles[inode] = name - } - } - - //check whether the file is overlayfs whiteout - //if yes, skip re-mapping container ID mappings. - isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 - - //handle re-mapping container ID mappings back to host ID mappings before - //writing tar headers/files. We skip whiteout files because they were written - //by the kernel and already have proper ownership relative to the host - if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() { - fileIDPair, err := getFileUIDGID(fi.Sys()) - if err != nil { - return err - } - hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair) - if err != nil { - return err - } - } - - // explicitly override with ChownOpts - if ta.ChownOpts != nil { - hdr.Uid = ta.ChownOpts.UID - hdr.Gid = ta.ChownOpts.GID - } - - if ta.WhiteoutConverter != nil { - wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) - if err != nil { - return err - } - - // If a new whiteout file exists, write original hdr, then - // replace hdr with wo to be written after. Whiteouts should - // always be written after the original. Note the original - // hdr may have been updated to be a whiteout with returning - // a whiteout header - if wo != nil { - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - return err - } - if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { - return fmt.Errorf("tar: cannot use whiteout for non-empty file") - } - hdr = wo - } - } - - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - return err - } - - if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { - // We use system.OpenSequential to ensure we use sequential file - // access on Windows to avoid depleting the standby list. - // On Linux, this equates to a regular os.Open. 
- file, err := system.OpenSequential(path) - if err != nil { - return err - } - - ta.Buffer.Reset(ta.TarWriter) - defer ta.Buffer.Reset(nil) - _, err = io.Copy(ta.Buffer, file) - file.Close() - if err != nil { - return err - } - err = ta.Buffer.Flush() - if err != nil { - return err - } - } - - return nil -} - -func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error { - // hdr.Mode is in linux format, which we can use for sycalls, - // but for os.Foo() calls we need the mode converted to os.FileMode, - // so use hdrInfo.Mode() (they differ for e.g. setuid bits) - hdrInfo := hdr.FileInfo() - - switch hdr.Typeflag { - case tar.TypeDir: - // Create directory unless it exists as a directory already. - // In that case we just want to merge the two - if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { - if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { - return err - } - } - - case tar.TypeReg, tar.TypeRegA: - // Source is regular file. We use system.OpenFileSequential to use sequential - // file access to avoid depleting the standby list on Windows. - // On Linux, this equates to a regular os.OpenFile - file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) - if err != nil { - return err - } - if _, err := io.Copy(file, reader); err != nil { - file.Close() - return err - } - file.Close() - - case tar.TypeBlock, tar.TypeChar: - if inUserns { // cannot create devices in a userns - return nil - } - // Handle this is an OS-specific way - if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { - return err - } - - case tar.TypeFifo: - // Handle this is an OS-specific way - if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { - return err - } - - case tar.TypeLink: - targetPath := filepath.Join(extractDir, hdr.Linkname) - // check for hardlink breakout - if !strings.HasPrefix(targetPath, extractDir) { - return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) - } - if err := os.Link(targetPath, path); err != nil { - return err - } - - case tar.TypeSymlink: - // path -> hdr.Linkname = targetPath - // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file - targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) - - // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because - // that symlink would first have to be created, which would be caught earlier, at this very check: - if !strings.HasPrefix(targetPath, extractDir) { - return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) - } - if err := os.Symlink(hdr.Linkname, path); err != nil { - return err - } - - case tar.TypeXGlobalHeader: - logrus.Debug("PAX Global Extended Headers found and ignored") - return nil - - default: - return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) - } - - // Lchown is not supported on Windows. - if Lchown && runtime.GOOS != "windows" { - if chownOpts == nil { - chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid} - } - if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { - return err - } - } - - var errors []string - for key, value := range hdr.Xattrs { - if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { - if err == syscall.ENOTSUP || err == syscall.EPERM { - // We ignore errors here because not all graphdrivers support - // xattrs *cough* old versions of AUFS *cough*. 
However only - // ENOTSUP should be emitted in that case, otherwise we still - // bail. - // EPERM occurs if modifying xattrs is not allowed. This can - // happen when running in userns with restrictions (ChromeOS). - errors = append(errors, err.Error()) - continue - } - return err - } - - } - - if len(errors) > 0 { - logrus.WithFields(logrus.Fields{ - "errors": errors, - }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") - } - - // There is no LChmod, so ignore mode for symlink. Also, this - // must happen after chown, as that can modify the file mode - if err := handleLChmod(hdr, path, hdrInfo); err != nil { - return err - } - - aTime := hdr.AccessTime - if aTime.Before(hdr.ModTime) { - // Last access time should never be before last modified time. - aTime = hdr.ModTime - } - - // system.Chtimes doesn't support a NOFOLLOW flag atm - if hdr.Typeflag == tar.TypeLink { - if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { - if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { - return err - } - } - } else if hdr.Typeflag != tar.TypeSymlink { - if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { - return err - } - } else { - ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} - if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { - return err - } - } - return nil -} - -// Tar creates an archive from the directory at `path`, and returns it as a -// stream of bytes. -func Tar(path string, compression Compression) (io.ReadCloser, error) { - return TarWithOptions(path, &TarOptions{Compression: compression}) -} - -// TarWithOptions creates an archive from the directory at `path`, only including files whose relative -// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. -func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { - - // Fix the source path to work with long path names. This is a no-op - // on platforms other than Windows. - srcPath = fixVolumePathPrefix(srcPath) - - pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) - if err != nil { - return nil, err - } - - pipeReader, pipeWriter := io.Pipe() - - compressWriter, err := CompressStream(pipeWriter, options.Compression) - if err != nil { - return nil, err - } - - go func() { - ta := newTarAppender( - idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), - compressWriter, - options.ChownOpts, - ) - ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat) - - defer func() { - // Make sure to check the error on Close. - if err := ta.TarWriter.Close(); err != nil { - logrus.Errorf("Can't close tar writer: %s", err) - } - if err := compressWriter.Close(); err != nil { - logrus.Errorf("Can't close compress writer: %s", err) - } - if err := pipeWriter.Close(); err != nil { - logrus.Errorf("Can't close pipe writer: %s", err) - } - }() - - // this buffer is needed for the duration of this piped stream - defer pools.BufioWriter32KPool.Put(ta.Buffer) - - // In general we log errors here but ignore them because - // during e.g. a diff operation the container can continue - // mutating the filesystem and we can see transient errors - // from this - - stat, err := os.Lstat(srcPath) - if err != nil { - return - } - - if !stat.IsDir() { - // We can't later join a non-dir with any includes because the - // 'walk' will error if "file/." is stat-ed and "file" is not a - // directory. 
So, we must split the source path and use the - // basename as the include. - if len(options.IncludeFiles) > 0 { - logrus.Warn("Tar: Can't archive a file with includes") - } - - dir, base := SplitPathDirEntry(srcPath) - srcPath = dir - options.IncludeFiles = []string{base} - } - - if len(options.IncludeFiles) == 0 { - options.IncludeFiles = []string{"."} - } - - seen := make(map[string]bool) - - for _, include := range options.IncludeFiles { - rebaseName := options.RebaseNames[include] - - walkRoot := getWalkRoot(srcPath, include) - filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { - if err != nil { - logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) - return nil - } - - relFilePath, err := filepath.Rel(srcPath, filePath) - if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { - // Error getting relative path OR we are looking - // at the source directory path. Skip in both situations. - return nil - } - - if options.IncludeSourceDir && include == "." && relFilePath != "." { - relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) - } - - skip := false - - // If "include" is an exact match for the current file - // then even if there's an "excludePatterns" pattern that - // matches it, don't skip it. IOW, assume an explicit 'include' - // is asking for that file no matter what - which is true - // for some files, like .dockerignore and Dockerfile (sometimes) - if include != relFilePath { - skip, err = pm.Matches(relFilePath) - if err != nil { - logrus.Errorf("Error matching %s: %v", relFilePath, err) - return err - } - } - - if skip { - // If we want to skip this file and its a directory - // then we should first check to see if there's an - // excludes pattern (e.g. !dir/file) that starts with this - // dir. If so then we can't skip this dir. - - // Its not a dir then so we can just return/skip. - if !f.IsDir() { - return nil - } - - // No exceptions (!...) in patterns so just skip dir - if !pm.Exclusions() { - return filepath.SkipDir - } - - dirSlash := relFilePath + string(filepath.Separator) - - for _, pat := range pm.Patterns() { - if !pat.Exclusion() { - continue - } - if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { - // found a match - so can't skip this dir - return nil - } - } - - // No matching exclusion dir so just skip dir - return filepath.SkipDir - } - - if seen[relFilePath] { - return nil - } - seen[relFilePath] = true - - // Rename the base resource. - if rebaseName != "" { - var replacement string - if rebaseName != string(filepath.Separator) { - // Special case the root directory to replace with an - // empty string instead so that we don't end up with - // double slashes in the paths. - replacement = rebaseName - } - - relFilePath = strings.Replace(relFilePath, include, replacement, 1) - } - - if err := ta.addTarFile(filePath, relFilePath); err != nil { - logrus.Errorf("Can't add file %s to tar: %s", filePath, err) - // if pipe is broken, stop writing tar stream to it - if err == io.ErrClosedPipe { - return err - } - } - return nil - }) - } - }() - - return pipeReader, nil -} - -// Unpack unpacks the decompressedArchive to dest with options. 
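A sketch of the caller-side use of TarWithOptions above; the directory and patterns are placeholders, and the import path assumes the upstream package:

package main

import (
	"io"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	rc, err := archive.TarWithOptions("/some/dir", &archive.TarOptions{
		Compression:     archive.Gzip,
		ExcludePatterns: []string{"*.log", "tmp"},
		IncludeFiles:    []string{"."},
	})
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	io.Copy(os.Stdout, rc) // stream the resulting .tar.gz
}
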
-func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { - tr := tar.NewReader(decompressedArchive) - trBuf := pools.BufioReader32KPool.Get(nil) - defer pools.BufioReader32KPool.Put(trBuf) - - var dirs []*tar.Header - idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) - rootIDs := idMapping.RootPair() - whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat) - - // Iterate through the files in the archive. -loop: - for { - hdr, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - if err != nil { - return err - } - - // Normalize name, for safety and for a simple is-root check - // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: - // This keeps "..\" as-is, but normalizes "\..\" to "\". - hdr.Name = filepath.Clean(hdr.Name) - - for _, exclude := range options.ExcludePatterns { - if strings.HasPrefix(hdr.Name, exclude) { - continue loop - } - } - - // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in - // the filepath format for the OS on which the daemon is running. Hence - // the check for a slash-suffix MUST be done in an OS-agnostic way. - if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { - // Not the root directory, ensure that the parent directory exists - parent := filepath.Dir(hdr.Name) - parentPath := filepath.Join(dest, parent) - if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = idtools.MkdirAllAndChownNew(parentPath, 0777, rootIDs) - if err != nil { - return err - } - } - } - - path := filepath.Join(dest, hdr.Name) - rel, err := filepath.Rel(dest, path) - if err != nil { - return err - } - if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { - return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) - } - - // If path exits we almost always just want to remove and replace it - // The only exception is when it is a directory *and* the file from - // the layer is also a directory. Then we want to merge them (i.e. - // just apply the metadata from the layer). - if fi, err := os.Lstat(path); err == nil { - if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { - // If NoOverwriteDirNonDir is true then we cannot replace - // an existing directory with a non-directory from the archive. - return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) - } - - if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { - // If NoOverwriteDirNonDir is true then we cannot replace - // an existing non-directory with a directory from the archive. - return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) - } - - if fi.IsDir() && hdr.Name == "." 
{ - continue - } - - if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { - if err := os.RemoveAll(path); err != nil { - return err - } - } - } - trBuf.Reset(tr) - - if err := remapIDs(idMapping, hdr); err != nil { - return err - } - - if whiteoutConverter != nil { - writeFile, err := whiteoutConverter.ConvertRead(hdr, path) - if err != nil { - return err - } - if !writeFile { - continue - } - } - - if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { - return err - } - - // Directory mtimes must be handled at the end to avoid further - // file creation in them to modify the directory mtime - if hdr.Typeflag == tar.TypeDir { - dirs = append(dirs, hdr) - } - } - - for _, hdr := range dirs { - path := filepath.Join(dest, hdr.Name) - - if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { - return err - } - } - return nil -} - -// Untar reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive may be compressed with one of the following algorithms: -// identity (uncompressed), gzip, bzip2, xz. -// FIXME: specify behavior when target path exists vs. doesn't exist. -func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { - return untarHandler(tarArchive, dest, options, true) -} - -// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive must be an uncompressed stream. -func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { - return untarHandler(tarArchive, dest, options, false) -} - -// Handler for teasing out the automatic decompression -func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { - if tarArchive == nil { - return fmt.Errorf("Empty archive") - } - dest = filepath.Clean(dest) - if options == nil { - options = &TarOptions{} - } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } - - r := tarArchive - if decompress { - decompressedArchive, err := DecompressStream(tarArchive) - if err != nil { - return err - } - defer decompressedArchive.Close() - r = decompressedArchive - } - - return Unpack(r, dest, options) -} - -// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. -// If either Tar or Untar fails, TarUntar aborts and returns the error. -func (archiver *Archiver) TarUntar(src, dst string) error { - logrus.Debugf("TarUntar(%s %s)", src, dst) - archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) - if err != nil { - return err - } - defer archive.Close() - options := &TarOptions{ - UIDMaps: archiver.IDMapping.UIDs(), - GIDMaps: archiver.IDMapping.GIDs(), - } - return archiver.Untar(archive, dst, options) -} - -// UntarPath untar a file from path to a destination, src is the source tar file path. -func (archiver *Archiver) UntarPath(src, dst string) error { - archive, err := os.Open(src) - if err != nil { - return err - } - defer archive.Close() - options := &TarOptions{ - UIDMaps: archiver.IDMapping.UIDs(), - GIDMaps: archiver.IDMapping.GIDs(), - } - return archiver.Untar(archive, dst, options) -} - -// CopyWithTar creates a tar archive of filesystem path `src`, and -// unpacks it at filesystem path `dst`. -// The archive is streamed directly with fixed buffering and no -// intermediary disk IO. 
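A sketch of the Archiver entry points defined above (TarUntar and UntarPath); the directories are placeholders, and the import path assumes the upstream package:

package main

import "github.com/docker/docker/pkg/archive"

func main() {
	a := archive.NewDefaultArchiver()
	// Copy a tree: tar src and pipe it straight into untar at dst.
	if err := a.TarUntar("/src/tree", "/dst/tree"); err != nil {
		panic(err)
	}
	// Extract an existing tarball.
	if err := a.UntarPath("/tmp/layer.tar", "/dst/layer"); err != nil {
		panic(err)
	}
}
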
-func (archiver *Archiver) CopyWithTar(src, dst string) error { - srcSt, err := os.Stat(src) - if err != nil { - return err - } - if !srcSt.IsDir() { - return archiver.CopyFileWithTar(src, dst) - } - - // if this Archiver is set up with ID mapping we need to create - // the new destination directory with the remapped root UID/GID pair - // as owner - rootIDs := archiver.IDMapping.RootPair() - // Create dst, copy src's content into it - logrus.Debugf("Creating dest directory: %s", dst) - if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { - return err - } - logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) - return archiver.TarUntar(src, dst) -} - -// CopyFileWithTar emulates the behavior of the 'cp' command-line -// for a single file. It copies a regular file from path `src` to -// path `dst`, and preserves all its metadata. -func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { - logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) - srcSt, err := os.Stat(src) - if err != nil { - return err - } - - if srcSt.IsDir() { - return fmt.Errorf("Can't copy a directory") - } - - // Clean up the trailing slash. This must be done in an operating - // system specific manner. - if dst[len(dst)-1] == os.PathSeparator { - dst = filepath.Join(dst, filepath.Base(src)) - } - // Create the holding directory if necessary - if err := system.MkdirAll(filepath.Dir(dst), 0700, ""); err != nil { - return err - } - - r, w := io.Pipe() - errC := make(chan error, 1) - - go func() { - defer close(errC) - - errC <- func() error { - defer w.Close() - - srcF, err := os.Open(src) - if err != nil { - return err - } - defer srcF.Close() - - hdr, err := tar.FileInfoHeader(srcSt, "") - if err != nil { - return err - } - hdr.Format = tar.FormatPAX - hdr.ModTime = hdr.ModTime.Truncate(time.Second) - hdr.AccessTime = time.Time{} - hdr.ChangeTime = time.Time{} - hdr.Name = filepath.Base(dst) - hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) - - if err := remapIDs(archiver.IDMapping, hdr); err != nil { - return err - } - - tw := tar.NewWriter(w) - defer tw.Close() - if err := tw.WriteHeader(hdr); err != nil { - return err - } - if _, err := io.Copy(tw, srcF); err != nil { - return err - } - return nil - }() - }() - defer func() { - if er := <-errC; err == nil && er != nil { - err = er - } - }() - - err = archiver.Untar(r, filepath.Dir(dst), nil) - if err != nil { - r.CloseWithError(err) - } - return err -} - -// IdentityMapping returns the IdentityMapping of the archiver. -func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping { - return archiver.IDMapping -} - -func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error { - ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}) - hdr.Uid, hdr.Gid = ids.UID, ids.GID - return err -} - -// cmdStream executes a command, and returns its stdout as a stream. -// If the command fails to run or doesn't complete successfully, an error -// will be returned, including anything written on stderr. 
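// (Editorial sketch, not part of the vendored file: a plausible caller is
// the xz decompression path, which pipes a compressed stream through an
// external binary. Here "compressed" is a hypothetical io.Reader and an xz
// executable on PATH is assumed:
//
//	out, err := cmdStream(exec.Command("xz", "-d", "-c", "-q"), compressed)
//
// If the command exits non-zero, the returned pipe surfaces anything the
// command wrote to stderr as part of the error.)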
-func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { - cmd.Stdin = input - pipeR, pipeW := io.Pipe() - cmd.Stdout = pipeW - var errBuf bytes.Buffer - cmd.Stderr = &errBuf - - // Run the command and return the pipe - if err := cmd.Start(); err != nil { - return nil, err - } - - // Copy stdout to the returned pipe - go func() { - if err := cmd.Wait(); err != nil { - pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) - } else { - pipeW.Close() - } - }() - - return pipeR, nil -} - -// NewTempArchive reads the content of src into a temporary file, and returns the contents -// of that file as an archive. The archive can only be read once - as soon as reading completes, -// the file will be deleted. -func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { - f, err := ioutil.TempFile(dir, "") - if err != nil { - return nil, err - } - if _, err := io.Copy(f, src); err != nil { - return nil, err - } - if _, err := f.Seek(0, 0); err != nil { - return nil, err - } - st, err := f.Stat() - if err != nil { - return nil, err - } - size := st.Size() - return &TempArchive{File: f, Size: size}, nil -} - -// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, -// the file will be deleted. -type TempArchive struct { - *os.File - Size int64 // Pre-computed from Stat().Size() as a convenience - read int64 - closed bool -} - -// Close closes the underlying file if it's still open, or does a no-op -// to allow callers to try to close the TempArchive multiple times safely. -func (archive *TempArchive) Close() error { - if archive.closed { - return nil - } - - archive.closed = true - - return archive.File.Close() -} - -func (archive *TempArchive) Read(data []byte) (int, error) { - n, err := archive.File.Read(data) - archive.read += int64(n) - if err != nil || archive.read == archive.Size { - archive.Close() - os.Remove(archive.File.Name()) - } - return n, err -} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go deleted file mode 100644 index 970d4d06800..00000000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go +++ /dev/null @@ -1,92 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/system" - "golang.org/x/sys/unix" -) - -func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { - if format == OverlayWhiteoutFormat { - return overlayWhiteoutConverter{} - } - return nil -} - -type overlayWhiteoutConverter struct{} - -func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) { - // convert whiteouts to AUFS format - if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { - // we just rename the file and make it normal - dir, filename := filepath.Split(hdr.Name) - hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) - hdr.Mode = 0600 - hdr.Typeflag = tar.TypeReg - hdr.Size = 0 - } - - if fi.Mode()&os.ModeDir != 0 { - // convert opaque dirs to AUFS format by writing an empty file with the prefix - opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque") - if err != nil { - return nil, err - } - if len(opaque) == 1 && opaque[0] == 'y' { - if hdr.Xattrs != nil { - delete(hdr.Xattrs, "trusted.overlay.opaque") - } - - // create a header for the whiteout 
file - // it should inherit some properties from the parent, but be a regular file - wo = &tar.Header{ - Typeflag: tar.TypeReg, - Mode: hdr.Mode & int64(os.ModePerm), - Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), - Size: 0, - Uid: hdr.Uid, - Uname: hdr.Uname, - Gid: hdr.Gid, - Gname: hdr.Gname, - AccessTime: hdr.AccessTime, - ChangeTime: hdr.ChangeTime, - } - } - } - - return -} - -func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { - base := filepath.Base(path) - dir := filepath.Dir(path) - - // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay - if base == WhiteoutOpaqueDir { - err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0) - // don't write the file itself - return false, err - } - - // if a file was deleted and we are using overlay, we need to create a character device - if strings.HasPrefix(base, WhiteoutPrefix) { - originalBase := base[len(WhiteoutPrefix):] - originalPath := filepath.Join(dir, originalBase) - - if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { - return false, err - } - if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { - return false, err - } - - // don't write the file itself - return false, nil - } - - return true, nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_other.go b/vendor/github.com/docker/docker/pkg/archive/archive_other.go deleted file mode 100644 index 462dfc63232..00000000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive_other.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !linux - -package archive // import "github.com/docker/docker/pkg/archive" - -func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go deleted file mode 100644 index 1eec912b7b0..00000000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go +++ /dev/null @@ -1,114 +0,0 @@ -// +build !windows - -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "errors" - "os" - "path/filepath" - "syscall" - - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/system" - rsystem "github.com/opencontainers/runc/libcontainer/system" - "golang.org/x/sys/unix" -) - -// fixVolumePathPrefix does platform specific processing to ensure that if -// the path being passed in is not in a volume path format, convert it to one. -func fixVolumePathPrefix(srcPath string) string { - return srcPath -} - -// getWalkRoot calculates the root path when performing a TarWithOptions. -// We use a separate function as this is platform specific. On Linux, we -// can't use filepath.Join(srcPath,include) because this will clean away -// a trailing "." or "/" which may be important. -func getWalkRoot(srcPath string, include string) string { - return srcPath + string(filepath.Separator) + include -} - -// CanonicalTarNameForPath returns platform-specific filepath -// to canonical posix-style path for tar archival. p is relative -// path. -func CanonicalTarNameForPath(p string) string { - return p // already unix-style -} - -// chmodTarEntry is used to adjust the file permissions used in tar header based -// on the platform the archival is done. 
- -func chmodTarEntry(perm os.FileMode) os.FileMode { - return perm // noop for unix as golang APIs provide perm bits correctly -} - -func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { - s, ok := stat.(*syscall.Stat_t) - - if ok { - // Currently go does not fill in the major/minors - if s.Mode&unix.S_IFBLK != 0 || - s.Mode&unix.S_IFCHR != 0 { - hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) // nolint: unconvert - hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) // nolint: unconvert - } - } - - return -} - -func getInodeFromStat(stat interface{}) (inode uint64, err error) { - s, ok := stat.(*syscall.Stat_t) - - if ok { - inode = s.Ino - } - - return -} - -func getFileUIDGID(stat interface{}) (idtools.Identity, error) { - s, ok := stat.(*syscall.Stat_t) - - if !ok { - return idtools.Identity{}, errors.New("cannot convert stat value to syscall.Stat_t") - } - return idtools.Identity{UID: int(s.Uid), GID: int(s.Gid)}, nil -} - -// handleTarTypeBlockCharFifo is an OS-specific helper function used by -// createTarFile to handle the following types of header: Block; Char; Fifo -func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - if rsystem.RunningInUserNS() { - // cannot create a device if running in user namespace - return nil - } - - mode := uint32(hdr.Mode & 07777) - switch hdr.Typeflag { - case tar.TypeBlock: - mode |= unix.S_IFBLK - case tar.TypeChar: - mode |= unix.S_IFCHR - case tar.TypeFifo: - mode |= unix.S_IFIFO - } - - return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))) -} - -func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { - if hdr.Typeflag == tar.TypeLink { - if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { - if err := os.Chmod(path, hdrInfo.Mode()); err != nil { - return err - } - } - } else if hdr.Typeflag != tar.TypeSymlink { - if err := os.Chmod(path, hdrInfo.Mode()); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go deleted file mode 100644 index ae6b89fd719..00000000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go +++ /dev/null @@ -1,67 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "os" - "path/filepath" - - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/longpath" -) - -// fixVolumePathPrefix does platform specific processing to ensure that if -// the path being passed in is not in a volume path format, convert it to one. -func fixVolumePathPrefix(srcPath string) string { - return longpath.AddPrefix(srcPath) -} - -// getWalkRoot calculates the root path when performing a TarWithOptions. -// We use a separate function as this is platform specific. -func getWalkRoot(srcPath string, include string) string { - return filepath.Join(srcPath, include) -} - -// CanonicalTarNameForPath returns platform-specific filepath -// to canonical posix-style path for tar archival. p is relative -// path. -func CanonicalTarNameForPath(p string) string { - return filepath.ToSlash(p) -} - -// chmodTarEntry is used to adjust the file permissions used in tar header based -// on the platform the archival is done. -func chmodTarEntry(perm os.FileMode) os.FileMode { - //perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.) 
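// (Worked example, added for clarity and not part of the vendored file:
// with the masking below, 0644 becomes 0755 and 0600 becomes 0711; every
// entry gains the execute bits while the group and world write bits are
// cleared.)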
- permPart := perm & os.ModePerm - noPermPart := perm &^ os.ModePerm - // Add the x bit: make everything +x from windows - permPart |= 0111 - permPart &= 0755 - - return noPermPart | permPart -} - -func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { - // do nothing. no notion of Rdev, Nlink in stat on Windows - return -} - -func getInodeFromStat(stat interface{}) (inode uint64, err error) { - // do nothing. no notion of Inode in stat on Windows - return -} - -// handleTarTypeBlockCharFifo is an OS-specific helper function used by -// createTarFile to handle the following types of header: Block; Char; Fifo -func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - return nil -} - -func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { - return nil -} - -func getFileUIDGID(stat interface{}) (idtools.Identity, error) { - // no notion of file ownership mapping yet on Windows - return idtools.Identity{UID: 0, GID: 0}, nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go deleted file mode 100644 index aedb91b0352..00000000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes.go +++ /dev/null @@ -1,445 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "sort" - "strings" - "syscall" - "time" - - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/system" - "github.com/sirupsen/logrus" -) - -// ChangeType represents the change type. -type ChangeType int - -const ( - // ChangeModify represents the modify operation. - ChangeModify = iota - // ChangeAdd represents the add operation. - ChangeAdd - // ChangeDelete represents the delete operation. - ChangeDelete -) - -func (c ChangeType) String() string { - switch c { - case ChangeModify: - return "C" - case ChangeAdd: - return "A" - case ChangeDelete: - return "D" - } - return "" -} - -// Change represents a change, it wraps the change type and path. -// It describes changes of the files in the path respect to the -// parent layers. The change could be modify, add, delete. -// This is used for layer diff. -type Change struct { - Path string - Kind ChangeType -} - -func (change *Change) String() string { - return fmt.Sprintf("%s %s", change.Kind, change.Path) -} - -// for sort.Sort -type changesByPath []Change - -func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } -func (c changesByPath) Len() int { return len(c) } -func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } - -// Gnu tar doesn't have sub-second mtime precision. The go tar -// writer (1.10+) does when using PAX format, but we round times to seconds -// to ensure archives have the same hashes for backwards compatibility. -// See https://github.com/moby/moby/pull/35739/commits/fb170206ba12752214630b269a40ac7be6115ed4. -// -// Non-sub-second is problematic when we apply changes via tar -// files. 
We handle this by comparing for exact times, *or* same -// second count and either a or b having exactly 0 nanoseconds -func sameFsTime(a, b time.Time) bool { - return a.Equal(b) || - (a.Unix() == b.Unix() && - (a.Nanosecond() == 0 || b.Nanosecond() == 0)) -} - -func sameFsTimeSpec(a, b syscall.Timespec) bool { - return a.Sec == b.Sec && - (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) -} - -// Changes walks the path rw and determines changes for the files in the path, -// with respect to the parent layers -func Changes(layers []string, rw string) ([]Change, error) { - return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip) -} - -func aufsMetadataSkip(path string) (skip bool, err error) { - skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path) - if err != nil { - skip = true - } - return -} - -func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { - f := filepath.Base(path) - - // If there is a whiteout, then the file was removed - if strings.HasPrefix(f, WhiteoutPrefix) { - originalFile := f[len(WhiteoutPrefix):] - return filepath.Join(filepath.Dir(path), originalFile), nil - } - - return "", nil -} - -type skipChange func(string) (bool, error) -type deleteChange func(string, string, os.FileInfo) (string, error) - -func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) { - var ( - changes []Change - changedDirs = make(map[string]struct{}) - ) - - err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { - if err != nil { - return err - } - - // Rebase path - path, err = filepath.Rel(rw, path) - if err != nil { - return err - } - - // As this runs on the daemon side, file paths are OS specific. - path = filepath.Join(string(os.PathSeparator), path) - - // Skip root - if path == string(os.PathSeparator) { - return nil - } - - if sc != nil { - if skip, err := sc(path); skip { - return err - } - } - - change := Change{ - Path: path, - } - - deletedFile, err := dc(rw, path, f) - if err != nil { - return err - } - - // Find out what kind of modification happened - if deletedFile != "" { - change.Path = deletedFile - change.Kind = ChangeDelete - } else { - // Otherwise, the file was added - change.Kind = ChangeAdd - - // ...Unless it already existed in a top layer, in which case, it's a modification - for _, layer := range layers { - stat, err := os.Stat(filepath.Join(layer, path)) - if err != nil && !os.IsNotExist(err) { - return err - } - if err == nil { - // The file existed in the top layer, so that's a modification - - // However, if it's a directory, maybe it wasn't actually modified. - // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar - if stat.IsDir() && f.IsDir() { - if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { - // Both directories are the same, don't record the change - return nil - } - } - change.Kind = ChangeModify - break - } - } - } - - // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. - // This block is here to ensure the change is recorded even if the - // modify time, mode and size of the parent directory in the rw and ro layers are all equal. - // Check https://github.com/docker/docker/pull/13590 for details. 
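// (Illustration, not part of the vendored file: if only /foo/bar/file.txt
// is added in the rw layer, the walk below still records
// {/foo/bar, ChangeModify} ahead of {/foo/bar/file.txt, ChangeAdd}, so
// that consumers of the change list can restore the parent directory's
// metadata.)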
- if f.IsDir() { - changedDirs[path] = struct{}{} - } - if change.Kind == ChangeAdd || change.Kind == ChangeDelete { - parent := filepath.Dir(path) - if _, ok := changedDirs[parent]; !ok && parent != "/" { - changes = append(changes, Change{Path: parent, Kind: ChangeModify}) - changedDirs[parent] = struct{}{} - } - } - - // Record change - changes = append(changes, change) - return nil - }) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - return changes, nil -} - -// FileInfo describes the information of a file. -type FileInfo struct { - parent *FileInfo - name string - stat *system.StatT - children map[string]*FileInfo - capability []byte - added bool -} - -// LookUp looks up the file information of a file. -func (info *FileInfo) LookUp(path string) *FileInfo { - // As this runs on the daemon side, file paths are OS specific. - parent := info - if path == string(os.PathSeparator) { - return info - } - - pathElements := strings.Split(path, string(os.PathSeparator)) - for _, elem := range pathElements { - if elem != "" { - child := parent.children[elem] - if child == nil { - return nil - } - parent = child - } - } - return parent -} - -func (info *FileInfo) path() string { - if info.parent == nil { - // As this runs on the daemon side, file paths are OS specific. - return string(os.PathSeparator) - } - return filepath.Join(info.parent.path(), info.name) -} - -func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { - - sizeAtEntry := len(*changes) - - if oldInfo == nil { - // add - change := Change{ - Path: info.path(), - Kind: ChangeAdd, - } - *changes = append(*changes, change) - info.added = true - } - - // We make a copy so we can modify it to detect additions - // also, we only recurse on the old dir if the new info is a directory - // otherwise any previous delete/change is considered recursive - oldChildren := make(map[string]*FileInfo) - if oldInfo != nil && info.isDir() { - for k, v := range oldInfo.children { - oldChildren[k] = v - } - } - - for name, newChild := range info.children { - oldChild := oldChildren[name] - if oldChild != nil { - // change? - oldStat := oldChild.stat - newStat := newChild.stat - // Note: We can't compare inode or ctime or blocksize here, because these change - // when copying a file into a container. However, that is not generally a problem - // because any content change will change mtime, and any status change should - // be visible when actually comparing the stat fields. The only time this - // breaks down is if some code intentionally hides a change by setting - // back mtime - if statDifferent(oldStat, newStat) || - !bytes.Equal(oldChild.capability, newChild.capability) { - change := Change{ - Path: newChild.path(), - Kind: ChangeModify, - } - *changes = append(*changes, change) - newChild.added = true - } - - // Remove from copy so we can detect deletions - delete(oldChildren, name) - } - - newChild.addChanges(oldChild, changes) - } - for _, oldChild := range oldChildren { - // delete - change := Change{ - Path: oldChild.path(), - Kind: ChangeDelete, - } - *changes = append(*changes, change) - } - - // If there were changes inside this directory, we need to add it, even if the directory - // itself wasn't changed. This is needed to properly save and restore filesystem permissions. - // As this runs on the daemon side, file paths are OS specific. 
- if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { - change := Change{ - Path: info.path(), - Kind: ChangeModify, - } - // Let's insert the directory entry before the recently added entries located inside this dir - *changes = append(*changes, change) // just to resize the slice, will be overwritten - copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) - (*changes)[sizeAtEntry] = change - } - -} - -// Changes add changes to file information. -func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { - var changes []Change - - info.addChanges(oldInfo, &changes) - - return changes -} - -func newRootFileInfo() *FileInfo { - // As this runs on the daemon side, file paths are OS specific. - root := &FileInfo{ - name: string(os.PathSeparator), - children: make(map[string]*FileInfo), - } - return root -} - -// ChangesDirs compares two directories and generates an array of Change objects describing the changes. -// If oldDir is "", then all files in newDir will be Add-Changes. -func ChangesDirs(newDir, oldDir string) ([]Change, error) { - var ( - oldRoot, newRoot *FileInfo - ) - if oldDir == "" { - emptyDir, err := ioutil.TempDir("", "empty") - if err != nil { - return nil, err - } - defer os.Remove(emptyDir) - oldDir = emptyDir - } - oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir) - if err != nil { - return nil, err - } - - return newRoot.Changes(oldRoot), nil -} - -// ChangesSize calculates the size in bytes of the provided changes, based on newDir. -func ChangesSize(newDir string, changes []Change) int64 { - var ( - size int64 - sf = make(map[uint64]struct{}) - ) - for _, change := range changes { - if change.Kind == ChangeModify || change.Kind == ChangeAdd { - file := filepath.Join(newDir, change.Path) - fileInfo, err := os.Lstat(file) - if err != nil { - logrus.Errorf("Can not stat %q: %s", file, err) - continue - } - - if fileInfo != nil && !fileInfo.IsDir() { - if hasHardlinks(fileInfo) { - inode := getIno(fileInfo) - if _, ok := sf[inode]; !ok { - size += fileInfo.Size() - sf[inode] = struct{}{} - } - } else { - size += fileInfo.Size() - } - } - } - } - return size -} - -// ExportChanges produces an Archive from the provided changes, relative to dir. -func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) { - reader, writer := io.Pipe() - go func() { - ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil) - - // this buffer is needed for the duration of this piped stream - defer pools.BufioWriter32KPool.Put(ta.Buffer) - - sort.Sort(changesByPath(changes)) - - // In general we log errors here but ignore them because - // during e.g. 
a diff operation the container can continue - // mutating the filesystem and we can see transient errors - // from this - for _, change := range changes { - if change.Kind == ChangeDelete { - whiteOutDir := filepath.Dir(change.Path) - whiteOutBase := filepath.Base(change.Path) - whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase) - timestamp := time.Now() - hdr := &tar.Header{ - Name: whiteOut[1:], - Size: 0, - ModTime: timestamp, - AccessTime: timestamp, - ChangeTime: timestamp, - } - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - logrus.Debugf("Can't write whiteout header: %s", err) - } - } else { - path := filepath.Join(dir, change.Path) - if err := ta.addTarFile(path, change.Path[1:]); err != nil { - logrus.Debugf("Can't add file %s to tar: %s", path, err) - } - } - } - - // Make sure to check the error on Close. - if err := ta.TarWriter.Close(); err != nil { - logrus.Debugf("Can't close layer: %s", err) - } - if err := writer.Close(); err != nil { - logrus.Debugf("failed close Changes writer: %s", err) - } - }() - return reader, nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go deleted file mode 100644 index f8792b3d4e5..00000000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go +++ /dev/null @@ -1,286 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "bytes" - "fmt" - "os" - "path/filepath" - "sort" - "syscall" - "unsafe" - - "github.com/docker/docker/pkg/system" - "golang.org/x/sys/unix" -) - -// walker is used to implement collectFileInfoForChanges on linux. Where this -// method in general returns the entire contents of two directory trees, we -// optimize some FS calls out on linux. In particular, we take advantage of the -// fact that getdents(2) returns the inode of each file in the directory being -// walked, which, when walking two trees in parallel to generate a list of -// changes, can be used to prune subtrees without ever having to lstat(2) them -// directly. Eliminating stat calls in this way can save up to seconds on large -// images. -type walker struct { - dir1 string - dir2 string - root1 *FileInfo - root2 *FileInfo -} - -// collectFileInfoForChanges returns a complete representation of the trees -// rooted at dir1 and dir2, with one important exception: any subtree or -// leaf where the inode and device numbers are an exact match between dir1 -// and dir2 will be pruned from the results. This method is *only* to be used -// to generating a list of changes between the two directories, as it does not -// reflect the full contents. -func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) { - w := &walker{ - dir1: dir1, - dir2: dir2, - root1: newRootFileInfo(), - root2: newRootFileInfo(), - } - - i1, err := os.Lstat(w.dir1) - if err != nil { - return nil, nil, err - } - i2, err := os.Lstat(w.dir2) - if err != nil { - return nil, nil, err - } - - if err := w.walk("/", i1, i2); err != nil { - return nil, nil, err - } - - return w.root1, w.root2, nil -} - -// Given a FileInfo, its path info, and a reference to the root of the tree -// being constructed, register this file with the tree. 
-func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { - if fi == nil { - return nil - } - parent := root.LookUp(filepath.Dir(path)) - if parent == nil { - return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path) - } - info := &FileInfo{ - name: filepath.Base(path), - children: make(map[string]*FileInfo), - parent: parent, - } - cpath := filepath.Join(dir, path) - stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) - if err != nil { - return err - } - info.stat = stat - info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access - parent.children[info.name] = info - return nil -} - -// Walk a subtree rooted at the same path in both trees being iterated. For -// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d -func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { - // Register these nodes with the return trees, unless we're still at the - // (already-created) roots: - if path != "/" { - if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { - return err - } - if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { - return err - } - } - - is1Dir := i1 != nil && i1.IsDir() - is2Dir := i2 != nil && i2.IsDir() - - sameDevice := false - if i1 != nil && i2 != nil { - si1 := i1.Sys().(*syscall.Stat_t) - si2 := i2.Sys().(*syscall.Stat_t) - if si1.Dev == si2.Dev { - sameDevice = true - } - } - - // If these files are both non-existent, or leaves (non-dirs), we are done. - if !is1Dir && !is2Dir { - return nil - } - - // Fetch the names of all the files contained in both directories being walked: - var names1, names2 []nameIno - if is1Dir { - names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access - if err != nil { - return err - } - } - if is2Dir { - names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access - if err != nil { - return err - } - } - - // We have lists of the files contained in both parallel directories, sorted - // in the same order. Walk them in parallel, generating a unique merged list - // of all items present in either or both directories. 
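// (Illustration, not part of the vendored file: with names1 = [a b d] and
// names2 = [b c d], where b and d carry matching inodes on the same device,
// the merge below yields names = [a c]; entries identical in both trees are
// pruned and never recursed into.)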
- var names []string - ix1 := 0 - ix2 := 0 - - for { - if ix1 >= len(names1) { - break - } - if ix2 >= len(names2) { - break - } - - ni1 := names1[ix1] - ni2 := names2[ix2] - - switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { - case -1: // ni1 < ni2 -- advance ni1 - // we will not encounter ni1 in names2 - names = append(names, ni1.name) - ix1++ - case 0: // ni1 == ni2 - if ni1.ino != ni2.ino || !sameDevice { - names = append(names, ni1.name) - } - ix1++ - ix2++ - case 1: // ni1 > ni2 -- advance ni2 - // we will not encounter ni2 in names1 - names = append(names, ni2.name) - ix2++ - } - } - for ix1 < len(names1) { - names = append(names, names1[ix1].name) - ix1++ - } - for ix2 < len(names2) { - names = append(names, names2[ix2].name) - ix2++ - } - - // For each of the names present in either or both of the directories being - // iterated, stat the name under each root, and recurse the pair of them: - for _, name := range names { - fname := filepath.Join(path, name) - var cInfo1, cInfo2 os.FileInfo - if is1Dir { - cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access - if err != nil && !os.IsNotExist(err) { - return err - } - } - if is2Dir { - cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access - if err != nil && !os.IsNotExist(err) { - return err - } - } - if err = w.walk(fname, cInfo1, cInfo2); err != nil { - return err - } - } - return nil -} - -// {name,inode} pairs used to support the early-pruning logic of the walker type -type nameIno struct { - name string - ino uint64 -} - -type nameInoSlice []nameIno - -func (s nameInoSlice) Len() int { return len(s) } -func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } - -// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode -// numbers further up the stack when reading directory contents. Unlike -// os.Readdirnames, which returns a list of filenames, this function returns a -// list of {filename,inode} pairs. -func readdirnames(dirname string) (names []nameIno, err error) { - var ( - size = 100 - buf = make([]byte, 4096) - nbuf int - bufp int - nb int - ) - - f, err := os.Open(dirname) - if err != nil { - return nil, err - } - defer f.Close() - - names = make([]nameIno, 0, size) // Empty with room to grow. - for { - // Refill the buffer if necessary - if bufp >= nbuf { - bufp = 0 - nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux - if nbuf < 0 { - nbuf = 0 - } - if err != nil { - return nil, os.NewSyscallError("readdirent", err) - } - if nbuf <= 0 { - break // EOF - } - } - - // Drain the buffer - nb, names = parseDirent(buf[bufp:nbuf], names) - bufp += nb - } - - sl := nameInoSlice(names) - sort.Sort(sl) - return sl, nil -} - -// parseDirent is a minor modification of unix.ParseDirent (linux version) -// which returns {name,inode} pairs instead of just names. -func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { - origlen := len(buf) - for len(buf) > 0 { - dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0])) - buf = buf[dirent.Reclen:] - if dirent.Ino == 0 { // File absent in directory. - continue - } - bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) - var name = string(bytes[0:clen(bytes[:])]) - if name == "." || name == ".." 
{ // Useless names - continue - } - names = append(names, nameIno{name, dirent.Ino}) - } - return origlen - len(buf), names -} - -func clen(n []byte) int { - for i := 0; i < len(n); i++ { - if n[i] == 0 { - return i - } - } - return len(n) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_other.go b/vendor/github.com/docker/docker/pkg/archive/changes_other.go deleted file mode 100644 index ba744741cd0..00000000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes_other.go +++ /dev/null @@ -1,97 +0,0 @@ -// +build !linux - -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "fmt" - "os" - "path/filepath" - "runtime" - "strings" - - "github.com/docker/docker/pkg/system" -) - -func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) { - var ( - oldRoot, newRoot *FileInfo - err1, err2 error - errs = make(chan error, 2) - ) - go func() { - oldRoot, err1 = collectFileInfo(oldDir) - errs <- err1 - }() - go func() { - newRoot, err2 = collectFileInfo(newDir) - errs <- err2 - }() - - // block until both routines have returned - for i := 0; i < 2; i++ { - if err := <-errs; err != nil { - return nil, nil, err - } - } - - return oldRoot, newRoot, nil -} - -func collectFileInfo(sourceDir string) (*FileInfo, error) { - root := newRootFileInfo() - - err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { - if err != nil { - return err - } - - // Rebase path - relPath, err := filepath.Rel(sourceDir, path) - if err != nil { - return err - } - - // As this runs on the daemon side, file paths are OS specific. - relPath = filepath.Join(string(os.PathSeparator), relPath) - - // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. - // Temporary workaround. If the returned path starts with two backslashes, - // trim it down to a single backslash. Only relevant on Windows. 
- if runtime.GOOS == "windows" { - if strings.HasPrefix(relPath, `\\`) { - relPath = relPath[1:] - } - } - - if relPath == string(os.PathSeparator) { - return nil - } - - parent := root.LookUp(filepath.Dir(relPath)) - if parent == nil { - return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) - } - - info := &FileInfo{ - name: filepath.Base(relPath), - children: make(map[string]*FileInfo), - parent: parent, - } - - s, err := system.Lstat(path) - if err != nil { - return err - } - info.stat = s - - info.capability, _ = system.Lgetxattr(path, "security.capability") - - parent.children[info.name] = info - - return nil - }) - if err != nil { - return nil, err - } - return root, nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go deleted file mode 100644 index 06217b7161e..00000000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go +++ /dev/null @@ -1,43 +0,0 @@ -// +build !windows - -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "os" - "syscall" - - "github.com/docker/docker/pkg/system" - "golang.org/x/sys/unix" -) - -func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { - // Don't look at size for dirs, its not a good measure of change - if oldStat.Mode() != newStat.Mode() || - oldStat.UID() != newStat.UID() || - oldStat.GID() != newStat.GID() || - oldStat.Rdev() != newStat.Rdev() || - // Don't look at size or modification time for dirs, its not a good - // measure of change. See https://github.com/moby/moby/issues/9874 - // for a description of the issue with modification time, and - // https://github.com/moby/moby/pull/11422 for the change. - // (Note that in the Windows implementation of this function, - // modification time IS taken as a change). See - // https://github.com/moby/moby/pull/37982 for more information. - (oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR && - (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) { - return true - } - return false -} - -func (info *FileInfo) isDir() bool { - return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0 -} - -func getIno(fi os.FileInfo) uint64 { - return fi.Sys().(*syscall.Stat_t).Ino -} - -func hasHardlinks(fi os.FileInfo) bool { - return fi.Sys().(*syscall.Stat_t).Nlink > 1 -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go deleted file mode 100644 index 9906685e4b0..00000000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go +++ /dev/null @@ -1,34 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "os" - - "github.com/docker/docker/pkg/system" -) - -func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { - // Note there is slight difference between the Linux and Windows - // implementations here. Due to https://github.com/moby/moby/issues/9874, - // and the fix at https://github.com/moby/moby/pull/11422, Linux does not - // consider a change to the directory time as a change. Windows on NTFS - // does. See https://github.com/moby/moby/pull/37982 for more information. 
- - if !sameFsTime(oldStat.Mtim(), newStat.Mtim()) || - oldStat.Mode() != newStat.Mode() || - oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() { - return true - } - return false -} - -func (info *FileInfo) isDir() bool { - return info.parent == nil || info.stat.Mode().IsDir() -} - -func getIno(fi os.FileInfo) (inode uint64) { - return -} - -func hasHardlinks(fi os.FileInfo) bool { - return false -} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go deleted file mode 100644 index d0f13ca79be..00000000000 --- a/vendor/github.com/docker/docker/pkg/archive/copy.go +++ /dev/null @@ -1,472 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "errors" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/system" - "github.com/sirupsen/logrus" -) - -// Errors used or returned by this file. -var ( - ErrNotDirectory = errors.New("not a directory") - ErrDirNotExists = errors.New("no such directory") - ErrCannotCopyDir = errors.New("cannot copy directory") - ErrInvalidCopySource = errors.New("invalid copy source content") -) - -// PreserveTrailingDotOrSeparator returns the given cleaned path (after -// processing using any utility functions from the path or filepath stdlib -// packages) and appends a trailing `/.` or `/` if its corresponding original -// path (from before being processed by utility functions from the path or -// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned -// path already ends in a `.` path segment, then another is not added. If the -// clean path already ends in the separator, then another is not added. -func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string, sep byte) string { - // Ensure paths are in platform semantics - cleanedPath = strings.Replace(cleanedPath, "/", string(sep), -1) - originalPath = strings.Replace(originalPath, "/", string(sep), -1) - - if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { - if !hasTrailingPathSeparator(cleanedPath, sep) { - // Add a separator if it doesn't already end with one (a cleaned - // path would only end in a separator if it is the root). - cleanedPath += string(sep) - } - cleanedPath += "." - } - - if !hasTrailingPathSeparator(cleanedPath, sep) && hasTrailingPathSeparator(originalPath, sep) { - cleanedPath += string(sep) - } - - return cleanedPath -} - -// assertsDirectory returns whether the given path is -// asserted to be a directory, i.e., the path ends with -// a trailing '/' or `/.`, assuming a path separator of `/`. -func assertsDirectory(path string, sep byte) bool { - return hasTrailingPathSeparator(path, sep) || specifiesCurrentDir(path) -} - -// hasTrailingPathSeparator returns whether the given -// path ends with the system's path separator character. -func hasTrailingPathSeparator(path string, sep byte) bool { - return len(path) > 0 && path[len(path)-1] == sep -} - -// specifiesCurrentDir returns whether the given path specifies -// a "current directory", i.e., the last path segment is `.`. -func specifiesCurrentDir(path string) bool { - return filepath.Base(path) == "." -} - -// SplitPathDirEntry splits the given path between its directory name and its -// basename by first cleaning the path but preserves a trailing "." if the -// original path specified the current directory. 
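// (Examples, added for clarity and not part of the vendored file, assuming
// a "/" separator: SplitPathDirEntry("/foo/bar") yields ("/foo", "bar"),
// while SplitPathDirEntry("/foo/bar/.") yields ("/foo/bar", ".").)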
-func SplitPathDirEntry(path string) (dir, base string) { - cleanedPath := filepath.Clean(filepath.FromSlash(path)) - - if specifiesCurrentDir(path) { - cleanedPath += string(os.PathSeparator) + "." - } - - return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) -} - -// TarResource archives the resource described by the given CopyInfo to a Tar -// archive. A non-nil error is returned if sourcePath does not exist or is -// asserted to be a directory but exists as another type of file. -// -// This function acts as a convenient wrapper around TarWithOptions, which -// requires a directory as the source path. TarResource accepts either a -// directory or a file path and correctly sets the Tar options. -func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) { - return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName) -} - -// TarResourceRebase is like TarResource but renames the first path element of -// items in the resulting tar archive to match the given rebaseName if not "". -func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) { - sourcePath = normalizePath(sourcePath) - if _, err = os.Lstat(sourcePath); err != nil { - // Catches the case where the source does not exist or is not a - // directory if asserted to be a directory, as this also causes an - // error. - return - } - - // Separate the source path between its directory and - // the entry in that directory which we are archiving. - sourceDir, sourceBase := SplitPathDirEntry(sourcePath) - opts := TarResourceRebaseOpts(sourceBase, rebaseName) - - logrus.Debugf("copying %q from %q", sourceBase, sourceDir) - return TarWithOptions(sourceDir, opts) -} - -// TarResourceRebaseOpts does not perform the Tar, but instead just creates the rebase -// parameters to be sent to TarWithOptions (the TarOptions struct). -func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions { - filter := []string{sourceBase} - return &TarOptions{ - Compression: Uncompressed, - IncludeFiles: filter, - IncludeSourceDir: true, - RebaseNames: map[string]string{ - sourceBase: rebaseName, - }, - } -} - -// CopyInfo holds basic info about the source -// or destination path of a copy operation. -type CopyInfo struct { - Path string - Exists bool - IsDir bool - RebaseName string -} - -// CopyInfoSourcePath stats the given path to create a CopyInfo -// struct representing that resource for the source of an archive copy -// operation. The given path should be an absolute local path. A source path -// has all symlinks evaluated that appear before the last path separator ("/" -// on Unix). As it is to be a copy source, the path must exist. -func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) { - // normalize the file path and then evaluate the symbol link - // we will use the target file instead of the symbol link if - // followLink is set - path = normalizePath(path) - - resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink) - if err != nil { - return CopyInfo{}, err - } - - stat, err := os.Lstat(resolvedPath) - if err != nil { - return CopyInfo{}, err - } - - return CopyInfo{ - Path: resolvedPath, - Exists: true, - IsDir: stat.IsDir(), - RebaseName: rebaseName, - }, nil -} - -// CopyInfoDestinationPath stats the given path to create a CopyInfo -// struct representing that resource for the destination of an archive copy -// operation. The given path should be an absolute local path.
-func CopyInfoDestinationPath(path string) (info CopyInfo, err error) { - maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot. - path = normalizePath(path) - originalPath := path - - stat, err := os.Lstat(path) - - if err == nil && stat.Mode()&os.ModeSymlink == 0 { - // The path exists and is not a symlink. - return CopyInfo{ - Path: path, - Exists: true, - IsDir: stat.IsDir(), - }, nil - } - - // While the path is a symlink. - for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ { - if n > maxSymlinkIter { - // Don't follow symlinks more than this arbitrary number of times. - return CopyInfo{}, errors.New("too many symlinks in " + originalPath) - } - - // The path is a symbolic link. We need to evaluate it so that the - // destination of the copy operation is the link target and not the - // link itself. This is notably different than CopyInfoSourcePath which - // only evaluates symlinks before the last appearing path separator. - // Also note that it is okay if the last path element is a broken - // symlink as the copy operation should create the target. - var linkTarget string - - linkTarget, err = os.Readlink(path) - if err != nil { - return CopyInfo{}, err - } - - if !system.IsAbs(linkTarget) { - // Join with the parent directory. - dstParent, _ := SplitPathDirEntry(path) - linkTarget = filepath.Join(dstParent, linkTarget) - } - - path = linkTarget - stat, err = os.Lstat(path) - } - - if err != nil { - // It's okay if the destination path doesn't exist. We can still - // continue the copy operation if the parent directory exists. - if !os.IsNotExist(err) { - return CopyInfo{}, err - } - - // Ensure destination parent dir exists. - dstParent, _ := SplitPathDirEntry(path) - - parentDirStat, err := os.Stat(dstParent) - if err != nil { - return CopyInfo{}, err - } - if !parentDirStat.IsDir() { - return CopyInfo{}, ErrNotDirectory - } - - return CopyInfo{Path: path}, nil - } - - // The path exists after resolving symlinks. - return CopyInfo{ - Path: path, - Exists: true, - IsDir: stat.IsDir(), - }, nil -} - -// PrepareArchiveCopy prepares the given srcContent archive, which should -// contain the archived resource described by srcInfo, to the destination -// described by dstInfo. Returns the possibly modified content archive along -// with the path to the destination directory which it should be extracted to. -func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) { - // Ensure in platform semantics - srcInfo.Path = normalizePath(srcInfo.Path) - dstInfo.Path = normalizePath(dstInfo.Path) - - // Separate the destination path between its directory and base - // components in case the source archive contents need to be rebased. - dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) - _, srcBase := SplitPathDirEntry(srcInfo.Path) - - switch { - case dstInfo.Exists && dstInfo.IsDir: - // The destination exists as a directory. No alteration - // to srcContent is needed as its contents can be - // simply extracted to the destination directory. - return dstInfo.Path, ioutil.NopCloser(srcContent), nil - case dstInfo.Exists && srcInfo.IsDir: - // The destination exists as some type of file and the source - // content is a directory. This is an error condition since - // you cannot copy a directory to an existing file location. - return "", nil, ErrCannotCopyDir - case dstInfo.Exists: - // The destination exists as some type of file and the source content - // is also a file. 
The source content entry will have to be renamed to - // have a basename which matches the destination path's basename. - if len(srcInfo.RebaseName) != 0 { - srcBase = srcInfo.RebaseName - } - return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil - case srcInfo.IsDir: - // The destination does not exist and the source content is an archive - // of a directory. The archive should be extracted to the parent of - // the destination path instead, and when it is, the directory that is - // created as a result should take the name of the destination path. - // The source content entries will have to be renamed to have a - // basename which matches the destination path's basename. - if len(srcInfo.RebaseName) != 0 { - srcBase = srcInfo.RebaseName - } - return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil - case assertsDirectory(dstInfo.Path, os.PathSeparator): - // The destination does not exist and is asserted to be created as a - // directory, but the source content is not a directory. This is an - // error condition since you cannot create a directory from a file - // source. - return "", nil, ErrDirNotExists - default: - // The last remaining case is when the destination does not exist, is - // not asserted to be a directory, and the source content is not an - // archive of a directory. In this case, the destination file will need - // to be created when the archive is extracted and the source content - // entry will have to be renamed to have a basename which matches the - // destination path's basename. - if len(srcInfo.RebaseName) != 0 { - srcBase = srcInfo.RebaseName - } - return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil - } - -} - -// RebaseArchiveEntries rewrites the given srcContent archive replacing -// an occurrence of oldBase with newBase at the beginning of entry names. -func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser { - if oldBase == string(os.PathSeparator) { - // If oldBase specifies the root directory, use an empty string as - // oldBase instead so that newBase doesn't replace the path separator - // that all paths will start with. - oldBase = "" - } - - rebased, w := io.Pipe() - - go func() { - srcTar := tar.NewReader(srcContent) - rebasedTar := tar.NewWriter(w) - - for { - hdr, err := srcTar.Next() - if err == io.EOF { - // Signals end of archive. - rebasedTar.Close() - w.Close() - return - } - if err != nil { - w.CloseWithError(err) - return - } - - hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1) - if hdr.Typeflag == tar.TypeLink { - hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1) - } - - if err = rebasedTar.WriteHeader(hdr); err != nil { - w.CloseWithError(err) - return - } - - if _, err = io.Copy(rebasedTar, srcTar); err != nil { - w.CloseWithError(err) - return - } - } - }() - - return rebased -} - -// TODO @gupta-ak. These might have to be changed in the future to be -// continuity driver aware as well to support LCOW. - -// CopyResource performs an archive copy from the given source path to the -// given destination path. The source path MUST exist and the destination -// path's parent directory must exist. -func CopyResource(srcPath, dstPath string, followLink bool) error { - var ( - srcInfo CopyInfo - err error - ) - - // Ensure in platform semantics - srcPath = normalizePath(srcPath) - dstPath = normalizePath(dstPath) - - // Clean the source and destination paths.
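// (Illustration, not part of the vendored file: plain cleaning would drop a
// trailing separator or "/." that signals copy-the-contents semantics; for
// example, PreserveTrailingDotOrSeparator("/a/b", "/a/b/.", '/') restores
// "/a/b/." after filepath.Clean has reduced the original path to "/a/b".)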
- srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath, os.PathSeparator) - dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath, os.PathSeparator) - - if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil { - return err - } - - content, err := TarResource(srcInfo) - if err != nil { - return err - } - defer content.Close() - - return CopyTo(content, srcInfo, dstPath) -} - -// CopyTo handles extracting the given content whose -// entries should be sourced from srcInfo to dstPath. -func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error { - // The destination path need not exist, but CopyInfoDestinationPath will - // ensure that at least the parent directory exists. - dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath)) - if err != nil { - return err - } - - dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo) - if err != nil { - return err - } - defer copyArchive.Close() - - options := &TarOptions{ - NoLchown: true, - NoOverwriteDirNonDir: true, - } - - return Untar(copyArchive, dstDir, options) -} - -// ResolveHostSourcePath decides which real path needs to be copied, based on -// parameters such as whether to follow a symlink. If followLink is true, -// resolvedPath is the link target of any symlink file; otherwise only the -// symlink of the parent directory is resolved, and the symlink file itself -// is returned without being resolved. -func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) { - if followLink { - resolvedPath, err = filepath.EvalSymlinks(path) - if err != nil { - return - } - - resolvedPath, rebaseName = GetRebaseName(path, resolvedPath) - } else { - dirPath, basePath := filepath.Split(path) - - // if not following the symlink, resolve the symlink of the parent dir - var resolvedDirPath string - resolvedDirPath, err = filepath.EvalSymlinks(dirPath) - if err != nil { - return - } - // resolvedDirPath will have been cleaned (no trailing path separators) so - // we can manually join it with the base path element. - resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath - if hasTrailingPathSeparator(path, os.PathSeparator) && - filepath.Base(path) != filepath.Base(resolvedPath) { - rebaseName = filepath.Base(path) - } - } - return resolvedPath, rebaseName, nil -} - -// GetRebaseName normalizes and compares path and resolvedPath, and -// returns the completed resolved path and the rebased file name. -func GetRebaseName(path, resolvedPath string) (string, string) { - // linkTarget will have been cleaned (no trailing path separators and dot) so - // we can manually join it with them - var rebaseName string - if specifiesCurrentDir(path) && - !specifiesCurrentDir(resolvedPath) { - resolvedPath += string(filepath.Separator) + "." - } - - if hasTrailingPathSeparator(path, os.PathSeparator) && - !hasTrailingPathSeparator(resolvedPath, os.PathSeparator) { - resolvedPath += string(filepath.Separator) - } - - if filepath.Base(path) != filepath.Base(resolvedPath) { - // In the case where the path had a trailing separator and a symlink - // evaluation has changed the last path component, we will need to - // rebase the name in the archive that is being copied to match the - // originally requested name.
- rebaseName = filepath.Base(path) - } - return resolvedPath, rebaseName -} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go deleted file mode 100644 index 3958364f5ba..00000000000 --- a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !windows - -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "path/filepath" -) - -func normalizePath(path string) string { - return filepath.ToSlash(path) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go deleted file mode 100644 index a878d1bac42..00000000000 --- a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go +++ /dev/null @@ -1,9 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "path/filepath" -) - -func normalizePath(path string) string { - return filepath.FromSlash(path) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go deleted file mode 100644 index 146e21fe18c..00000000000 --- a/vendor/github.com/docker/docker/pkg/archive/diff.go +++ /dev/null @@ -1,260 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "strings" - - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/system" - "github.com/sirupsen/logrus" -) - -// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be -// compressed or uncompressed. -// Returns the size in bytes of the contents of the layer. -func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) { - tr := tar.NewReader(layer) - trBuf := pools.BufioReader32KPool.Get(tr) - defer pools.BufioReader32KPool.Put(trBuf) - - var dirs []*tar.Header - unpackedPaths := make(map[string]struct{}) - - if options == nil { - options = &TarOptions{} - } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } - idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) - - aufsTempdir := "" - aufsHardlinks := make(map[string]*tar.Header) - - // Iterate through the files in the archive. - for { - hdr, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - if err != nil { - return 0, err - } - - size += hdr.Size - - // Normalize name, for safety and for a simple is-root check - hdr.Name = filepath.Clean(hdr.Name) - - // Windows does not support filenames with colons in them. Ignore - // these files. This is not a problem though (although it might - // appear that it is). Let's suppose a client is running docker pull. - // The daemon it points to is Windows. Would it make sense for the - // client to be doing a docker pull Ubuntu for example (which has files - // with colons in the name under /usr/share/man/man3)? No, absolutely - // not as it would really only make sense that they were pulling a - // Windows image. However, for development, it is necessary to be able - // to pull Linux images which are in the repository. - // - // TODO Windows. 
Once the registry is aware of what images are Windows- - // specific or Linux-specific, this warning should be changed to an error - // to cater for the situation where someone does manage to upload a Linux - // image but have it tagged as Windows inadvertently. - if runtime.GOOS == "windows" { - if strings.Contains(hdr.Name, ":") { - logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) - continue - } - } - - // Note as these operations are platform specific, so must the slash be. - if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { - // Not the root directory, ensure that the parent directory exists. - // This happened in some tests where an image had a tarfile without any - // parent directories. - parent := filepath.Dir(hdr.Name) - parentPath := filepath.Join(dest, parent) - - if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = system.MkdirAll(parentPath, 0600, "") - if err != nil { - return 0, err - } - } - } - - // Skip AUFS metadata dirs - if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) { - // Regular files inside /.wh..wh.plnk can be used as hardlink targets - // We don't want this directory, but we need the files in them so that - // such hardlinks can be resolved. - if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg { - basename := filepath.Base(hdr.Name) - aufsHardlinks[basename] = hdr - if aufsTempdir == "" { - if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil { - return 0, err - } - defer os.RemoveAll(aufsTempdir) - } - if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil { - return 0, err - } - } - - if hdr.Name != WhiteoutOpaqueDir { - continue - } - } - path := filepath.Join(dest, hdr.Name) - rel, err := filepath.Rel(dest, path) - if err != nil { - return 0, err - } - - // Note as these operations are platform specific, so must the slash be. - if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { - return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) - } - base := filepath.Base(path) - - if strings.HasPrefix(base, WhiteoutPrefix) { - dir := filepath.Dir(path) - if base == WhiteoutOpaqueDir { - _, err := os.Lstat(dir) - if err != nil { - return 0, err - } - err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { - if err != nil { - if os.IsNotExist(err) { - err = nil // parent was deleted - } - return err - } - if path == dir { - return nil - } - if _, exists := unpackedPaths[path]; !exists { - err := os.RemoveAll(path) - return err - } - return nil - }) - if err != nil { - return 0, err - } - } else { - originalBase := base[len(WhiteoutPrefix):] - originalPath := filepath.Join(dir, originalBase) - if err := os.RemoveAll(originalPath); err != nil { - return 0, err - } - } - } else { - // If path exits we almost always just want to remove and replace it. - // The only exception is when it is a directory *and* the file from - // the layer is also a directory. Then we want to merge them (i.e. - // just apply the metadata from the layer). 
- if fi, err := os.Lstat(path); err == nil { - if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { - if err := os.RemoveAll(path); err != nil { - return 0, err - } - } - } - - trBuf.Reset(tr) - srcData := io.Reader(trBuf) - srcHdr := hdr - - // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so - // we manually retarget these into the temporary files we extracted them into - if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) { - linkBasename := filepath.Base(hdr.Linkname) - srcHdr = aufsHardlinks[linkBasename] - if srcHdr == nil { - return 0, fmt.Errorf("Invalid aufs hardlink") - } - tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) - if err != nil { - return 0, err - } - defer tmpFile.Close() - srcData = tmpFile - } - - if err := remapIDs(idMapping, srcHdr); err != nil { - return 0, err - } - - if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS); err != nil { - return 0, err - } - - // Directory mtimes must be handled at the end to avoid further - // file creation in them to modify the directory mtime - if hdr.Typeflag == tar.TypeDir { - dirs = append(dirs, hdr) - } - unpackedPaths[path] = struct{}{} - } - } - - for _, hdr := range dirs { - path := filepath.Join(dest, hdr.Name) - if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { - return 0, err - } - } - - return size, nil -} - -// ApplyLayer parses a diff in the standard layer format from `layer`, -// and applies it to the directory `dest`. The stream `layer` can be -// compressed or uncompressed. -// Returns the size in bytes of the contents of the layer. -func ApplyLayer(dest string, layer io.Reader) (int64, error) { - return applyLayerHandler(dest, layer, &TarOptions{}, true) -} - -// ApplyUncompressedLayer parses a diff in the standard layer format from -// `layer`, and applies it to the directory `dest`. The stream `layer` -// can only be uncompressed. -// Returns the size in bytes of the contents of the layer. 
-func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) { - return applyLayerHandler(dest, layer, options, false) -} - -// do the bulk load of ApplyLayer, but allow for not calling DecompressStream -func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) { - dest = filepath.Clean(dest) - - // We need to be able to set any perms - if runtime.GOOS != "windows" { - oldmask, err := system.Umask(0) - if err != nil { - return 0, err - } - defer system.Umask(oldmask) - } - - if decompress { - decompLayer, err := DecompressStream(layer) - if err != nil { - return 0, err - } - defer decompLayer.Close() - layer = decompLayer - } - return UnpackLayer(dest, layer, options) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/time_linux.go b/vendor/github.com/docker/docker/pkg/archive/time_linux.go deleted file mode 100644 index 797143ee84d..00000000000 --- a/vendor/github.com/docker/docker/pkg/archive/time_linux.go +++ /dev/null @@ -1,16 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "syscall" - "time" -) - -func timeToTimespec(time time.Time) (ts syscall.Timespec) { - if time.IsZero() { - // Return UTIME_OMIT special value - ts.Sec = 0 - ts.Nsec = (1 << 30) - 2 - return - } - return syscall.NsecToTimespec(time.UnixNano()) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go deleted file mode 100644 index f58bf227fd3..00000000000 --- a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build !linux - -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "syscall" - "time" -) - -func timeToTimespec(time time.Time) (ts syscall.Timespec) { - nsec := int64(0) - if !time.IsZero() { - nsec = time.UnixNano() - } - return syscall.NsecToTimespec(nsec) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go deleted file mode 100644 index 4c072a87ee5..00000000000 --- a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go +++ /dev/null @@ -1,23 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -// Whiteouts are files with a special meaning for the layered filesystem. -// Docker uses AUFS whiteout files inside exported archives. In other -// filesystems these files are generated/handled on tar creation/extraction. - -// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a -// filename this means that file has been removed from the base layer. -const WhiteoutPrefix = ".wh." - -// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not -// for removing an actual file. Normally these files are excluded from exported -// archives. -const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix - -// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other -// layers. Normally these should not go into exported archives and all changed -// hardlinks should be copied to the top layer. -const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk" - -// WhiteoutOpaqueDir file means directory has been made opaque - meaning -// readdir calls to this directory do not follow to lower layers. 
-const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq" diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap.go b/vendor/github.com/docker/docker/pkg/archive/wrap.go deleted file mode 100644 index 85435694cff..00000000000 --- a/vendor/github.com/docker/docker/pkg/archive/wrap.go +++ /dev/null @@ -1,59 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "bytes" - "io" -) - -// Generate generates a new archive from the content provided -// as input. -// -// `files` is a sequence of path/content pairs. A new file is -// added to the archive for each pair. -// If the last pair is incomplete, the file is created with an -// empty content. For example: -// -// Generate("foo.txt", "hello world", "emptyfile") -// -// The above call will return an archive with 2 files: -// * ./foo.txt with content "hello world" -// * ./empty with empty content -// -// FIXME: stream content instead of buffering -// FIXME: specify permissions and other archive metadata -func Generate(input ...string) (io.Reader, error) { - files := parseStringPairs(input...) - buf := new(bytes.Buffer) - tw := tar.NewWriter(buf) - for _, file := range files { - name, content := file[0], file[1] - hdr := &tar.Header{ - Name: name, - Size: int64(len(content)), - } - if err := tw.WriteHeader(hdr); err != nil { - return nil, err - } - if _, err := tw.Write([]byte(content)); err != nil { - return nil, err - } - } - if err := tw.Close(); err != nil { - return nil, err - } - return buf, nil -} - -func parseStringPairs(input ...string) (output [][2]string) { - output = make([][2]string, 0, len(input)/2+1) - for i := 0; i < len(input); i += 2 { - var pair [2]string - pair[0] = input[i] - if i+1 < len(input) { - pair[1] = input[i+1] - } - output = append(output, pair) - } - return -} diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go index 34f1c726fb5..28cad499aa5 100644 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go +++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go @@ -106,7 +106,7 @@ func (pm *PatternMatcher) Patterns() []*Pattern { return pm.patterns } -// Pattern defines a single regexp used to filter file paths. +// Pattern defines a single regexp used used to filter file paths. 
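The wrap.go hunk above deletes `archive.Generate`, which packed path/content pairs into an in-memory tar (an incomplete trailing pair becoming an empty file). A minimal, self-contained sketch of the same behavior using only the standard library; the `generate` name and the read-back loop are illustrative, not the vendored API:

```go
package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
)

// generate builds an in-memory tar from path/content pairs, mirroring the
// deleted archive.Generate: a dangling final path gets empty content.
func generate(input ...string) (io.Reader, error) {
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)
	for i := 0; i < len(input); i += 2 {
		name, content := input[i], ""
		if i+1 < len(input) {
			content = input[i+1]
		}
		hdr := &tar.Header{Name: name, Size: int64(len(content))}
		if err := tw.WriteHeader(hdr); err != nil {
			return nil, err
		}
		if _, err := tw.Write([]byte(content)); err != nil {
			return nil, err
		}
	}
	if err := tw.Close(); err != nil {
		return nil, err
	}
	return buf, nil
}

func main() {
	r, err := generate("foo.txt", "hello world", "emptyfile")
	if err != nil {
		panic(err)
	}
	// Walk the archive back to show the two entries.
	tr := tar.NewReader(r)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(hdr.Name, hdr.Size)
	}
}
```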
type Pattern struct { cleanedPattern string dirs []string diff --git a/vendor/github.com/docker/docker/registry/registry.go b/vendor/github.com/docker/docker/registry/registry.go index 7a84bbfb7ee..6727b7dc32f 100644 --- a/vendor/github.com/docker/docker/registry/registry.go +++ b/vendor/github.com/docker/docker/registry/registry.go @@ -145,7 +145,7 @@ func trustedLocation(req *http.Request) bool { // addRequiredHeadersToRedirectedRequests adds the necessary redirection headers // for redirected requests func addRequiredHeadersToRedirectedRequests(req *http.Request, via []*http.Request) error { - if via != nil && via[0] != nil { + if len(via) != 0 && via[0] != nil { if trustedLocation(req) && trustedLocation(via[0]) { req.Header = via[0].Header return nil diff --git a/vendor/github.com/docker/docker/registry/service_v2.go b/vendor/github.com/docker/docker/registry/service_v2.go index 1a4c9e31054..3a56dc91145 100644 --- a/vendor/github.com/docker/docker/registry/service_v2.go +++ b/vendor/github.com/docker/docker/registry/service_v2.go @@ -57,7 +57,7 @@ func (s *DefaultService) lookupV2Endpoints(hostname string) (endpoints []APIEndp Scheme: "https", Host: hostname, }, - Version: APIVersion2, + Version: APIVersion2, AllowNondistributableArtifacts: ana, TrimHostname: true, TLSConfig: tlsConfig, @@ -70,7 +70,7 @@ func (s *DefaultService) lookupV2Endpoints(hostname string) (endpoints []APIEndp Scheme: "http", Host: hostname, }, - Version: APIVersion2, + Version: APIVersion2, AllowNondistributableArtifacts: ana, TrimHostname: true, // used to check if supposed to be secure via InsecureSkipVerify diff --git a/vendor/github.com/docker/docker/volume/mounts/home/yoan/go/.cache/govendor/github.com/docker/engine/LICENSE b/vendor/github.com/docker/docker/volume/mounts/home/yoan/go/.cache/govendor/github.com/docker/engine/LICENSE new file mode 100644 index 00000000000..9c8e20ab85c --- /dev/null +++ b/vendor/github.com/docker/docker/volume/mounts/home/yoan/go/.cache/govendor/github.com/docker/engine/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2017 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/docker/volume/mounts/home/yoan/go/.cache/govendor/github.com/docker/engine/NOTICE b/vendor/github.com/docker/docker/volume/mounts/home/yoan/go/.cache/govendor/github.com/docker/engine/NOTICE new file mode 100644 index 00000000000..0c74e15b057 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/mounts/home/yoan/go/.cache/govendor/github.com/docker/engine/NOTICE @@ -0,0 +1,19 @@ +Docker +Copyright 2012-2017 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +This product contains software (https://github.com/kr/pty) developed +by Keith Rarick, licensed under the MIT License. + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. 
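The registry.go hunk earlier in this patch tightens the redirect-header guard from `via != nil` to `len(via) != 0`. A minimal sketch of why: a non-nil but empty `via` slice would still panic on `via[0]`, while `len` covers both the nil and the empty case. The `copyHeadersIfSafe` helper is hypothetical; only `http.Client.CheckRedirect` and its signature come from the standard library:

```go
package main

import (
	"fmt"
	"net/http"
)

// copyHeadersIfSafe mirrors the guard from the registry.go hunk: index
// via[0] only after len(via) has confirmed the slice is non-empty.
func copyHeadersIfSafe(req *http.Request, via []*http.Request) error {
	if len(via) != 0 && via[0] != nil {
		req.Header = via[0].Header
	}
	return nil
}

func main() {
	client := &http.Client{CheckRedirect: copyHeadersIfSafe}
	resp, err := client.Get("https://example.com")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```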
diff --git a/vendor/github.com/docker/docker/volume/mounts/linux_parser.go b/vendor/github.com/docker/docker/volume/mounts/linux_parser.go index 035a24a8d99..8e436aec0ef 100644 --- a/vendor/github.com/docker/docker/volume/mounts/linux_parser.go +++ b/vendor/github.com/docker/docker/volume/mounts/linux_parser.go @@ -97,9 +97,6 @@ func (p *linuxParser) validateMountConfigImpl(mnt *mount.Mount, validateBindSour return &errMountConfig{mnt, fmt.Errorf("must not set ReadOnly mode when using anonymous volumes")} } case mount.TypeTmpfs: - if mnt.BindOptions != nil { - return &errMountConfig{mnt, errExtraField("BindOptions")} - } if len(mnt.Source) != 0 { return &errMountConfig{mnt, errExtraField("Source")} } diff --git a/vendor/github.com/docker/go-connections/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go index 4d5f5ae63af..bb7e4e33695 100644 --- a/vendor/github.com/docker/go-connections/nat/nat.go +++ b/vendor/github.com/docker/go-connections/nat/nat.go @@ -113,7 +113,7 @@ func SplitProtoPort(rawPort string) (string, string) { } func validateProto(proto string) bool { - for _, availableProto := range []string{"tcp", "udp"} { + for _, availableProto := range []string{"tcp", "udp", "sctp"} { if availableProto == proto { return true } diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go index f11f166a442..0ef3fdcb469 100644 --- a/vendor/github.com/docker/go-connections/tlsconfig/config.go +++ b/vendor/github.com/docker/go-connections/tlsconfig/config.go @@ -46,8 +46,6 @@ var acceptedCBCCiphers = []uint16{ tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_CBC_SHA, } // DefaultServerAcceptedCiphers should be uses by code which already has a crypto/tls @@ -67,8 +65,8 @@ var allTLSVersions = map[uint16]struct{}{ // ServerDefault returns a secure-enough TLS configuration for the server TLS configuration. func ServerDefault(ops ...func(*tls.Config)) *tls.Config { tlsconfig := &tls.Config{ - // Avoid fallback by default to SSL protocols < TLS1.0 - MinVersion: tls.VersionTLS10, + // Avoid fallback by default to SSL protocols < TLS1.2 + MinVersion: tls.VersionTLS12, PreferServerCipherSuites: true, CipherSuites: DefaultServerAcceptedCiphers, } diff --git a/vendor/github.com/docker/libnetwork/LICENSE b/vendor/github.com/docker/libnetwork/LICENSE index 5c304d1a4a7..e06d2081865 100644 --- a/vendor/github.com/docker/libnetwork/LICENSE +++ b/vendor/github.com/docker/libnetwork/LICENSE @@ -199,3 +199,4 @@ Apache License WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
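The go-connections/tlsconfig hunk above raises the server minimum to TLS 1.2 and drops the plain-RSA CBC suites. A minimal sketch of an equivalent configuration, assuming only `crypto/tls`; the two-suite cipher list is illustrative and is not the vendored `DefaultServerAcceptedCiphers`:

```go
package main

import (
	"crypto/tls"
	"fmt"
)

// serverDefault sketches the hardened baseline: TLS 1.2 as the floor and
// no TLS_RSA_WITH_AES_*_CBC_SHA suites.
func serverDefault() *tls.Config {
	return &tls.Config{
		// Avoid fallback to SSL/TLS protocols < TLS 1.2.
		MinVersion:               tls.VersionTLS12,
		PreferServerCipherSuites: true,
		CipherSuites: []uint16{
			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
		},
	}
}

func main() {
	cfg := serverDefault()
	fmt.Printf("min TLS version: %#x\n", cfg.MinVersion) // 0x303
}
```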
+ diff --git a/vendor/github.com/docker/libnetwork/ipamutils/utils.go b/vendor/github.com/docker/libnetwork/ipamutils/utils.go index f8eca58e8c2..3fd37cd8840 100644 --- a/vendor/github.com/docker/libnetwork/ipamutils/utils.go +++ b/vendor/github.com/docker/libnetwork/ipamutils/utils.go @@ -5,23 +5,20 @@ import ( "fmt" "net" "sync" - - "github.com/sirupsen/logrus" ) var ( - // PredefinedBroadNetworks contains a list of 31 IPv4 private networks with host size 16 and 12 - // (172.17-31.x.x/16, 192.168.x.x/20) which do not overlap with the networks in `PredefinedGranularNetworks` - PredefinedBroadNetworks []*net.IPNet - // PredefinedGranularNetworks contains a list of 64K IPv4 private networks with host size 8 - // (10.x.x.x/24) which do not overlap with the networks in `PredefinedBroadNetworks` - PredefinedGranularNetworks []*net.IPNet - initNetworksOnce sync.Once - - defaultBroadNetwork = []*NetworkToSplit{{"172.17.0.0/16", 16}, {"172.18.0.0/16", 16}, {"172.19.0.0/16", 16}, + // PredefinedLocalScopeDefaultNetworks contains a list of 31 IPv4 private networks with host size 16 and 12 + // (172.17-31.x.x/16, 192.168.x.x/20) which do not overlap with the networks in `PredefinedGlobalScopeDefaultNetworks` + PredefinedLocalScopeDefaultNetworks []*net.IPNet + // PredefinedGlobalScopeDefaultNetworks contains a list of 64K IPv4 private networks with host size 8 + // (10.x.x.x/24) which do not overlap with the networks in `PredefinedLocalScopeDefaultNetworks` + PredefinedGlobalScopeDefaultNetworks []*net.IPNet + mutex sync.Mutex + localScopeDefaultNetworks = []*NetworkToSplit{{"172.17.0.0/16", 16}, {"172.18.0.0/16", 16}, {"172.19.0.0/16", 16}, {"172.20.0.0/14", 16}, {"172.24.0.0/14", 16}, {"172.28.0.0/14", 16}, {"192.168.0.0/16", 20}} - defaultGranularNetwork = []*NetworkToSplit{{"10.0.0.0/8", 24}} + globalScopeDefaultNetworks = []*NetworkToSplit{{"10.0.0.0/8", 24}} ) // NetworkToSplit represent a network that has to be split in chunks with mask length Size. 
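`NetworkToSplit` above pairs a base network with a target mask length: `{"10.0.0.0/8", 24}` expands to the 65536 /24 pools carved out of 10.0.0.0/8. A self-contained sketch of that expansion for IPv4; `splitIPv4` is a hypothetical stand-in for the vendored `splitNetworks`, not its actual implementation:

```go
package main

import (
	"fmt"
	"net"
)

// splitIPv4 carves the base CIDR into consecutive sub-networks of the
// given mask length, e.g. a /16 into sixteen /20s.
func splitIPv4(base string, size int) ([]*net.IPNet, error) {
	ip, ipnet, err := net.ParseCIDR(base)
	if err != nil {
		return nil, err
	}
	ones, bits := ipnet.Mask.Size()
	n := 1 << uint(size-ones) // number of sub-networks produced
	mask := net.CIDRMask(size, bits)
	out := make([]*net.IPNet, 0, n)
	cur := ip.Mask(ipnet.Mask).To4()
	for i := 0; i < n; i++ {
		sub := make(net.IP, 4)
		copy(sub, cur)
		out = append(out, &net.IPNet{IP: sub, Mask: mask})
		// Advance by the size of one sub-network.
		step := uint32(1) << uint(32-size)
		v := uint32(cur[0])<<24 | uint32(cur[1])<<16 | uint32(cur[2])<<8 | uint32(cur[3])
		v += step
		cur = net.IPv4(byte(v>>24), byte(v>>16), byte(v>>8), byte(v)).To4()
	}
	return out, nil
}

func main() {
	nets, _ := splitIPv4("192.168.0.0/16", 20)
	fmt.Println(len(nets), nets[0], nets[len(nets)-1]) // 16 192.168.0.0/20 192.168.240.0/20
}
```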
@@ -34,19 +31,61 @@ type NetworkToSplit struct { Size int `json:"size"` } -// InitNetworks initializes the broad network pool and the granular network pool -func InitNetworks(defaultAddressPool []*NetworkToSplit) { - initNetworksOnce.Do(func() { - // error ingnored should never fail - PredefinedGranularNetworks, _ = splitNetworks(defaultGranularNetwork) - if defaultAddressPool == nil { - defaultAddressPool = defaultBroadNetwork - } - var err error - if PredefinedBroadNetworks, err = splitNetworks(defaultAddressPool); err != nil { - logrus.WithError(err).Error("InitAddressPools failed to initialize the default address pool") - } - }) +func init() { + var err error + if PredefinedGlobalScopeDefaultNetworks, err = splitNetworks(globalScopeDefaultNetworks); err != nil { + //we are going to panic in case of error as we should never get into this state + panic("InitAddressPools failed to initialize the global scope default address pool") + } + + if PredefinedLocalScopeDefaultNetworks, err = splitNetworks(localScopeDefaultNetworks); err != nil { + //we are going to panic in case of error as we should never get into this state + panic("InitAddressPools failed to initialize the local scope default address pool") + } +} + +// configDefaultNetworks configures local as well global default pool based on input +func configDefaultNetworks(defaultAddressPool []*NetworkToSplit, result *[]*net.IPNet) error { + mutex.Lock() + defer mutex.Unlock() + defaultNetworks, err := splitNetworks(defaultAddressPool) + if err != nil { + return err + } + *result = defaultNetworks + return nil +} + +// GetGlobalScopeDefaultNetworks returns PredefinedGlobalScopeDefaultNetworks +func GetGlobalScopeDefaultNetworks() []*net.IPNet { + mutex.Lock() + defer mutex.Unlock() + return PredefinedGlobalScopeDefaultNetworks +} + +// GetLocalScopeDefaultNetworks returns PredefinedLocalScopeDefaultNetworks +func GetLocalScopeDefaultNetworks() []*net.IPNet { + mutex.Lock() + defer mutex.Unlock() + return PredefinedLocalScopeDefaultNetworks +} + +// ConfigGlobalScopeDefaultNetworks configures global default pool. +// Ideally this will be called from SwarmKit as part of swarm init +func ConfigGlobalScopeDefaultNetworks(defaultAddressPool []*NetworkToSplit) error { + if defaultAddressPool == nil { + defaultAddressPool = globalScopeDefaultNetworks + } + return configDefaultNetworks(defaultAddressPool, &PredefinedGlobalScopeDefaultNetworks) +} + +// ConfigLocalScopeDefaultNetworks configures local default pool. +// Ideally this will be called during libnetwork init +func ConfigLocalScopeDefaultNetworks(defaultAddressPool []*NetworkToSplit) error { + if defaultAddressPool == nil { + return nil + } + return configDefaultNetworks(defaultAddressPool, &PredefinedLocalScopeDefaultNetworks) } // splitNetworks takes a slice of networks, split them accordingly and returns them diff --git a/vendor/github.com/fatih/color/README.md b/vendor/github.com/fatih/color/README.md index 325a23fc67c..3fc95446028 100644 --- a/vendor/github.com/fatih/color/README.md +++ b/vendor/github.com/fatih/color/README.md @@ -130,13 +130,13 @@ fmt.Println("All text will now be bold magenta.") ``` ### Disable/Enable color - -There might be a case where you want to explicitly disable/enable color output. the -`go-isatty` package will automatically disable color output for non-tty output streams + +There might be a case where you want to explicitly disable/enable color output. 
the +`go-isatty` package will automatically disable color output for non-tty output streams (for example if the output were piped directly to `less`) -`Color` has support to disable/enable colors both globally and for single color -definitions. For example suppose you have a CLI app and a `--no-color` bool flag. You +`Color` has support to disable/enable colors both globally and for single color +definitions. For example suppose you have a CLI app and a `--no-color` bool flag. You can easily disable the color output with: ```go @@ -176,3 +176,4 @@ c.Println("This prints again cyan...") ## License The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details + diff --git a/vendor/github.com/fatih/color/doc.go b/vendor/github.com/fatih/color/doc.go index 54186132ea7..cf1e96500f4 100644 --- a/vendor/github.com/fatih/color/doc.go +++ b/vendor/github.com/fatih/color/doc.go @@ -115,7 +115,7 @@ the color output with: var flagNoColor = flag.Bool("no-color", false, "Disable color output") if *flagNoColor { - color.NoColor = true // disables colorized output + color.NoColor = true // disables colorized output } It also has support for single color definitions (local). You can diff --git a/vendor/github.com/godbus/dbus/README.markdown b/vendor/github.com/godbus/dbus/README.markdown index bde2a39c3b6..0a6e7e5bfb0 100644 --- a/vendor/github.com/godbus/dbus/README.markdown +++ b/vendor/github.com/godbus/dbus/README.markdown @@ -25,7 +25,7 @@ If you want to use the subpackages, you can install them the same way. The complete package documentation and some simple examples are available at [godoc.org](http://godoc.org/github.com/godbus/dbus). Also, the [_examples](https://github.com/godbus/dbus/tree/master/_examples) directory -gives a short overview over the basic usage. +gives a short overview over the basic usage. #### Projects using godbus - [notify](https://github.com/esiqveland/notify) provides desktop notifications over dbus into a library. 
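The fatih/color hunks above document the global `--no-color` switch via `color.NoColor`. A short usage sketch, assuming the `github.com/fatih/color` package behaves as its README and doc.go quoted in this patch describe:

```go
package main

import (
	"flag"

	"github.com/fatih/color"
)

func main() {
	flagNoColor := flag.Bool("no-color", false, "Disable color output")
	flag.Parse()

	if *flagNoColor {
		color.NoColor = true // disables colorized output globally
	}
	color.Cyan("Prints cyan unless --no-color was given")
}
```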
diff --git a/vendor/github.com/gorilla/mux/ISSUE_TEMPLATE.md b/vendor/github.com/gorilla/mux/ISSUE_TEMPLATE.md index 196deda23e0..232be82e47a 100644 --- a/vendor/github.com/gorilla/mux/ISSUE_TEMPLATE.md +++ b/vendor/github.com/gorilla/mux/ISSUE_TEMPLATE.md @@ -8,3 +8,4 @@ **Paste a minimal, runnable, reproduction of your issue below** (use backticks to format it) + diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md index bb1d0fd0e7f..e424397ac4d 100644 --- a/vendor/github.com/gorilla/mux/README.md +++ b/vendor/github.com/gorilla/mux/README.md @@ -467,13 +467,13 @@ func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler token := r.Header.Get("X-Session-Token") if user, found := amw.tokenUsers[token]; found { - // We found the token in our map - log.Printf("Authenticated user %s\n", user) - // Pass down the request to the next middleware (or final handler) - next.ServeHTTP(w, r) + // We found the token in our map + log.Printf("Authenticated user %s\n", user) + // Pass down the request to the next middleware (or final handler) + next.ServeHTTP(w, r) } else { - // Write an error and stop the handler chain - http.Error(w, "Forbidden", http.StatusForbidden) + // Write an error and stop the handler chain + http.Error(w, "Forbidden", http.StatusForbidden) } }) } @@ -601,7 +601,7 @@ func TestMetricsHandler(t *testing.T) { } rr := httptest.NewRecorder() - + // Need to create a router that we can pass the request through so that the vars will be added to the context router := mux.NewRouter() router.HandleFunc("/metrics/{type}", MetricsHandler) diff --git a/vendor/github.com/gorilla/mux/go.mod b/vendor/github.com/gorilla/mux/go.mod new file mode 100644 index 00000000000..cfc8ede5818 --- /dev/null +++ b/vendor/github.com/gorilla/mux/go.mod @@ -0,0 +1 @@ +module github.com/gorilla/mux diff --git a/vendor/github.com/hashicorp/go-version/go.mod b/vendor/github.com/hashicorp/go-version/go.mod new file mode 100644 index 00000000000..f5285555fa8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/go-version diff --git a/vendor/github.com/hashicorp/memberlist/state.go b/vendor/github.com/hashicorp/memberlist/state.go index 869ce8c0708..6caded31358 100644 --- a/vendor/github.com/hashicorp/memberlist/state.go +++ b/vendor/github.com/hashicorp/memberlist/state.go @@ -953,7 +953,7 @@ func (m *Memberlist) aliveNode(a *alive, notify chan struct{}, bootstrap bool) { // possible for the following situation to happen: // 1) Start with configuration C, join cluster // 2) Hard fail / Kill / Shutdown - // 3) Retarttart with configuration C', join cluster + // 3) Restart with configuration C', join cluster // // In this case, other nodes and the local node see the same incarnation, // but the values may not be the same. 
For this reason, we always diff --git a/vendor/github.com/hashicorp/yamux/go.mod b/vendor/github.com/hashicorp/yamux/go.mod new file mode 100644 index 00000000000..672a0e58112 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/yamux diff --git a/vendor/github.com/miekg/dns/labels.go b/vendor/github.com/miekg/dns/labels.go index fca5c7dd2d1..cb549fc672c 100644 --- a/vendor/github.com/miekg/dns/labels.go +++ b/vendor/github.com/miekg/dns/labels.go @@ -107,7 +107,7 @@ func CountLabel(s string) (labels int) { // Split splits a name s into its label indexes. // www.miek.nl. returns []int{0, 4, 9}, www.miek.nl also returns []int{0, 4, 9}. -// The root name (.) returns nil. Also see SplitDomainName. +// The root name (.) returns nil. Also see SplitDomainName. // s must be a syntactically valid domain name. func Split(s string) []int { if s == "." { diff --git a/vendor/github.com/morikuni/aec/LICENSE b/vendor/github.com/morikuni/aec/LICENSE new file mode 100644 index 00000000000..1c264016419 --- /dev/null +++ b/vendor/github.com/morikuni/aec/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Taihei Morikuni + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/morikuni/aec/README.md b/vendor/github.com/morikuni/aec/README.md new file mode 100644 index 00000000000..3cbc4343ee2 --- /dev/null +++ b/vendor/github.com/morikuni/aec/README.md @@ -0,0 +1,178 @@ +# aec + +[![GoDoc](https://godoc.org/github.com/morikuni/aec?status.svg)](https://godoc.org/github.com/morikuni/aec) + +Go wrapper for ANSI escape code. + +## Install + +```bash +go get github.com/morikuni/aec +``` + +## Features + +ANSI escape codes depend on terminal environment. +Some of these features may not work. +Check supported Font-Style/Font-Color features with [checkansi](./checkansi). + +[Wikipedia](https://en.wikipedia.org/wiki/ANSI_escape_code) for more detail. 
+ +### Cursor + +- `Up(n)` +- `Down(n)` +- `Right(n)` +- `Left(n)` +- `NextLine(n)` +- `PreviousLine(n)` +- `Column(col)` +- `Position(row, col)` +- `Save` +- `Restore` +- `Hide` +- `Show` +- `Report` + +### Erase + +- `EraseDisplay(mode)` +- `EraseLine(mode)` + +### Scroll + +- `ScrollUp(n)` +- `ScrollDown(n)` + +### Font Style + +- `Bold` +- `Faint` +- `Italic` +- `Underline` +- `BlinkSlow` +- `BlinkRapid` +- `Inverse` +- `Conceal` +- `CrossOut` +- `Frame` +- `Encircle` +- `Overline` + +### Font Color + +Foreground color. + +- `DefaultF` +- `BlackF` +- `RedF` +- `GreenF` +- `YellowF` +- `BlueF` +- `MagentaF` +- `CyanF` +- `WhiteF` +- `LightBlackF` +- `LightRedF` +- `LightGreenF` +- `LightYellowF` +- `LightBlueF` +- `LightMagentaF` +- `LightCyanF` +- `LightWhiteF` +- `Color3BitF(color)` +- `Color8BitF(color)` +- `FullColorF(r, g, b)` + +Background color. + +- `DefaultB` +- `BlackB` +- `RedB` +- `GreenB` +- `YellowB` +- `BlueB` +- `MagentaB` +- `CyanB` +- `WhiteB` +- `LightBlackB` +- `LightRedB` +- `LightGreenB` +- `LightYellowB` +- `LightBlueB` +- `LightMagentaB` +- `LightCyanB` +- `LightWhiteB` +- `Color3BitB(color)` +- `Color8BitB(color)` +- `FullColorB(r, g, b)` + +### Color Converter + +24bit RGB color to ANSI color. + +- `NewRGB3Bit(r, g, b)` +- `NewRGB8Bit(r, g, b)` + +### Builder + +To mix these features. + +```go +custom := aec.EmptyBuilder.Right(2).RGB8BitF(128, 255, 64).RedB().ANSI +custom.Apply("Hello World") +``` + +## Usage + +1. Create ANSI by `aec.XXX().With(aec.YYY())` or `aec.EmptyBuilder.XXX().YYY().ANSI` +2. Print ANSI by `fmt.Print(ansi, "some string", aec.Reset)` or `fmt.Print(ansi.Apply("some string"))` + +`aec.Reset` should be added when using font style or font color features. + +## Example + +Simple progressbar. + +![sample](./sample.gif) + +```go +package main + +import ( + "fmt" + "strings" + "time" + + "github.com/morikuni/aec" +) + +func main() { + const n = 20 + builder := aec.EmptyBuilder + + up2 := aec.Up(2) + col := aec.Column(n + 2) + bar := aec.Color8BitF(aec.NewRGB8Bit(64, 255, 64)) + label := builder.LightRedF().Underline().With(col).Right(1).ANSI + + // for up2 + fmt.Println() + fmt.Println() + + for i := 0; i <= n; i++ { + fmt.Print(up2) + fmt.Println(label.Apply(fmt.Sprint(i, "/", n))) + fmt.Print("[") + fmt.Print(bar.Apply(strings.Repeat("=", i))) + fmt.Println(col.Apply("]")) + time.Sleep(100 * time.Millisecond) + } +} +``` + +## License + +[MIT](./LICENSE) + + diff --git a/vendor/github.com/morikuni/aec/aec.go b/vendor/github.com/morikuni/aec/aec.go new file mode 100644 index 00000000000..566be6eb1ef --- /dev/null +++ b/vendor/github.com/morikuni/aec/aec.go @@ -0,0 +1,137 @@ +package aec + +import "fmt" + +// EraseMode is listed in a variable EraseModes. +type EraseMode uint + +var ( + // EraseModes is a list of EraseMode. + EraseModes struct { + // All erase all. + All EraseMode + + // Head erase to head. + Head EraseMode + + // Tail erase to tail. + Tail EraseMode + } + + // Save saves the cursor position. + Save ANSI + + // Restore restores the cursor position. + Restore ANSI + + // Hide hides the cursor. + Hide ANSI + + // Show shows the cursor. + Show ANSI + + // Report reports the cursor position. + Report ANSI +) + +// Up moves up the cursor. +func Up(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dA", n)) +} + +// Down moves down the cursor. 
+func Down(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dB", n)) +} + +// Right moves right the cursor. +func Right(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dC", n)) +} + +// Left moves left the cursor. +func Left(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dD", n)) +} + +// NextLine moves down the cursor to head of a line. +func NextLine(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dE", n)) +} + +// PreviousLine moves up the cursor to head of a line. +func PreviousLine(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dF", n)) +} + +// Column set the cursor position to a given column. +func Column(col uint) ANSI { + return newAnsi(fmt.Sprintf(esc+"%dG", col)) +} + +// Position set the cursor position to a given absolute position. +func Position(row, col uint) ANSI { + return newAnsi(fmt.Sprintf(esc+"%d;%dH", row, col)) +} + +// EraseDisplay erases display by given EraseMode. +func EraseDisplay(m EraseMode) ANSI { + return newAnsi(fmt.Sprintf(esc+"%dJ", m)) +} + +// EraseLine erases lines by given EraseMode. +func EraseLine(m EraseMode) ANSI { + return newAnsi(fmt.Sprintf(esc+"%dK", m)) +} + +// ScrollUp scrolls up the page. +func ScrollUp(n int) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dS", n)) +} + +// ScrollDown scrolls down the page. +func ScrollDown(n int) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dT", n)) +} + +func init() { + EraseModes = struct { + All EraseMode + Head EraseMode + Tail EraseMode + }{ + Tail: 0, + Head: 1, + All: 2, + } + + Save = newAnsi(esc + "s") + Restore = newAnsi(esc + "u") + Hide = newAnsi(esc + "?25l") + Show = newAnsi(esc + "?25h") + Report = newAnsi(esc + "6n") +} diff --git a/vendor/github.com/morikuni/aec/ansi.go b/vendor/github.com/morikuni/aec/ansi.go new file mode 100644 index 00000000000..e60722e6e60 --- /dev/null +++ b/vendor/github.com/morikuni/aec/ansi.go @@ -0,0 +1,59 @@ +package aec + +import ( + "fmt" + "strings" +) + +const esc = "\x1b[" + +// Reset resets SGR effect. +const Reset string = "\x1b[0m" + +var empty = newAnsi("") + +// ANSI represents ANSI escape code. +type ANSI interface { + fmt.Stringer + + // With adapts given ANSIs. + With(...ANSI) ANSI + + // Apply wraps given string in ANSI. + Apply(string) string +} + +type ansiImpl string + +func newAnsi(s string) *ansiImpl { + r := ansiImpl(s) + return &r +} + +func (a *ansiImpl) With(ansi ...ANSI) ANSI { + return concat(append([]ANSI{a}, ansi...)) +} + +func (a *ansiImpl) Apply(s string) string { + return a.String() + s + Reset +} + +func (a *ansiImpl) String() string { + return string(*a) +} + +// Apply wraps given string in ANSIs. +func Apply(s string, ansi ...ANSI) string { + if len(ansi) == 0 { + return s + } + return concat(ansi).Apply(s) +} + +func concat(ansi []ANSI) ANSI { + strs := make([]string, 0, len(ansi)) + for _, p := range ansi { + strs = append(strs, p.String()) + } + return newAnsi(strings.Join(strs, "")) +} diff --git a/vendor/github.com/morikuni/aec/builder.go b/vendor/github.com/morikuni/aec/builder.go new file mode 100644 index 00000000000..13bd002d4e3 --- /dev/null +++ b/vendor/github.com/morikuni/aec/builder.go @@ -0,0 +1,388 @@ +package aec + +// Builder is a lightweight syntax to construct customized ANSI. 
+type Builder struct { + ANSI ANSI +} + +// EmptyBuilder is an initialized Builder. +var EmptyBuilder *Builder + +// NewBuilder creates a Builder from existing ANSI. +func NewBuilder(a ...ANSI) *Builder { + return &Builder{concat(a)} +} + +// With is a syntax for With. +func (builder *Builder) With(a ...ANSI) *Builder { + return NewBuilder(builder.ANSI.With(a...)) +} + +// Up is a syntax for Up. +func (builder *Builder) Up(n uint) *Builder { + return builder.With(Up(n)) +} + +// Down is a syntax for Down. +func (builder *Builder) Down(n uint) *Builder { + return builder.With(Down(n)) +} + +// Right is a syntax for Right. +func (builder *Builder) Right(n uint) *Builder { + return builder.With(Right(n)) +} + +// Left is a syntax for Left. +func (builder *Builder) Left(n uint) *Builder { + return builder.With(Left(n)) +} + +// NextLine is a syntax for NextLine. +func (builder *Builder) NextLine(n uint) *Builder { + return builder.With(NextLine(n)) +} + +// PreviousLine is a syntax for PreviousLine. +func (builder *Builder) PreviousLine(n uint) *Builder { + return builder.With(PreviousLine(n)) +} + +// Column is a syntax for Column. +func (builder *Builder) Column(col uint) *Builder { + return builder.With(Column(col)) +} + +// Position is a syntax for Position. +func (builder *Builder) Position(row, col uint) *Builder { + return builder.With(Position(row, col)) +} + +// EraseDisplay is a syntax for EraseDisplay. +func (builder *Builder) EraseDisplay(m EraseMode) *Builder { + return builder.With(EraseDisplay(m)) +} + +// EraseLine is a syntax for EraseLine. +func (builder *Builder) EraseLine(m EraseMode) *Builder { + return builder.With(EraseLine(m)) +} + +// ScrollUp is a syntax for ScrollUp. +func (builder *Builder) ScrollUp(n int) *Builder { + return builder.With(ScrollUp(n)) +} + +// ScrollDown is a syntax for ScrollDown. +func (builder *Builder) ScrollDown(n int) *Builder { + return builder.With(ScrollDown(n)) +} + +// Save is a syntax for Save. +func (builder *Builder) Save() *Builder { + return builder.With(Save) +} + +// Restore is a syntax for Restore. +func (builder *Builder) Restore() *Builder { + return builder.With(Restore) +} + +// Hide is a syntax for Hide. +func (builder *Builder) Hide() *Builder { + return builder.With(Hide) +} + +// Show is a syntax for Show. +func (builder *Builder) Show() *Builder { + return builder.With(Show) +} + +// Report is a syntax for Report. +func (builder *Builder) Report() *Builder { + return builder.With(Report) +} + +// Bold is a syntax for Bold. +func (builder *Builder) Bold() *Builder { + return builder.With(Bold) +} + +// Faint is a syntax for Faint. +func (builder *Builder) Faint() *Builder { + return builder.With(Faint) +} + +// Italic is a syntax for Italic. +func (builder *Builder) Italic() *Builder { + return builder.With(Italic) +} + +// Underline is a syntax for Underline. +func (builder *Builder) Underline() *Builder { + return builder.With(Underline) +} + +// BlinkSlow is a syntax for BlinkSlow. +func (builder *Builder) BlinkSlow() *Builder { + return builder.With(BlinkSlow) +} + +// BlinkRapid is a syntax for BlinkRapid. +func (builder *Builder) BlinkRapid() *Builder { + return builder.With(BlinkRapid) +} + +// Inverse is a syntax for Inverse. +func (builder *Builder) Inverse() *Builder { + return builder.With(Inverse) +} + +// Conceal is a syntax for Conceal. +func (builder *Builder) Conceal() *Builder { + return builder.With(Conceal) +} + +// CrossOut is a syntax for CrossOut. 
+func (builder *Builder) CrossOut() *Builder { + return builder.With(CrossOut) +} + +// BlackF is a syntax for BlackF. +func (builder *Builder) BlackF() *Builder { + return builder.With(BlackF) +} + +// RedF is a syntax for RedF. +func (builder *Builder) RedF() *Builder { + return builder.With(RedF) +} + +// GreenF is a syntax for GreenF. +func (builder *Builder) GreenF() *Builder { + return builder.With(GreenF) +} + +// YellowF is a syntax for YellowF. +func (builder *Builder) YellowF() *Builder { + return builder.With(YellowF) +} + +// BlueF is a syntax for BlueF. +func (builder *Builder) BlueF() *Builder { + return builder.With(BlueF) +} + +// MagentaF is a syntax for MagentaF. +func (builder *Builder) MagentaF() *Builder { + return builder.With(MagentaF) +} + +// CyanF is a syntax for CyanF. +func (builder *Builder) CyanF() *Builder { + return builder.With(CyanF) +} + +// WhiteF is a syntax for WhiteF. +func (builder *Builder) WhiteF() *Builder { + return builder.With(WhiteF) +} + +// DefaultF is a syntax for DefaultF. +func (builder *Builder) DefaultF() *Builder { + return builder.With(DefaultF) +} + +// BlackB is a syntax for BlackB. +func (builder *Builder) BlackB() *Builder { + return builder.With(BlackB) +} + +// RedB is a syntax for RedB. +func (builder *Builder) RedB() *Builder { + return builder.With(RedB) +} + +// GreenB is a syntax for GreenB. +func (builder *Builder) GreenB() *Builder { + return builder.With(GreenB) +} + +// YellowB is a syntax for YellowB. +func (builder *Builder) YellowB() *Builder { + return builder.With(YellowB) +} + +// BlueB is a syntax for BlueB. +func (builder *Builder) BlueB() *Builder { + return builder.With(BlueB) +} + +// MagentaB is a syntax for MagentaB. +func (builder *Builder) MagentaB() *Builder { + return builder.With(MagentaB) +} + +// CyanB is a syntax for CyanB. +func (builder *Builder) CyanB() *Builder { + return builder.With(CyanB) +} + +// WhiteB is a syntax for WhiteB. +func (builder *Builder) WhiteB() *Builder { + return builder.With(WhiteB) +} + +// DefaultB is a syntax for DefaultB. +func (builder *Builder) DefaultB() *Builder { + return builder.With(DefaultB) +} + +// Frame is a syntax for Frame. +func (builder *Builder) Frame() *Builder { + return builder.With(Frame) +} + +// Encircle is a syntax for Encircle. +func (builder *Builder) Encircle() *Builder { + return builder.With(Encircle) +} + +// Overline is a syntax for Overline. +func (builder *Builder) Overline() *Builder { + return builder.With(Overline) +} + +// LightBlackF is a syntax for LightBlueF. +func (builder *Builder) LightBlackF() *Builder { + return builder.With(LightBlackF) +} + +// LightRedF is a syntax for LightRedF. +func (builder *Builder) LightRedF() *Builder { + return builder.With(LightRedF) +} + +// LightGreenF is a syntax for LightGreenF. +func (builder *Builder) LightGreenF() *Builder { + return builder.With(LightGreenF) +} + +// LightYellowF is a syntax for LightYellowF. +func (builder *Builder) LightYellowF() *Builder { + return builder.With(LightYellowF) +} + +// LightBlueF is a syntax for LightBlueF. +func (builder *Builder) LightBlueF() *Builder { + return builder.With(LightBlueF) +} + +// LightMagentaF is a syntax for LightMagentaF. +func (builder *Builder) LightMagentaF() *Builder { + return builder.With(LightMagentaF) +} + +// LightCyanF is a syntax for LightCyanF. +func (builder *Builder) LightCyanF() *Builder { + return builder.With(LightCyanF) +} + +// LightWhiteF is a syntax for LightWhiteF. 
+func (builder *Builder) LightWhiteF() *Builder { + return builder.With(LightWhiteF) +} + +// LightBlackB is a syntax for LightBlackB. +func (builder *Builder) LightBlackB() *Builder { + return builder.With(LightBlackB) +} + +// LightRedB is a syntax for LightRedB. +func (builder *Builder) LightRedB() *Builder { + return builder.With(LightRedB) +} + +// LightGreenB is a syntax for LightGreenB. +func (builder *Builder) LightGreenB() *Builder { + return builder.With(LightGreenB) +} + +// LightYellowB is a syntax for LightYellowB. +func (builder *Builder) LightYellowB() *Builder { + return builder.With(LightYellowB) +} + +// LightBlueB is a syntax for LightBlueB. +func (builder *Builder) LightBlueB() *Builder { + return builder.With(LightBlueB) +} + +// LightMagentaB is a syntax for LightMagentaB. +func (builder *Builder) LightMagentaB() *Builder { + return builder.With(LightMagentaB) +} + +// LightCyanB is a syntax for LightCyanB. +func (builder *Builder) LightCyanB() *Builder { + return builder.With(LightCyanB) +} + +// LightWhiteB is a syntax for LightWhiteB. +func (builder *Builder) LightWhiteB() *Builder { + return builder.With(LightWhiteB) +} + +// Color3BitF is a syntax for Color3BitF. +func (builder *Builder) Color3BitF(c RGB3Bit) *Builder { + return builder.With(Color3BitF(c)) +} + +// Color3BitB is a syntax for Color3BitB. +func (builder *Builder) Color3BitB(c RGB3Bit) *Builder { + return builder.With(Color3BitB(c)) +} + +// Color8BitF is a syntax for Color8BitF. +func (builder *Builder) Color8BitF(c RGB8Bit) *Builder { + return builder.With(Color8BitF(c)) +} + +// Color8BitB is a syntax for Color8BitB. +func (builder *Builder) Color8BitB(c RGB8Bit) *Builder { + return builder.With(Color8BitB(c)) +} + +// FullColorF is a syntax for FullColorF. +func (builder *Builder) FullColorF(r, g, b uint8) *Builder { + return builder.With(FullColorF(r, g, b)) +} + +// FullColorB is a syntax for FullColorB. +func (builder *Builder) FullColorB(r, g, b uint8) *Builder { + return builder.With(FullColorB(r, g, b)) +} + +// RGB3BitF is a syntax for Color3BitF with NewRGB3Bit. +func (builder *Builder) RGB3BitF(r, g, b uint8) *Builder { + return builder.Color3BitF(NewRGB3Bit(r, g, b)) +} + +// RGB3BitB is a syntax for Color3BitB with NewRGB3Bit. +func (builder *Builder) RGB3BitB(r, g, b uint8) *Builder { + return builder.Color3BitB(NewRGB3Bit(r, g, b)) +} + +// RGB8BitF is a syntax for Color8BitF with NewRGB8Bit. +func (builder *Builder) RGB8BitF(r, g, b uint8) *Builder { + return builder.Color8BitF(NewRGB8Bit(r, g, b)) +} + +// RGB8BitB is a syntax for Color8BitB with NewRGB8Bit. 
+func (builder *Builder) RGB8BitB(r, g, b uint8) *Builder {
+ return builder.Color8BitB(NewRGB8Bit(r, g, b))
+}
+
+func init() {
+ EmptyBuilder = &Builder{empty}
+}
diff --git a/vendor/github.com/morikuni/aec/sample.gif b/vendor/github.com/morikuni/aec/sample.gif
new file mode 100644
index 0000000000000000000000000000000000000000..c6c613bb70645efa4e184726060ea1ff981eeceb
GIT binary patch
literal 12548
[12548-byte binary GIF payload omitted]
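Editor's note: the Builder vendored above is immutable; every method wraps the accumulated ANSI value in a fresh *Builder, so a prebuilt style can be shared safely. A minimal usage sketch, assuming the ANSI interface keeps upstream aec's Apply method (defined in the package's ansi.go, which this hunk does not show), and using this repository's mirrored import path:

package main

import (
    "fmt"

    "github.com/morikuni/aec" // mirrored import path used by this vendor tree
)

func main() {
    // Compose once; each call returns a new *Builder, leaving the old one intact.
    warn := aec.EmptyBuilder.LightYellowF().Bold().Underline().ANSI

    // Apply (assumed from upstream aec) wraps the string in the accumulated
    // SGR escape codes and resets afterwards.
    fmt.Println(warn.Apply("low disk space"))
}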
diff --git a/vendor/github.com/morikuni/aec/sgr.go b/vendor/github.com/morikuni/aec/sgr.go
new file mode 100644
index 00000000000..0ba3464e6db
--- /dev/null
+++ b/vendor/github.com/morikuni/aec/sgr.go
@@ -0,0 +1,202 @@
+package aec
+
+import (
+ "fmt"
+)
+
+// RGB3Bit is a 3-bit RGB color.
+type RGB3Bit uint8
+
+// RGB8Bit is an 8-bit RGB color.
+type RGB8Bit uint8
+
+func newSGR(n uint) ANSI {
+ return newAnsi(fmt.Sprintf(esc+"%dm", n))
+}
+
+// NewRGB3Bit creates an RGB3Bit from the given RGB values.
+func NewRGB3Bit(r, g, b uint8) RGB3Bit {
+ return RGB3Bit((r >> 7) | ((g >> 6) & 0x2) | ((b >> 5) & 0x4))
+}
+
+// NewRGB8Bit creates an RGB8Bit from the given RGB values.
+func NewRGB8Bit(r, g, b uint8) RGB8Bit {
+ return RGB8Bit(16 + 36*(r/43) + 6*(g/43) + b/43)
+}
+
+// Color3BitF sets the foreground color of text.
+func Color3BitF(c RGB3Bit) ANSI {
+ return newAnsi(fmt.Sprintf(esc+"%dm", c+30))
+}
+
+// Color3BitB sets the background color of text.
+func Color3BitB(c RGB3Bit) ANSI {
+ return newAnsi(fmt.Sprintf(esc+"%dm", c+40))
+}
+
+// Color8BitF sets the foreground color of text.
+func Color8BitF(c RGB8Bit) ANSI {
+ return newAnsi(fmt.Sprintf(esc+"38;5;%dm", c))
+}
+
+// Color8BitB sets the background color of text.
+func Color8BitB(c RGB8Bit) ANSI {
+ return newAnsi(fmt.Sprintf(esc+"48;5;%dm", c))
+}
+
+// FullColorF sets the foreground color of text.
+func FullColorF(r, g, b uint8) ANSI {
+ return newAnsi(fmt.Sprintf(esc+"38;2;%d;%d;%dm", r, g, b))
+}
+
+// FullColorB sets the background color of text.
+func FullColorB(r, g, b uint8) ANSI {
+ return newAnsi(fmt.Sprintf(esc+"48;2;%d;%d;%dm", r, g, b))
+}
+
+// Style
+var (
+ // Bold sets the text style to bold or increased intensity.
+ Bold ANSI
+
+ // Faint sets the text style to faint.
+ Faint ANSI
+
+ // Italic sets the text style to italic.
+ Italic ANSI
+
+ // Underline sets the text style to underline.
+ Underline ANSI
+
+ // BlinkSlow sets the text style to slow blink.
+ BlinkSlow ANSI
+
+ // BlinkRapid sets the text style to rapid blink.
+ BlinkRapid ANSI
+
+ // Inverse swaps the foreground and background colors.
+ Inverse ANSI
+
+ // Conceal sets the text style to conceal.
+ Conceal ANSI
+
+ // CrossOut sets the text style to crossed out.
+ CrossOut ANSI
+
+ // Frame sets the text style to framed.
+ Frame ANSI
+
+ // Encircle sets the text style to encircled.
+ Encircle ANSI
+
+ // Overline sets the text style to overlined.
+ Overline ANSI
+)
+
+// Foreground color of text.
+var (
+ // DefaultF is the default foreground color.
+ DefaultF ANSI
+
+ // Normal color
+ BlackF ANSI
+ RedF ANSI
+ GreenF ANSI
+ YellowF ANSI
+ BlueF ANSI
+ MagentaF ANSI
+ CyanF ANSI
+ WhiteF ANSI
+
+ // Light color
+ LightBlackF ANSI
+ LightRedF ANSI
+ LightGreenF ANSI
+ LightYellowF ANSI
+ LightBlueF ANSI
+ LightMagentaF ANSI
+ LightCyanF ANSI
+ LightWhiteF ANSI
+)
+
+// Background color of text.
+var (
+ // DefaultB is the default background color.
+ DefaultB ANSI + + // Normal color + BlackB ANSI + RedB ANSI + GreenB ANSI + YellowB ANSI + BlueB ANSI + MagentaB ANSI + CyanB ANSI + WhiteB ANSI + + // Light color + LightBlackB ANSI + LightRedB ANSI + LightGreenB ANSI + LightYellowB ANSI + LightBlueB ANSI + LightMagentaB ANSI + LightCyanB ANSI + LightWhiteB ANSI +) + +func init() { + Bold = newSGR(1) + Faint = newSGR(2) + Italic = newSGR(3) + Underline = newSGR(4) + BlinkSlow = newSGR(5) + BlinkRapid = newSGR(6) + Inverse = newSGR(7) + Conceal = newSGR(8) + CrossOut = newSGR(9) + + BlackF = newSGR(30) + RedF = newSGR(31) + GreenF = newSGR(32) + YellowF = newSGR(33) + BlueF = newSGR(34) + MagentaF = newSGR(35) + CyanF = newSGR(36) + WhiteF = newSGR(37) + + DefaultF = newSGR(39) + + BlackB = newSGR(40) + RedB = newSGR(41) + GreenB = newSGR(42) + YellowB = newSGR(43) + BlueB = newSGR(44) + MagentaB = newSGR(45) + CyanB = newSGR(46) + WhiteB = newSGR(47) + + DefaultB = newSGR(49) + + Frame = newSGR(51) + Encircle = newSGR(52) + Overline = newSGR(53) + + LightBlackF = newSGR(90) + LightRedF = newSGR(91) + LightGreenF = newSGR(92) + LightYellowF = newSGR(93) + LightBlueF = newSGR(94) + LightMagentaF = newSGR(95) + LightCyanF = newSGR(96) + LightWhiteF = newSGR(97) + + LightBlackB = newSGR(100) + LightRedB = newSGR(101) + LightGreenB = newSGR(102) + LightYellowB = newSGR(103) + LightBlueB = newSGR(104) + LightMagentaB = newSGR(105) + LightCyanB = newSGR(106) + LightWhiteB = newSGR(107) +} diff --git a/vendor/github.com/seccomp/libseccomp-golang/README b/vendor/github.com/seccomp/libseccomp-golang/README index 920c6ec9c93..66839a46685 100644 --- a/vendor/github.com/seccomp/libseccomp-golang/README +++ b/vendor/github.com/seccomp/libseccomp-golang/README @@ -48,3 +48,4 @@ In order to execute the 'make lint' recipe the 'golint' tool is needed, it can be found at: -> https://github.com/golang/lint + diff --git a/vendor/golang.org/x/net/internal/socks/client.go b/vendor/golang.org/x/net/internal/socks/client.go new file mode 100644 index 00000000000..3d6f516a595 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socks/client.go @@ -0,0 +1,168 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package socks + +import ( + "context" + "errors" + "io" + "net" + "strconv" + "time" +) + +var ( + noDeadline = time.Time{} + aLongTimeAgo = time.Unix(1, 0) +) + +func (d *Dialer) connect(ctx context.Context, c net.Conn, address string) (_ net.Addr, ctxErr error) { + host, port, err := splitHostPort(address) + if err != nil { + return nil, err + } + if deadline, ok := ctx.Deadline(); ok && !deadline.IsZero() { + c.SetDeadline(deadline) + defer c.SetDeadline(noDeadline) + } + if ctx != context.Background() { + errCh := make(chan error, 1) + done := make(chan struct{}) + defer func() { + close(done) + if ctxErr == nil { + ctxErr = <-errCh + } + }() + go func() { + select { + case <-ctx.Done(): + c.SetDeadline(aLongTimeAgo) + errCh <- ctx.Err() + case <-done: + errCh <- nil + } + }() + } + + b := make([]byte, 0, 6+len(host)) // the size here is just an estimate + b = append(b, Version5) + if len(d.AuthMethods) == 0 || d.Authenticate == nil { + b = append(b, 1, byte(AuthMethodNotRequired)) + } else { + ams := d.AuthMethods + if len(ams) > 255 { + return nil, errors.New("too many authentication methods") + } + b = append(b, byte(len(ams))) + for _, am := range ams { + b = append(b, byte(am)) + } + } + if _, ctxErr = c.Write(b); ctxErr != nil { + return + } + + if _, ctxErr = io.ReadFull(c, b[:2]); ctxErr != nil { + return + } + if b[0] != Version5 { + return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0]))) + } + am := AuthMethod(b[1]) + if am == AuthMethodNoAcceptableMethods { + return nil, errors.New("no acceptable authentication methods") + } + if d.Authenticate != nil { + if ctxErr = d.Authenticate(ctx, c, am); ctxErr != nil { + return + } + } + + b = b[:0] + b = append(b, Version5, byte(d.cmd), 0) + if ip := net.ParseIP(host); ip != nil { + if ip4 := ip.To4(); ip4 != nil { + b = append(b, AddrTypeIPv4) + b = append(b, ip4...) + } else if ip6 := ip.To16(); ip6 != nil { + b = append(b, AddrTypeIPv6) + b = append(b, ip6...) + } else { + return nil, errors.New("unknown address type") + } + } else { + if len(host) > 255 { + return nil, errors.New("FQDN too long") + } + b = append(b, AddrTypeFQDN) + b = append(b, byte(len(host))) + b = append(b, host...) 
+ } + b = append(b, byte(port>>8), byte(port)) + if _, ctxErr = c.Write(b); ctxErr != nil { + return + } + + if _, ctxErr = io.ReadFull(c, b[:4]); ctxErr != nil { + return + } + if b[0] != Version5 { + return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0]))) + } + if cmdErr := Reply(b[1]); cmdErr != StatusSucceeded { + return nil, errors.New("unknown error " + cmdErr.String()) + } + if b[2] != 0 { + return nil, errors.New("non-zero reserved field") + } + l := 2 + var a Addr + switch b[3] { + case AddrTypeIPv4: + l += net.IPv4len + a.IP = make(net.IP, net.IPv4len) + case AddrTypeIPv6: + l += net.IPv6len + a.IP = make(net.IP, net.IPv6len) + case AddrTypeFQDN: + if _, err := io.ReadFull(c, b[:1]); err != nil { + return nil, err + } + l += int(b[0]) + default: + return nil, errors.New("unknown address type " + strconv.Itoa(int(b[3]))) + } + if cap(b) < l { + b = make([]byte, l) + } else { + b = b[:l] + } + if _, ctxErr = io.ReadFull(c, b); ctxErr != nil { + return + } + if a.IP != nil { + copy(a.IP, b) + } else { + a.Name = string(b[:len(b)-2]) + } + a.Port = int(b[len(b)-2])<<8 | int(b[len(b)-1]) + return &a, nil +} + +func splitHostPort(address string) (string, int, error) { + host, port, err := net.SplitHostPort(address) + if err != nil { + return "", 0, err + } + portnum, err := strconv.Atoi(port) + if err != nil { + return "", 0, err + } + if 1 > portnum || portnum > 0xffff { + return "", 0, errors.New("port number out of range " + port) + } + return host, portnum, nil +} diff --git a/vendor/golang.org/x/net/internal/socks/socks.go b/vendor/golang.org/x/net/internal/socks/socks.go new file mode 100644 index 00000000000..6929a9fd5c6 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socks/socks.go @@ -0,0 +1,317 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package socks provides a SOCKS version 5 client implementation. +// +// SOCKS protocol version 5 is defined in RFC 1928. +// Username/Password authentication for SOCKS version 5 is defined in +// RFC 1929. +package socks + +import ( + "context" + "errors" + "io" + "net" + "strconv" +) + +// A Command represents a SOCKS command. +type Command int + +func (cmd Command) String() string { + switch cmd { + case CmdConnect: + return "socks connect" + case cmdBind: + return "socks bind" + default: + return "socks " + strconv.Itoa(int(cmd)) + } +} + +// An AuthMethod represents a SOCKS authentication method. +type AuthMethod int + +// A Reply represents a SOCKS command reply code. +type Reply int + +func (code Reply) String() string { + switch code { + case StatusSucceeded: + return "succeeded" + case 0x01: + return "general SOCKS server failure" + case 0x02: + return "connection not allowed by ruleset" + case 0x03: + return "network unreachable" + case 0x04: + return "host unreachable" + case 0x05: + return "connection refused" + case 0x06: + return "TTL expired" + case 0x07: + return "command not supported" + case 0x08: + return "address type not supported" + default: + return "unknown code: " + strconv.Itoa(int(code)) + } +} + +// Wire protocol constants. 
+const (
+ Version5 = 0x05
+
+ AddrTypeIPv4 = 0x01
+ AddrTypeFQDN = 0x03
+ AddrTypeIPv6 = 0x04
+
+ CmdConnect Command = 0x01 // establishes an active-open forward proxy connection
+ cmdBind Command = 0x02 // establishes a passive-open forward proxy connection
+
+ AuthMethodNotRequired AuthMethod = 0x00 // no authentication required
+ AuthMethodUsernamePassword AuthMethod = 0x02 // use username/password
+ AuthMethodNoAcceptableMethods AuthMethod = 0xff // no acceptable authentication methods
+
+ StatusSucceeded Reply = 0x00
+)
+
+// An Addr represents a SOCKS-specific address.
+// Either Name or IP is used exclusively.
+type Addr struct {
+ Name string // fully-qualified domain name
+ IP net.IP
+ Port int
+}
+
+func (a *Addr) Network() string { return "socks" }
+
+func (a *Addr) String() string {
+ if a == nil {
+ return "<nil>"
+ }
+ port := strconv.Itoa(a.Port)
+ if a.IP == nil {
+ return net.JoinHostPort(a.Name, port)
+ }
+ return net.JoinHostPort(a.IP.String(), port)
+}
+
+// A Conn represents a forward proxy connection.
+type Conn struct {
+ net.Conn
+
+ boundAddr net.Addr
+}
+
+// BoundAddr returns the address assigned by the proxy server for
+// connecting to the command target address.
+func (c *Conn) BoundAddr() net.Addr {
+ if c == nil {
+ return nil
+ }
+ return c.boundAddr
+}
+
+// A Dialer holds SOCKS-specific options.
+type Dialer struct {
+ cmd Command // either CmdConnect or cmdBind
+ proxyNetwork string // network between a proxy server and a client
+ proxyAddress string // proxy server address
+
+ // ProxyDial specifies the optional dial function for
+ // establishing the transport connection.
+ ProxyDial func(context.Context, string, string) (net.Conn, error)
+
+ // AuthMethods specifies the list of request authentication
+ // methods.
+ // If empty, SOCKS client requests only AuthMethodNotRequired.
+ AuthMethods []AuthMethod
+
+ // Authenticate specifies the optional authentication
+ // function. It must be non-nil when AuthMethods is not empty.
+ // It must return an error when authentication fails.
+ Authenticate func(context.Context, io.ReadWriter, AuthMethod) error
+}
+
+// DialContext connects to the provided address on the provided
+// network.
+//
+// The returned error value may be a net.OpError. When the Op field of
+// net.OpError contains "socks", the Source field contains a proxy
+// server address and the Addr field contains a command target
+// address.
+//
+// See func Dial of the standard library's net package for a
+// description of the network and address parameters.
+func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) { + if err := d.validateTarget(network, address); err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + if ctx == nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")} + } + var err error + var c net.Conn + if d.ProxyDial != nil { + c, err = d.ProxyDial(ctx, d.proxyNetwork, d.proxyAddress) + } else { + var dd net.Dialer + c, err = dd.DialContext(ctx, d.proxyNetwork, d.proxyAddress) + } + if err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + a, err := d.connect(ctx, c, address) + if err != nil { + c.Close() + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + return &Conn{Conn: c, boundAddr: a}, nil +} + +// DialWithConn initiates a connection from SOCKS server to the target +// network and address using the connection c that is already +// connected to the SOCKS server. +// +// It returns the connection's local address assigned by the SOCKS +// server. +func (d *Dialer) DialWithConn(ctx context.Context, c net.Conn, network, address string) (net.Addr, error) { + if err := d.validateTarget(network, address); err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + if ctx == nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")} + } + a, err := d.connect(ctx, c, address) + if err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + return a, nil +} + +// Dial connects to the provided address on the provided network. +// +// Unlike DialContext, it returns a raw transport connection instead +// of a forward proxy connection. +// +// Deprecated: Use DialContext or DialWithConn instead. 
+func (d *Dialer) Dial(network, address string) (net.Conn, error) { + if err := d.validateTarget(network, address); err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + var err error + var c net.Conn + if d.ProxyDial != nil { + c, err = d.ProxyDial(context.Background(), d.proxyNetwork, d.proxyAddress) + } else { + c, err = net.Dial(d.proxyNetwork, d.proxyAddress) + } + if err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + if _, err := d.DialWithConn(context.Background(), c, network, address); err != nil { + c.Close() + return nil, err + } + return c, nil +} + +func (d *Dialer) validateTarget(network, address string) error { + switch network { + case "tcp", "tcp6", "tcp4": + default: + return errors.New("network not implemented") + } + switch d.cmd { + case CmdConnect, cmdBind: + default: + return errors.New("command not implemented") + } + return nil +} + +func (d *Dialer) pathAddrs(address string) (proxy, dst net.Addr, err error) { + for i, s := range []string{d.proxyAddress, address} { + host, port, err := splitHostPort(s) + if err != nil { + return nil, nil, err + } + a := &Addr{Port: port} + a.IP = net.ParseIP(host) + if a.IP == nil { + a.Name = host + } + if i == 0 { + proxy = a + } else { + dst = a + } + } + return +} + +// NewDialer returns a new Dialer that dials through the provided +// proxy server's network and address. +func NewDialer(network, address string) *Dialer { + return &Dialer{proxyNetwork: network, proxyAddress: address, cmd: CmdConnect} +} + +const ( + authUsernamePasswordVersion = 0x01 + authStatusSucceeded = 0x00 +) + +// UsernamePassword are the credentials for the username/password +// authentication method. +type UsernamePassword struct { + Username string + Password string +} + +// Authenticate authenticates a pair of username and password with the +// proxy server. +func (up *UsernamePassword) Authenticate(ctx context.Context, rw io.ReadWriter, auth AuthMethod) error { + switch auth { + case AuthMethodNotRequired: + return nil + case AuthMethodUsernamePassword: + if len(up.Username) == 0 || len(up.Username) > 255 || len(up.Password) == 0 || len(up.Password) > 255 { + return errors.New("invalid username/password") + } + b := []byte{authUsernamePasswordVersion} + b = append(b, byte(len(up.Username))) + b = append(b, up.Username...) + b = append(b, byte(len(up.Password))) + b = append(b, up.Password...) + // TODO(mikio): handle IO deadlines and cancelation if + // necessary + if _, err := rw.Write(b); err != nil { + return err + } + if _, err := io.ReadFull(rw, b[:2]); err != nil { + return err + } + if b[0] != authUsernamePasswordVersion { + return errors.New("invalid username/password version") + } + if b[1] != authStatusSucceeded { + return errors.New("username/password authentication failed") + } + return nil + } + return errors.New("unsupported authentication method " + strconv.Itoa(int(auth))) +} diff --git a/vendor/golang.org/x/net/proxy/dial.go b/vendor/golang.org/x/net/proxy/dial.go new file mode 100644 index 00000000000..811c2e4e962 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/dial.go @@ -0,0 +1,54 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package proxy
+
+import (
+ "context"
+ "net"
+)
+
+// A ContextDialer dials using a context.
+type ContextDialer interface {
+ DialContext(ctx context.Context, network, address string) (net.Conn, error)
+}
+
+// Dial works like DialContext on net.Dialer but using a dialer returned by FromEnvironment.
+//
+// The passed ctx is only used for returning the Conn, not the lifetime of the Conn.
+//
+// Custom dialers (registered via RegisterDialerType) that do not implement ContextDialer
+// can leak a goroutine for as long as it takes the underlying Dialer implementation to time out.
+//
+// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed.
+func Dial(ctx context.Context, network, address string) (net.Conn, error) {
+ d := FromEnvironment()
+ if xd, ok := d.(ContextDialer); ok {
+ return xd.DialContext(ctx, network, address)
+ }
+ return dialContext(ctx, d, network, address)
+}
+
+// WARNING: this can leak a goroutine for as long as the underlying Dialer implementation takes to time out.
+// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed.
+func dialContext(ctx context.Context, d Dialer, network, address string) (net.Conn, error) {
+ var (
+ conn net.Conn
+ done = make(chan struct{}, 1)
+ err error
+ )
+ go func() {
+ conn, err = d.Dial(network, address)
+ close(done)
+ if conn != nil && ctx.Err() != nil {
+ conn.Close()
+ }
+ }()
+ select {
+ case <-ctx.Done():
+ err = ctx.Err()
+ case <-done:
+ }
+ return conn, err
+}
diff --git a/vendor/golang.org/x/net/proxy/direct.go b/vendor/golang.org/x/net/proxy/direct.go
index 4c5ad88b1e7..3d66bdef9d7 100644
--- a/vendor/golang.org/x/net/proxy/direct.go
+++ b/vendor/golang.org/x/net/proxy/direct.go
@@ -5,14 +5,27 @@
 package proxy
 
 import (
+ "context"
 "net"
 )
 
 type direct struct{}
 
-// Direct is a direct proxy: one that makes network connections directly.
+// Direct implements Dialer by making network connections directly using net.Dial or net.DialContext.
 var Direct = direct{}
 
+var (
+ _ Dialer = Direct
+ _ ContextDialer = Direct
+)
+
+// Dial directly invokes net.Dial with the supplied parameters.
 func (direct) Dial(network, addr string) (net.Conn, error) {
 return net.Dial(network, addr)
 }
+
+// DialContext instantiates a net.Dialer and invokes its DialContext method with the supplied parameters.
+func (direct) DialContext(ctx context.Context, network, addr string) (net.Conn, error) {
+ var d net.Dialer
+ return d.DialContext(ctx, network, addr)
+}
diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go
index 0689bb6a70f..573fe79e86e 100644
--- a/vendor/golang.org/x/net/proxy/per_host.go
+++ b/vendor/golang.org/x/net/proxy/per_host.go
@@ -5,6 +5,7 @@
 package proxy
 
 import (
+ "context"
 "net"
 "strings"
 )
@@ -41,6 +42,20 @@ func (p *PerHost) Dial(network, addr string) (c net.Conn, err error) {
 return p.dialerForRequest(host).Dial(network, addr)
 }
 
+// DialContext connects to the address addr on the given network through either
+// defaultDialer or bypass.
+func (p *PerHost) DialContext(ctx context.Context, network, addr string) (c net.Conn, err error) {
+ host, _, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, err
+ }
+ d := p.dialerForRequest(host)
+ if x, ok := d.(ContextDialer); ok {
+ return x.DialContext(ctx, network, addr)
+ }
+ return dialContext(ctx, d, network, addr)
+}
+
 func (p *PerHost) dialerForRequest(host string) Dialer {
 if ip := net.ParseIP(host); ip != nil {
 for _, net := range p.bypassNetworks {
diff --git a/vendor/golang.org/x/net/proxy/proxy.go b/vendor/golang.org/x/net/proxy/proxy.go
index 553ead7cf0e..9ff4b9a7767 100644
--- a/vendor/golang.org/x/net/proxy/proxy.go
+++ b/vendor/golang.org/x/net/proxy/proxy.go
@@ -15,6 +15,7 @@ import (
 )
 
 // A Dialer is a means to establish a connection.
+// Custom dialers should also implement ContextDialer.
 type Dialer interface {
 // Dial connects to the given address via the proxy.
 Dial(network, addr string) (c net.Conn, err error)
@@ -25,21 +26,30 @@ type Auth struct {
 User, Password string
 }
 
-// FromEnvironment returns the dialer specified by the proxy related variables in
-// the environment.
+// FromEnvironment returns the dialer specified by the proxy-related
+// variables in the environment and makes underlying connections
+// directly.
 func FromEnvironment() Dialer {
+ return FromEnvironmentUsing(Direct)
+}
+
+// FromEnvironmentUsing returns the dialer specified by the proxy-related
+// variables in the environment and makes underlying connections
+// using the provided forwarding Dialer (for instance, a *net.Dialer
+// with desired configuration).
+func FromEnvironmentUsing(forward Dialer) Dialer {
 allProxy := allProxyEnv.Get()
 if len(allProxy) == 0 {
- return Direct
+ return forward
 }
 
 proxyURL, err := url.Parse(allProxy)
 if err != nil {
- return Direct
+ return forward
 }
- proxy, err := FromURL(proxyURL, Direct)
+ proxy, err := FromURL(proxyURL, forward)
 if err != nil {
- return Direct
+ return forward
 }
 
 noProxy := noProxyEnv.Get()
@@ -47,7 +57,7 @@ func FromEnvironment() Dialer {
 return proxy
 }
 
- perHost := NewPerHost(proxy, Direct)
+ perHost := NewPerHost(proxy, forward)
 perHost.AddFromString(noProxy)
 return perHost
 }
@@ -79,8 +89,13 @@ func FromURL(u *url.URL, forward Dialer) (Dialer, error) {
 }
 
 switch u.Scheme {
- case "socks5":
- return SOCKS5("tcp", u.Host, auth, forward)
+ case "socks5", "socks5h":
+ addr := u.Hostname()
+ port := u.Port()
+ if port == "" {
+ port = "1080"
+ }
+ return SOCKS5("tcp", net.JoinHostPort(addr, port), auth, forward)
 }
 
 // If the scheme doesn't match any of the built-in schemes, see if it
diff --git a/vendor/golang.org/x/net/proxy/socks5.go b/vendor/golang.org/x/net/proxy/socks5.go
index 3fed38ef1cc..c91651f96db 100644
--- a/vendor/golang.org/x/net/proxy/socks5.go
+++ b/vendor/golang.org/x/net/proxy/socks5.go
@@ -5,210 +5,38 @@
 package proxy
 
 import (
- "errors"
- "io"
+ "context"
 "net"
- "strconv"
-)
-
-// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address
-// with an optional username and password. See RFC 1928 and RFC 1929.
-func SOCKS5(network, addr string, auth *Auth, forward Dialer) (Dialer, error) { - s := &socks5{ - network: network, - addr: addr, - forward: forward, - } - if auth != nil { - s.user = auth.User - s.password = auth.Password - } - - return s, nil -} - -type socks5 struct { - user, password string - network, addr string - forward Dialer -} - -const socks5Version = 5 - -const ( - socks5AuthNone = 0 - socks5AuthPassword = 2 -) -const socks5Connect = 1 - -const ( - socks5IP4 = 1 - socks5Domain = 3 - socks5IP6 = 4 + "golang.org/x/net/internal/socks" ) -var socks5Errors = []string{ - "", - "general failure", - "connection forbidden", - "network unreachable", - "host unreachable", - "connection refused", - "TTL expired", - "command not supported", - "address type not supported", -} - -// Dial connects to the address addr on the given network via the SOCKS5 proxy. -func (s *socks5) Dial(network, addr string) (net.Conn, error) { - switch network { - case "tcp", "tcp6", "tcp4": - default: - return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network) - } - - conn, err := s.forward.Dial(s.network, s.addr) - if err != nil { - return nil, err - } - if err := s.connect(conn, addr); err != nil { - conn.Close() - return nil, err - } - return conn, nil -} - -// connect takes an existing connection to a socks5 proxy server, -// and commands the server to extend that connection to target, -// which must be a canonical address with a host and port. -func (s *socks5) connect(conn net.Conn, target string) error { - host, portStr, err := net.SplitHostPort(target) - if err != nil { - return err - } - - port, err := strconv.Atoi(portStr) - if err != nil { - return errors.New("proxy: failed to parse port number: " + portStr) - } - if port < 1 || port > 0xffff { - return errors.New("proxy: port number out of range: " + portStr) - } - - // the size here is just an estimate - buf := make([]byte, 0, 6+len(host)) - - buf = append(buf, socks5Version) - if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 { - buf = append(buf, 2 /* num auth methods */, socks5AuthNone, socks5AuthPassword) - } else { - buf = append(buf, 1 /* num auth methods */, socks5AuthNone) - } - - if _, err := conn.Write(buf); err != nil { - return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - if buf[0] != 5 { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) - } - if buf[1] == 0xff { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") - } - - // See RFC 1929 - if buf[1] == socks5AuthPassword { - buf = buf[:0] - buf = append(buf, 1 /* password protocol version */) - buf = append(buf, uint8(len(s.user))) - buf = append(buf, s.user...) - buf = append(buf, uint8(len(s.password))) - buf = append(buf, s.password...) 
- - if _, err := conn.Write(buf); err != nil { - return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if buf[1] != 0 { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") - } - } - - buf = buf[:0] - buf = append(buf, socks5Version, socks5Connect, 0 /* reserved */) - - if ip := net.ParseIP(host); ip != nil { - if ip4 := ip.To4(); ip4 != nil { - buf = append(buf, socks5IP4) - ip = ip4 +// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given +// address with an optional username and password. +// See RFC 1928 and RFC 1929. +func SOCKS5(network, address string, auth *Auth, forward Dialer) (Dialer, error) { + d := socks.NewDialer(network, address) + if forward != nil { + if f, ok := forward.(ContextDialer); ok { + d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) { + return f.DialContext(ctx, network, address) + } } else { - buf = append(buf, socks5IP6) - } - buf = append(buf, ip...) - } else { - if len(host) > 255 { - return errors.New("proxy: destination host name too long: " + host) + d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) { + return dialContext(ctx, forward, network, address) + } } - buf = append(buf, socks5Domain) - buf = append(buf, byte(len(host))) - buf = append(buf, host...) - } - buf = append(buf, byte(port>>8), byte(port)) - - if _, err := conn.Write(buf); err != nil { - return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) } - - if _, err := io.ReadFull(conn, buf[:4]); err != nil { - return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - failure := "unknown error" - if int(buf[1]) < len(socks5Errors) { - failure = socks5Errors[buf[1]] - } - - if len(failure) > 0 { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) - } - - bytesToDiscard := 0 - switch buf[3] { - case socks5IP4: - bytesToDiscard = net.IPv4len - case socks5IP6: - bytesToDiscard = net.IPv6len - case socks5Domain: - _, err := io.ReadFull(conn, buf[:1]) - if err != nil { - return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + if auth != nil { + up := socks.UsernamePassword{ + Username: auth.User, + Password: auth.Password, } - bytesToDiscard = int(buf[0]) - default: - return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) - } - - if cap(buf) < bytesToDiscard { - buf = make([]byte, bytesToDiscard) - } else { - buf = buf[:bytesToDiscard] - } - if _, err := io.ReadFull(conn, buf); err != nil { - return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - // Also need to discard the port number - if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + d.AuthMethods = []socks.AuthMethod{ + socks.AuthMethodNotRequired, + socks.AuthMethodUsernamePassword, + } + d.Authenticate = up.Authenticate } - - return nil + return d, nil } diff --git a/vendor/vendor.json b/vendor/vendor.json index f2cfdf9883b..47225e49632 100644 
--- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -9,6 +9,7 @@ {"path":"github.com/DataDog/datadog-go/statsd","checksumSHA1":"WvApwvvSe3i/3KO8300dyeFmkbI=","revision":"b10af4b12965a1ad08d164f57d14195b4140d8de","revisionTime":"2017-08-09T10:47:06Z"}, {"path":"github.com/LK4D4/joincontext","checksumSHA1":"Jmf4AnrptgBdQ5TPBJ2M89nooIQ=","revision":"1724345da6d5bcc8b66fefb843b607ab918e175c","revisionTime":"2017-10-26T17:01:39Z"}, {"path":"github.com/Microsoft/go-winio","checksumSHA1":"PbR6ZKoLeSZl8aXxDQqXih0wSgE=","revision":"97e4973ce50b2ff5f09635a57e2b88a037aae829","revisionTime":"2018-08-23T22:24:21Z"}, + {"path":"github.com/Microsoft/hcsshim/osversion","checksumSHA1":"ZEVoETRsmnbHCA6hhCRUtszG9Y8=","revision":"079f257e900ec9086f9f8476e6ded645458b70e2","revisionTime":"2019-06-05T18:19:13Z"}, {"path":"github.com/NVIDIA/gpu-monitoring-tools","checksumSHA1":"kF1vk+8Xvb3nGBiw9+qbUc0SZ4M=","revision":"86f2a9fac6c5b597dc494420005144b8ef7ec9fb","revisionTime":"2018-08-29T22:20:09Z"}, {"path":"github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml","checksumSHA1":"P8FATSSgpe5A17FyPrGpsX95Xw8=","revision":"86f2a9fac6c5b597dc494420005144b8ef7ec9fb","revisionTime":"2018-08-29T22:20:09Z"}, {"path":"github.com/NYTimes/gziphandler","checksumSHA1":"jktW57+vJsziNVPeXMCoujTzdW4=","revision":"97ae7fbaf81620fe97840685304a78a306a39c64","revisionTime":"2017-09-16T00:36:49Z"}, @@ -60,7 +61,7 @@ {"path":"github.com/circonus-labs/circonus-gometrics/checkmgr","checksumSHA1":"Ij8yB33E0Kk+GfTkNRoF1mG26dc=","revision":"d6e3aea90ab9f90fe8456e13fc520f43d102da4d","revisionTime":"2019-01-28T15:50:09Z","version":"=v2","versionExact":"v2"}, {"path":"github.com/circonus-labs/circonusllhist","checksumSHA1":"VbfeVqeOM+dTNxCmpvmYS0LwQn0=","revision":"7d649b46cdc2cd2ed102d350688a75a4fd7778c6","revisionTime":"2016-11-21T13:51:53Z"}, {"path":"github.com/containerd/console","checksumSHA1":"IGtuR58l2zmYRcNf8sPDlCSgovE=","origin":"github.com/opencontainers/runc/vendor/github.com/containerd/console","revision":"459bfaec1fc6c17d8bfb12d0a0f69e7e7271ed2a","revisionTime":"2018-08-23T14:46:37Z"}, - {"path":"github.com/containerd/continuity/pathdriver","checksumSHA1":"GqIrOttKaO7k6HIaHQLPr3cY7rY=","origin":"github.com/docker/docker/vendor/github.com/containerd/continuity/pathdriver","revision":"320063a2ad06a1d8ada61c94c29dbe44e2d87473","revisionTime":"2018-08-16T08:14:46Z"}, + {"path":"github.com/containerd/continuity/pathdriver","checksumSHA1":"6aGvzibOCGGTbCFKOHRWIASC+Us=","revision":"aaeac12a7ffcd198ae25440a9dff125c2e2703a7","revisionTime":"2019-04-26T06:22:06Z"}, {"path":"github.com/containerd/fifo","checksumSHA1":"Ur3lVmFp+HTGUzQU+/ZBolKe8FU=","revision":"3d5202aec260678c48179c56f40e6f38a095738c","revisionTime":"2018-03-07T16:51:37Z"}, {"path":"github.com/containernetworking/cni/pkg/types","checksumSHA1":"NeAp/3+Hedu9tnMai+LihERPj84=","revision":"5c3c17164270150467498a32c71436c7cd5501be","revisionTime":"2016-06-02T16:00:07Z"}, {"path":"github.com/coreos/go-semver/semver","checksumSHA1":"97BsbXOiZ8+Kr+LIuZkQFtSj7H4=","revision":"1817cd4bea52af76542157eeabd74b057d1a199e","revisionTime":"2017-06-13T09:22:38Z"}, @@ -86,44 +87,44 @@ {"path":"github.com/docker/distribution/registry/storage/cache/memory","checksumSHA1":"T8G3A63WALmJ3JT/A0r01LG4KI0=","revision":"b12bd4004afc203f1cbd2072317c8fda30b89710","revisionTime":"2018-08-28T23:03:05Z"}, 
{"path":"github.com/docker/docker-credential-helpers/client","checksumSHA1":"zcDmNPSzI1wVokOiHis5+JSg2Rk=","revision":"73e5f5dbfea31ee3b81111ebbf189785fa69731c","revisionTime":"2018-07-19T07:47:51Z"}, {"path":"github.com/docker/docker-credential-helpers/credentials","checksumSHA1":"4u6EMQqD1zIqOHp76zQFLVH5V8U=","revision":"73e5f5dbfea31ee3b81111ebbf189785fa69731c","revisionTime":"2018-07-19T07:47:51Z"}, - {"path":"github.com/docker/docker/api/types","checksumSHA1":"PM15F2b73fvtlDsjFyXSMd9LJqU=","revision":"baab736a364959f10a5ef86f0cd8a8e44bdcc576","revisionTime":"2018-11-29T15:58:16Z"}, - {"path":"github.com/docker/docker/api/types/blkiodev","checksumSHA1":"/jF0HVFiLzUUuywSjp4F/piM7BM=","revision":"baab736a364959f10a5ef86f0cd8a8e44bdcc576","revisionTime":"2018-11-29T15:58:16Z"}, - {"path":"github.com/docker/docker/api/types/container","checksumSHA1":"+jeShxohzdtOkwAzy9e0X/fq6n4=","revision":"baab736a364959f10a5ef86f0cd8a8e44bdcc576","revisionTime":"2018-11-29T15:58:16Z"}, - {"path":"github.com/docker/docker/api/types/filters","checksumSHA1":"RMZg2+TAla7E2KiiyF5yupWn0lY=","revision":"baab736a364959f10a5ef86f0cd8a8e44bdcc576","revisionTime":"2018-11-29T15:58:16Z"}, - {"path":"github.com/docker/docker/api/types/mount","checksumSHA1":"9OClWW7OCikgz4QCS/sAVcvqcWk=","revision":"baab736a364959f10a5ef86f0cd8a8e44bdcc576","revisionTime":"2018-11-29T15:58:16Z"}, - {"path":"github.com/docker/docker/api/types/network","checksumSHA1":"qZNE4g8YWfV6ryZp8kN9BwWYCeM=","revision":"baab736a364959f10a5ef86f0cd8a8e44bdcc576","revisionTime":"2018-11-29T15:58:16Z"}, - {"path":"github.com/docker/docker/api/types/registry","checksumSHA1":"m4Jg5WnW75I65nvkEno8PElSXik=","revision":"baab736a364959f10a5ef86f0cd8a8e44bdcc576","revisionTime":"2018-11-29T15:58:16Z"}, - {"path":"github.com/docker/docker/api/types/strslice","checksumSHA1":"OQEUS/2J2xVHpfvcsxcXzYqBSeY=","revision":"baab736a364959f10a5ef86f0cd8a8e44bdcc576","revisionTime":"2018-11-29T15:58:16Z"}, - {"path":"github.com/docker/docker/api/types/swarm","checksumSHA1":"NT/DoroAQOev6keoJO0zKQZmjbE=","revision":"baab736a364959f10a5ef86f0cd8a8e44bdcc576","revisionTime":"2018-11-29T15:58:16Z"}, - {"path":"github.com/docker/docker/api/types/swarm/runtime","checksumSHA1":"txs5EKTbKgVyKmKKSnaH3fr+odA=","revision":"baab736a364959f10a5ef86f0cd8a8e44bdcc576","revisionTime":"2018-11-29T15:58:16Z"}, - {"path":"github.com/docker/docker/api/types/versions","checksumSHA1":"MZsgRjJJ0D/gAsXfKiEys+op6dE=","revision":"baab736a364959f10a5ef86f0cd8a8e44bdcc576","revisionTime":"2018-11-29T15:58:16Z"}, - {"path":"github.com/docker/docker/errdefs","checksumSHA1":"dF9WiXuwISBPd8bQfqpuoWtB3jk=","revision":"baab736a364959f10a5ef86f0cd8a8e44bdcc576","revisionTime":"2018-11-29T15:58:16Z"}, - {"path":"github.com/docker/docker/opts","checksumSHA1":"u6EOrZRfhdjr4up14b2JJ7MMMaY=","revision":"baab736a364959f10a5ef86f0cd8a8e44bdcc576","revisionTime":"2018-11-29T15:58:16Z"}, - {"path":"github.com/docker/docker/pkg/archive","checksumSHA1":"wZ8yvJiuW5sK2VmoEniu1yjVlBQ=","revision":"baab736a364959f10a5ef86f0cd8a8e44bdcc576","revisionTime":"2018-11-29T15:58:16Z"}, - {"path":"github.com/docker/docker/pkg/fileutils","checksumSHA1":"eMoRb/diYeuYLojU7ChN5DaETHc=","revision":"baab736a364959f10a5ef86f0cd8a8e44bdcc576","revisionTime":"2018-11-29T15:58:16Z"}, - 
{"path":"github.com/docker/docker/pkg/homedir","checksumSHA1":"y37I+5AS96wQSiAxOayiMgnZawA=","revision":"baab736a364959f10a5ef86f0cd8a8e44bdcc576","revisionTime":"2018-11-29T15:58:16Z"}, - {"path":"github.com/docker/docker/pkg/idtools","checksumSHA1":"+Ir5nXNDvy3cwbTlBh06lR7zUrI=","revision":"baab736a364959f10a5ef86f0cd8a8e44bdcc576","revisionTime":"2018-11-29T15:58:16Z"}, - {"path":"github.com/docker/docker/pkg/ioutils","checksumSHA1":"Ybq78CnAoQWVBk+lkh3zykmcSjs=","revision":"baab736a364959f10a5ef86f0cd8a8e44bdcc576","revisionTime":"2018-11-29T15:58:16Z"}, - {"path":"github.com/docker/docker/pkg/jsonmessage","checksumSHA1":"KQflv+x9hoJywgvxMwWcJqrmdkQ=","revision":"baab736a364959f10a5ef86f0cd8a8e44bdcc576","revisionTime":"2018-11-29T15:58:16Z"}, - {"path":"github.com/docker/docker/pkg/longpath","checksumSHA1":"EXiIm2xIL7Ds+YsQUx8Z3eUYPtI=","revision":"baab736a364959f10a5ef86f0cd8a8e44bdcc576","revisionTime":"2018-11-29T15:58:16Z"}, - {"path":"github.com/docker/docker/pkg/mount","checksumSHA1":"hx613GafM5hLibNt86ijinN89Ro=","revision":"baab736a364959f10a5ef86f0cd8a8e44bdcc576","revisionTime":"2018-11-29T15:58:16Z"}, - {"path":"github.com/docker/docker/pkg/pools","checksumSHA1":"dj8atalGWftfM9vdzCsh9YF1Seg=","revision":"baab736a364959f10a5ef86f0cd8a8e44bdcc576","revisionTime":"2018-11-29T15:58:16Z"}, - {"path":"github.com/docker/docker/pkg/stdcopy","checksumSHA1":"w0waeTRJ1sFygI0dZXH6l9E1c60=","revision":"baab736a364959f10a5ef86f0cd8a8e44bdcc576","revisionTime":"2018-11-29T15:58:16Z"}, - {"path":"github.com/docker/docker/pkg/stringid","checksumSHA1":"gAUCA6R7F9kObegRGGNX5PzJahE=","revision":"baab736a364959f10a5ef86f0cd8a8e44bdcc576","revisionTime":"2018-11-29T15:58:16Z"}, - {"path":"github.com/docker/docker/pkg/system","checksumSHA1":"RreuHCnt9a8brZsc2Q4f5T8wd+E=","revision":"baab736a364959f10a5ef86f0cd8a8e44bdcc576","revisionTime":"2018-11-29T15:58:16Z"}, - {"path":"github.com/docker/docker/pkg/tarsum","checksumSHA1":"I6mTgOFa7NeZpYw2S5342eenRLY=","revision":"baab736a364959f10a5ef86f0cd8a8e44bdcc576","revisionTime":"2018-11-29T15:58:16Z"}, - {"path":"github.com/docker/docker/pkg/term","checksumSHA1":"GFsDxJkQz407/2nUBmWuafG+uF8=","revision":"baab736a364959f10a5ef86f0cd8a8e44bdcc576","revisionTime":"2018-11-29T15:58:16Z"}, - {"path":"github.com/docker/docker/pkg/term/windows","checksumSHA1":"TeOtxuBbbZtp6wDK/t4DdaGGSC0=","revision":"baab736a364959f10a5ef86f0cd8a8e44bdcc576","revisionTime":"2018-11-29T15:58:16Z"}, - {"path":"github.com/docker/docker/registry","checksumSHA1":"0ZOQ9gYPwnxxjSIytDpwvsN5Rl0=","revision":"baab736a364959f10a5ef86f0cd8a8e44bdcc576","revisionTime":"2018-11-29T15:58:16Z"}, - {"path":"github.com/docker/docker/registry/resumable","checksumSHA1":"jH7uQnDehFQygPP3zLC/mLSqgOk=","revision":"baab736a364959f10a5ef86f0cd8a8e44bdcc576","revisionTime":"2018-11-29T15:58:16Z"}, - {"path":"github.com/docker/docker/volume","checksumSHA1":"Bs344j8rU7oCQyIcIhO9FJyk3ts=","revision":"baab736a364959f10a5ef86f0cd8a8e44bdcc576","revisionTime":"2018-11-29T15:58:16Z"}, - {"path":"github.com/docker/docker/volume/mounts","checksumSHA1":"J4wlScrHqr/jCIn82fgVF5mzO34=","revision":"baab736a364959f10a5ef86f0cd8a8e44bdcc576","revisionTime":"2018-11-29T15:58:16Z"}, - {"path":"github.com/docker/go-connections/nat","checksumSHA1":"JbiWTzH699Sqz25XmDlsARpMN9w=","revision":"7da10c8c50cad14494ec818dcdfb6506265c0086","revisionTime":"2017-02-03T23:56:24Z"}, - 
{"path":"github.com/docker/go-connections/sockets","checksumSHA1":"jUfDG3VQsA2UZHvvIXncgiddpYA=","origin":"github.com/docker/docker/vendor/github.com/docker/go-connections/sockets","revision":"320063a2ad06a1d8ada61c94c29dbe44e2d87473","revisionTime":"2018-08-16T08:14:46Z"}, - {"path":"github.com/docker/go-connections/tlsconfig","checksumSHA1":"zUG/J7nb6PWxfSXOoET6NePfyc0=","origin":"github.com/docker/docker/vendor/github.com/docker/go-connections/tlsconfig","revision":"320063a2ad06a1d8ada61c94c29dbe44e2d87473","revisionTime":"2018-08-16T08:14:46Z"}, + {"path":"github.com/docker/docker/api/types/","checksumSHA1":"3l48PzQYRyVBD3qRJ0x84/q5C2E=","origin":"github.com/docker/engine/api/types/","revision":"d2cfbce3f3b0e028562fc03178851a1c170099b5","revisionTime":"2019-04-23T20:17:26Z","version":"v18.09.6","versionExact":"v18.09.6"}, + {"path":"github.com/docker/docker/api/types/blkiodev/","checksumSHA1":"/jF0HVFiLzUUuywSjp4F/piM7BM=","origin":"github.com/docker/engine/api/types/blkiodev/","revision":"d2cfbce3f3b0e028562fc03178851a1c170099b5","revisionTime":"2019-04-23T20:17:26Z","version":"v18.09.6","versionExact":"v18.09.6"}, + {"path":"github.com/docker/docker/api/types/container","checksumSHA1":"lsxFU6qegOtXClSTthOvfPtly5I=","origin":"github.com/docker/engine/api/types/container/","revision":"d2cfbce3f3b0e028562fc03178851a1c170099b5","revisionTime":"2019-04-23T20:17:26Z","version":"v18.09.6","versionExact":"v18.09.6"}, + {"path":"github.com/docker/docker/api/types/filters","checksumSHA1":"y9EA6+kZQLx6kCM277CFHTm4eiw=","origin":"github.com/docker/engine/api/types/filters/","revision":"d2cfbce3f3b0e028562fc03178851a1c170099b5","revisionTime":"2019-04-23T20:17:26Z","version":"v18.09.6","versionExact":"v18.09.6"}, + {"path":"github.com/docker/docker/api/types/mount","checksumSHA1":"k9CaJVvYL7SxcIP72ng/YcOuF9k=","origin":"github.com/docker/engine/api/types/mount/","revision":"d2cfbce3f3b0e028562fc03178851a1c170099b5","revisionTime":"2019-04-23T20:17:26Z","version":"v18.09.6","versionExact":"v18.09.6"}, + {"path":"github.com/docker/docker/api/types/network/","checksumSHA1":"qZNE4g8YWfV6ryZp8kN9BwWYCeM=","origin":"github.com/docker/engine/api/types/network/","revision":"d2cfbce3f3b0e028562fc03178851a1c170099b5","revisionTime":"2019-04-23T20:17:26Z","version":"v18.09.6","versionExact":"v18.09.6"}, + {"path":"github.com/docker/docker/api/types/registry/","checksumSHA1":"m4Jg5WnW75I65nvkEno8PElSXik=","origin":"github.com/docker/engine/api/types/registry/","revision":"d2cfbce3f3b0e028562fc03178851a1c170099b5","revisionTime":"2019-04-23T20:17:26Z","version":"v18.09.6","versionExact":"v18.09.6"}, + {"path":"github.com/docker/docker/api/types/strslice/","checksumSHA1":"OQEUS/2J2xVHpfvcsxcXzYqBSeY=","origin":"github.com/docker/engine/api/types/strslice/","revision":"d2cfbce3f3b0e028562fc03178851a1c170099b5","revisionTime":"2019-04-23T20:17:26Z","version":"v18.09.6","versionExact":"v18.09.6"}, + {"path":"github.com/docker/docker/api/types/swarm/","checksumSHA1":"hHIt7htGk3uVtYCQid713a752Ik=","origin":"github.com/docker/engine/api/types/swarm/","revision":"d2cfbce3f3b0e028562fc03178851a1c170099b5","revisionTime":"2019-04-23T20:17:26Z","version":"v18.09.6","versionExact":"v18.09.6"}, + 
{"path":"github.com/docker/docker/api/types/swarm/runtime/","checksumSHA1":"txs5EKTbKgVyKmKKSnaH3fr+odA=","origin":"github.com/docker/engine/api/types/swarm/runtime/","revision":"d2cfbce3f3b0e028562fc03178851a1c170099b5","revisionTime":"2019-04-23T20:17:26Z","version":"v18.09.6","versionExact":"v18.09.6"}, + {"path":"github.com/docker/docker/api/types/versions/","checksumSHA1":"MZsgRjJJ0D/gAsXfKiEys+op6dE=","origin":"github.com/docker/engine/api/types/versions/","revision":"d2cfbce3f3b0e028562fc03178851a1c170099b5","revisionTime":"2019-04-23T20:17:26Z","version":"v18.09.6","versionExact":"v18.09.6"}, + {"path":"github.com/docker/docker/errdefs/","checksumSHA1":"dF9WiXuwISBPd8bQfqpuoWtB3jk=","origin":"github.com/docker/engine/errdefs/","revision":"d2cfbce3f3b0e028562fc03178851a1c170099b5","revisionTime":"2019-04-23T20:17:26Z","version":"v18.09.6","versionExact":"v18.09.6"}, + {"path":"github.com/docker/docker/opts/","checksumSHA1":"u6EOrZRfhdjr4up14b2JJ7MMMaY=","origin":"github.com/docker/engine/opts/","revision":"d2cfbce3f3b0e028562fc03178851a1c170099b5","revisionTime":"2019-04-23T20:17:26Z","version":"v18.09.6","versionExact":"v18.09.6"}, + {"path":"github.com/docker/docker/pkg/fileutils/","checksumSHA1":"CYkS5Yh4GZ80KS+n/o+c5d0ktsA=","origin":"github.com/docker/engine/pkg/fileutils/","revision":"d2cfbce3f3b0e028562fc03178851a1c170099b5","revisionTime":"2019-04-23T20:17:26Z","version":"v18.09.6","versionExact":"v18.09.6"}, + {"path":"github.com/docker/docker/pkg/homedir/","checksumSHA1":"y37I+5AS96wQSiAxOayiMgnZawA=","origin":"github.com/docker/engine/pkg/homedir/","revision":"d2cfbce3f3b0e028562fc03178851a1c170099b5","revisionTime":"2019-04-23T20:17:26Z","version":"v18.09.6","versionExact":"v18.09.6"}, + {"path":"github.com/docker/docker/pkg/idtools/","checksumSHA1":"+Ir5nXNDvy3cwbTlBh06lR7zUrI=","origin":"github.com/docker/engine/pkg/idtools/","revision":"d2cfbce3f3b0e028562fc03178851a1c170099b5","revisionTime":"2019-04-23T20:17:26Z","version":"v18.09.6","versionExact":"v18.09.6"}, + {"path":"github.com/docker/docker/pkg/ioutils/","checksumSHA1":"Ybq78CnAoQWVBk+lkh3zykmcSjs=","origin":"github.com/docker/engine/pkg/ioutils/","revision":"d2cfbce3f3b0e028562fc03178851a1c170099b5","revisionTime":"2019-04-23T20:17:26Z","version":"v18.09.6","versionExact":"v18.09.6"}, + {"path":"github.com/docker/docker/pkg/jsonmessage/","checksumSHA1":"KQflv+x9hoJywgvxMwWcJqrmdkQ=","origin":"github.com/docker/engine/pkg/jsonmessage/","revision":"d2cfbce3f3b0e028562fc03178851a1c170099b5","revisionTime":"2019-04-23T20:17:26Z","version":"v18.09.6","versionExact":"v18.09.6"}, + {"path":"github.com/docker/docker/pkg/longpath/","checksumSHA1":"EXiIm2xIL7Ds+YsQUx8Z3eUYPtI=","origin":"github.com/docker/engine/pkg/longpath/","revision":"d2cfbce3f3b0e028562fc03178851a1c170099b5","revisionTime":"2019-04-23T20:17:26Z","version":"v18.09.6","versionExact":"v18.09.6"}, + {"path":"github.com/docker/docker/pkg/mount/","checksumSHA1":"hx613GafM5hLibNt86ijinN89Ro=","origin":"github.com/docker/engine/pkg/mount/","revision":"d2cfbce3f3b0e028562fc03178851a1c170099b5","revisionTime":"2019-04-23T20:17:26Z","version":"v18.09.6","versionExact":"v18.09.6"}, + 
{"path":"github.com/docker/docker/pkg/pools/","checksumSHA1":"dj8atalGWftfM9vdzCsh9YF1Seg=","origin":"github.com/docker/engine/pkg/pools/","revision":"d2cfbce3f3b0e028562fc03178851a1c170099b5","revisionTime":"2019-04-23T20:17:26Z","version":"v18.09.6","versionExact":"v18.09.6"}, + {"path":"github.com/docker/docker/pkg/stdcopy/","checksumSHA1":"w0waeTRJ1sFygI0dZXH6l9E1c60=","origin":"github.com/docker/engine/pkg/stdcopy/","revision":"d2cfbce3f3b0e028562fc03178851a1c170099b5","revisionTime":"2019-04-23T20:17:26Z","version":"v18.09.6","versionExact":"v18.09.6"}, + {"path":"github.com/docker/docker/pkg/stringid/","checksumSHA1":"gAUCA6R7F9kObegRGGNX5PzJahE=","origin":"github.com/docker/engine/pkg/stringid/","revision":"d2cfbce3f3b0e028562fc03178851a1c170099b5","revisionTime":"2019-04-23T20:17:26Z","version":"v18.09.6","versionExact":"v18.09.6"}, + {"path":"github.com/docker/docker/pkg/system/","checksumSHA1":"RreuHCnt9a8brZsc2Q4f5T8wd+E=","origin":"github.com/docker/engine/pkg/system/","revision":"d2cfbce3f3b0e028562fc03178851a1c170099b5","revisionTime":"2019-04-23T20:17:26Z","version":"v18.09.6","versionExact":"v18.09.6"}, + {"path":"github.com/docker/docker/pkg/tarsum/","checksumSHA1":"I6mTgOFa7NeZpYw2S5342eenRLY=","origin":"github.com/docker/engine/pkg/tarsum/","revision":"d2cfbce3f3b0e028562fc03178851a1c170099b5","revisionTime":"2019-04-23T20:17:26Z","version":"v18.09.6","versionExact":"v18.09.6"}, + {"path":"github.com/docker/docker/pkg/term/","checksumSHA1":"GFsDxJkQz407/2nUBmWuafG+uF8=","origin":"github.com/docker/engine/pkg/term/","revision":"d2cfbce3f3b0e028562fc03178851a1c170099b5","revisionTime":"2019-04-23T20:17:26Z","version":"v18.09.6","versionExact":"v18.09.6"}, + {"path":"github.com/docker/docker/pkg/term/windows/","checksumSHA1":"TeOtxuBbbZtp6wDK/t4DdaGGSC0=","origin":"github.com/docker/engine/pkg/term/windows/","revision":"d2cfbce3f3b0e028562fc03178851a1c170099b5","revisionTime":"2019-04-23T20:17:26Z","version":"v18.09.6","versionExact":"v18.09.6"}, + {"path":"github.com/docker/docker/registry/","checksumSHA1":"eu9F6Z6u/c4IbJOzdmCF1moPpVg=","origin":"github.com/docker/engine/registry/","revision":"d2cfbce3f3b0e028562fc03178851a1c170099b5","revisionTime":"2019-04-23T20:17:26Z","version":"v18.09.6","versionExact":"v18.09.6"}, + {"path":"github.com/docker/docker/registry/resumable/","checksumSHA1":"jH7uQnDehFQygPP3zLC/mLSqgOk=","origin":"github.com/docker/engine/registry/resumable/","revision":"d2cfbce3f3b0e028562fc03178851a1c170099b5","revisionTime":"2019-04-23T20:17:26Z","version":"v18.09.6","versionExact":"v18.09.6"}, + {"path":"github.com/docker/docker/volume/","checksumSHA1":"Bs344j8rU7oCQyIcIhO9FJyk3ts=","origin":"github.com/docker/engine/volume/","revision":"d2cfbce3f3b0e028562fc03178851a1c170099b5","revisionTime":"2019-04-23T20:17:26Z","version":"v18.09.6","versionExact":"v18.09.6"}, + {"path":"github.com/docker/docker/volume/mounts","checksumSHA1":"61OMV+MFRJxqyaGGUX9+YWqlp8M=","origin":"github.com/docker/engine/volume/mounts/","revision":"d2cfbce3f3b0e028562fc03178851a1c170099b5","revisionTime":"2019-04-23T20:17:26Z","version":"v18.09.6","versionExact":"v18.09.6"}, + 
{"path":"github.com/docker/docker/volume/mounts/","checksumSHA1":"61OMV+MFRJxqyaGGUX9+YWqlp8M=","origin":"github.com/docker/engine/volume/mounts/","revision":"d2cfbce3f3b0e028562fc03178851a1c170099b5","revisionTime":"2019-04-23T20:17:26Z","version":"v18.09.6","versionExact":"v18.09.6"}, + {"path":"github.com/docker/go-connections/nat/","checksumSHA1":"1IPGX6/BnX7QN4DjbBk0UafTB2U=","revision":"7395e3f8aa162843a74ed6d48e79627d9792ac55","revisionTime":"2018-02-28T14:10:15Z","version":"v0.4.0","versionExact":"v0.4.0"}, + {"path":"github.com/docker/go-connections/sockets/","checksumSHA1":"jUfDG3VQsA2UZHvvIXncgiddpYA=","revision":"7395e3f8aa162843a74ed6d48e79627d9792ac55","revisionTime":"2018-02-28T14:10:15Z","version":"v0.4.0","versionExact":"v0.4.0"}, + {"path":"github.com/docker/go-connections/tlsconfig/","checksumSHA1":"KGILLnJybU/+xWJu8rgM4CpYT2M=","revision":"7395e3f8aa162843a74ed6d48e79627d9792ac55","revisionTime":"2018-02-28T14:10:15Z","version":"v0.4.0","versionExact":"v0.4.0"}, {"path":"github.com/docker/go-metrics","checksumSHA1":"kHVt4M5Pfby2dhurp+hZJfQhzVU=","revision":"399ea8c73916000c64c2c76e8da00ca82f8387ab","revisionTime":"2018-02-09T01:25:29Z"}, {"path":"github.com/docker/go-units","checksumSHA1":"18hmvak2Dc9x5cgKeZ2iApviT7w=","comment":"v0.1.0-23-g5d2041e","revision":"5d2041e26a699eaca682e2ea41c8f891e1060444"}, - {"path":"github.com/docker/libnetwork/ipamutils","checksumSHA1":"vcP3kQNGWKHenrvQxfu4FZkB468=","origin":"github.com/docker/docker/vendor/github.com/docker/libnetwork/ipamutils","revision":"320063a2ad06a1d8ada61c94c29dbe44e2d87473","revisionTime":"2018-08-16T08:14:46Z"}, + {"path":"github.com/docker/libnetwork/ipamutils","checksumSHA1":"X07lwsZTwq6wVkKDAPxyTmimwq8=","revision":"ce86291472a965c5cd6adaf2c83acaee6bab630a","revisionTime":"2019-06-04T16:22:10Z"}, {"path":"github.com/dustin/go-humanize","checksumSHA1":"xteP9Px90oMrg/HZuqvZkpXCR+s=","revision":"8929fe90cee4b2cb9deb468b51fb34eba64d1bf0"}, {"path":"github.com/elazarl/go-bindata-assetfs","checksumSHA1":"7DxViusFRJ7UPH0jZqYatwDrOkY=","revision":"30f82fa23fd844bd5bb1e5f216db87fd77b5eb43","revisionTime":"2017-02-27T21:27:28Z"}, {"path":"github.com/fatih/color","checksumSHA1":"VsE3zx2d8kpwj97TWhYddzAwBrY=","revision":"507f6050b8568533fb3f5504de8e5205fa62a114","revisionTime":"2018-02-13T13:34:03Z"}, @@ -238,7 +239,7 @@ {"path":"github.com/hashicorp/vault/helper/jsonutil","checksumSHA1":"POgkM3GrjRFw6H3sw95YNEs552A=","revision":"8575f8fedcf8f5a6eb2b4701cb527b99574b5286","revisionTime":"2018-09-06T17:45:45Z"}, {"path":"github.com/hashicorp/vault/helper/parseutil","checksumSHA1":"HA2MV/2XI0HcoThSRxQCaBZR2ps=","revision":"8575f8fedcf8f5a6eb2b4701cb527b99574b5286","revisionTime":"2018-09-06T17:45:45Z"}, {"path":"github.com/hashicorp/vault/helper/strutil","checksumSHA1":"HdVuYhZ5TuxeIFqi0jy2GHW7a4o=","revision":"8575f8fedcf8f5a6eb2b4701cb527b99574b5286","revisionTime":"2018-09-06T17:45:45Z"}, - {"path":"github.com/hashicorp/yamux","checksumSHA1":"m9OKUPd/iliwKxs+LCSmAGpDJOs=","revision":"7221087c3d281fda5f794e28c2ea4c6e4d5c4558","revisionTime":"2018-09-17T20:50:41Z"}, + {"path":"github.com/hashicorp/yamux","checksumSHA1":"ldkAQ1CpiAaQ9sti0qIch+UyRsI=","revision":"7221087c3d281fda5f794e28c2ea4c6e4d5c4558","revisionTime":"2018-09-17T20:50:41Z"}, 
{"path":"github.com/hpcloud/tail/util","checksumSHA1":"0xM336Lb25URO/1W1/CtGoRygVU=","revision":"37f4271387456dd1bf82ab1ad9229f060cc45386","revisionTime":"2017-08-14T16:06:53Z"}, {"path":"github.com/hpcloud/tail/watch","checksumSHA1":"TP4OAv5JMtzj2TB6OQBKqauaKDc=","revision":"37f4271387456dd1bf82ab1ad9229f060cc45386","revisionTime":"2017-08-14T16:06:53Z"}, {"path":"github.com/jmespath/go-jmespath","checksumSHA1":"3/Bhy+ua/DCv2ElMD5GzOYSGN6g=","comment":"0.2.2-2-gc01cf91","revision":"c01cf91b011868172fdcd9f41838e80c9d716264"}, @@ -262,6 +263,7 @@ {"path":"github.com/mitchellh/mapstructure","checksumSHA1":"4Js6Jlu93Wa0o6Kjt393L9Z7diE=","revision":"281073eb9eb092240d33ef253c404f1cca550309"}, {"path":"github.com/mitchellh/reflectwalk","checksumSHA1":"KqsMqI+Y+3EFYPhyzafpIneaVCM=","revision":"8d802ff4ae93611b807597f639c19f76074df5c6","revisionTime":"2017-05-08T17:38:06Z"}, {"path":"github.com/moby/moby/daemon/caps","checksumSHA1":"FoDTHct8ocl470GYc0i+JRWfrys=","revision":"39377bb96d459d2ef59bd2bad75468638a7f86a3","revisionTime":"2018-01-18T19:02:33Z"}, + {"path":"github.com/morikuni/aec","checksumSHA1":"OfTtsGbmK3BBWGvGfXJ1BV3fzmY=","revision":"39771216ff4c63d11f5e604076f9c45e8be1067b","revisionTime":"2017-01-13T03:34:06Z"}, {"path":"github.com/mrunalp/fileutils","checksumSHA1":"EKGlMEHq/nwBXQGi9JN/y+H7YMU=","origin":"github.com/opencontainers/runc/vendor/github.com/mrunalp/fileutils","revision":"459bfaec1fc6c17d8bfb12d0a0f69e7e7271ed2a","revisionTime":"2018-08-23T14:46:37Z"}, {"path":"github.com/oklog/run","checksumSHA1":"nf3UoPNBIut7BL9nWE8Fw2X2j+Q=","revision":"6934b124db28979da51d3470dadfa34d73d72652","revisionTime":"2018-03-08T00:51:04Z"}, {"path":"github.com/onsi/ginkgo","checksumSHA1":"cwbidLG1ET7YSqlwca+nSfYxIbg=","revision":"ba8e856bb854d6771a72ddf6497a42dad3a0c971","revisionTime":"2018-03-12T10:34:14Z"}, @@ -394,9 +396,10 @@ {"path":"golang.org/x/net/http2","checksumSHA1":"kKuxyoDujo5CopTxAvvZ1rrLdd0=","revision":"ab5485076ff3407ad2d02db054635913f017b0ed","revisionTime":"2017-07-19T21:11:51Z"}, {"path":"golang.org/x/net/http2/hpack","checksumSHA1":"ezWhc7n/FtqkLDQKeU2JbW+80tE=","revision":"ab5485076ff3407ad2d02db054635913f017b0ed","revisionTime":"2017-07-19T21:11:51Z"}, {"path":"golang.org/x/net/idna","checksumSHA1":"g/Z/Ka0VgJESgQK7/SJCjm/aX0I=","revision":"ab5485076ff3407ad2d02db054635913f017b0ed","revisionTime":"2017-07-19T21:11:51Z"}, + {"path":"golang.org/x/net/internal/socks","checksumSHA1":"f3Y7JIZH61oMmp8nphqe8Mg+XoU=","revision":"461777fb6f67e8cb9d70cda16573678d085a74cf","revisionTime":"2019-06-07T17:48:03Z"}, {"path":"golang.org/x/net/internal/timeseries","checksumSHA1":"UxahDzW2v4mf/+aFxruuupaoIwo=","revision":"ab5485076ff3407ad2d02db054635913f017b0ed","revisionTime":"2017-07-19T21:11:51Z"}, {"path":"golang.org/x/net/lex/httplex","checksumSHA1":"3xyuaSNmClqG4YWC7g0isQIbUTc=","revision":"ab5485076ff3407ad2d02db054635913f017b0ed","revisionTime":"2017-07-19T21:11:51Z"}, - {"path":"golang.org/x/net/proxy","checksumSHA1":"r9l4r3H6FOLQ0c2JaoXpopFjpnw=","origin":"github.com/docker/docker/vendor/golang.org/x/net/proxy","revision":"320063a2ad06a1d8ada61c94c29dbe44e2d87473","revisionTime":"2018-08-16T08:14:46Z"}, + {"path":"golang.org/x/net/proxy","checksumSHA1":"28Sn0XihdqNv3MysxyRalibC3Tg=","revision":"461777fb6f67e8cb9d70cda16573678d085a74cf","revisionTime":"2019-06-07T17:48:03Z"}, 
{"path":"golang.org/x/net/trace","checksumSHA1":"u/r66lwYfgg682u5hZG7/E7+VCY=","revision":"ab5485076ff3407ad2d02db054635913f017b0ed","revisionTime":"2017-07-19T21:11:51Z"}, {"path":"golang.org/x/sync/errgroup","checksumSHA1":"S0DP7Pn7sZUmXc55IzZnNvERu6s=","revision":"316e794f7b5e3df4e95175a45a5fb8b12f85cb4f","revisionTime":"2016-07-15T18:54:39Z"}, {"path":"golang.org/x/sys/cpu","checksumSHA1":"REkmyB368pIiip76LiqMLspgCRk=","revision":"1b2967e3c290b7c545b3db0deeda16e9be4f98a2","revisionTime":"2018-07-06T09:56:39Z"}, From 655a9b1e6a0398bb376541049e5f233a72d5fd24 Mon Sep 17 00:00:00 2001 From: Yoan Blanc Date: Sat, 22 Jun 2019 11:41:34 +0200 Subject: [PATCH 2/2] govendor sync fix Signed-off-by: Yoan Blanc --- vendor/vendor.json | 1 - 1 file changed, 1 deletion(-) diff --git a/vendor/vendor.json b/vendor/vendor.json index 47225e49632..87e797c3f7c 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -313,7 +313,6 @@ {"path":"github.com/opencontainers/runc/libcontainer/cgroups/systemd","checksumSHA1":"d7B9MiKb1k1Egh5qkNokIfcZ+OY=","revision":"6cc515888830787a93d82138821f0309ad970640","revisionTime":"2019-06-11T12:12:36Z"}, {"path":"github.com/opencontainers/runc/libcontainer/configs","checksumSHA1":"v9sgw4eYRNSsJUSG33OoFIwLqRI=","revision":"6cc515888830787a93d82138821f0309ad970640","revisionTime":"2019-06-11T12:12:36Z"}, {"path":"github.com/opencontainers/runc/libcontainer/configs/validate","checksumSHA1":"hUveFGK1HhGenf0OVoYZWccoW9I=","revision":"6cc515888830787a93d82138821f0309ad970640","revisionTime":"2019-06-11T12:12:36Z"}, - {"path":"github.com/opencontainers/runc/libcontainer/criurpc","checksumSHA1":"n7G7Egz/tOPacXuq+nkvnFai3eU=","revision":"369b920277d27630441336775cd728bc0f19e496","revisionTime":"2018-09-07T18:53:11Z"}, {"path":"github.com/opencontainers/runc/libcontainer/devices","checksumSHA1":"2CwtFvz9kB0RSjFlcCkmq4taJ9U=","revision":"6cc515888830787a93d82138821f0309ad970640","revisionTime":"2019-06-11T12:12:36Z"}, {"path":"github.com/opencontainers/runc/libcontainer/intelrdt","checksumSHA1":"sAbowQ7hjveSH5ADUD9IYXnEAJM=","revision":"6cc515888830787a93d82138821f0309ad970640","revisionTime":"2019-06-11T12:12:36Z"}, {"path":"github.com/opencontainers/runc/libcontainer/keys","checksumSHA1":"mKxBw0il2IWjWYgksX+17ufDw34=","revision":"6cc515888830787a93d82138821f0309ad970640","revisionTime":"2019-06-11T12:12:36Z"},