diff --git a/vendor.conf b/vendor.conf index 3879dbe5d1e..16bfb12eee0 100644 --- a/vendor.conf +++ b/vendor.conf @@ -5,7 +5,7 @@ github.com/containerd/continuity master github.com/containernetworking/cni v0.6.0 github.com/seccomp/containers-golang master github.com/containers/image master -github.com/containers/storage 0b8ab959bba614a4f88bb3791dbc078c3d47f259 +github.com/containers/storage 9cbb6cb3fc2044eae8b8fd8d8da081fe812858c4 github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716 github.com/docker/docker b8571fd81c7d2223c9ecbf799c693e3ef1daaea9 github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1 diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go index ec54a502e9b..ebc1e99a0fe 100644 --- a/vendor/github.com/containers/storage/containers.go +++ b/vendor/github.com/containers/storage/containers.go @@ -2,6 +2,7 @@ package storage import ( "encoding/json" + "fmt" "io/ioutil" "os" "path/filepath" @@ -278,7 +279,8 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat names = dedupeNames(names) for _, name := range names { if _, nameInUse := r.byname[name]; nameInUse { - return nil, ErrDuplicateName + return nil, errors.Wrapf(ErrDuplicateName, + fmt.Sprintf("the container name \"%s\" is already in use by \"%s\". You have to remove that container to be able to reuse that name.", name, r.byname[name].ID)) } } if err == nil { diff --git a/vendor/github.com/containers/storage/containers_ffjson.go b/vendor/github.com/containers/storage/containers_ffjson.go index 40b912bb3c5..6e83808d4f2 100644 --- a/vendor/github.com/containers/storage/containers_ffjson.go +++ b/vendor/github.com/containers/storage/containers_ffjson.go @@ -1,5 +1,6 @@ // Code generated by ffjson . DO NOT EDIT. -// source: ./containers.go +// source: containers.go +// package storage diff --git a/vendor/github.com/containers/storage/drivers/chown.go b/vendor/github.com/containers/storage/drivers/chown.go index c12e73b3bcd..bcba12de96b 100644 --- a/vendor/github.com/containers/storage/drivers/chown.go +++ b/vendor/github.com/containers/storage/drivers/chown.go @@ -6,7 +6,6 @@ import ( "fmt" "os" "path/filepath" - "syscall" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/reexec" @@ -56,47 +55,7 @@ func chownByMapsMain() { if err != nil { return fmt.Errorf("error walking to %q: %v", path, err) } - sysinfo := info.Sys() - if st, ok := sysinfo.(*syscall.Stat_t); ok { - // Map an on-disk UID/GID pair from host to container - // using the first map, then back to the host using the - // second map. Skip that first step if they're 0, to - // compensate for cases where a parent layer should - // have had a mapped value, but didn't. 
- uid, gid := int(st.Uid), int(st.Gid) - if toContainer != nil { - pair := idtools.IDPair{ - UID: uid, - GID: gid, - } - mappedUid, mappedGid, err := toContainer.ToContainer(pair) - if err != nil { - if (uid != 0) || (gid != 0) { - return fmt.Errorf("error mapping host ID pair %#v for %q to container: %v", pair, path, err) - } - mappedUid, mappedGid = uid, gid - } - uid, gid = mappedUid, mappedGid - } - if toHost != nil { - pair := idtools.IDPair{ - UID: uid, - GID: gid, - } - mappedPair, err := toHost.ToHost(pair) - if err != nil { - return fmt.Errorf("error mapping container ID pair %#v for %q to host: %v", pair, path, err) - } - uid, gid = mappedPair.UID, mappedPair.GID - } - if uid != int(st.Uid) || gid != int(st.Gid) { - // Make the change. - if err := syscall.Lchown(path, uid, gid); err != nil { - return fmt.Errorf("%s: chown(%q): %v", os.Args[0], path, err) - } - } - } - return nil + return platformLChown(path, info, toHost, toContainer) } if err := filepath.Walk(".", chown); err != nil { fmt.Fprintf(os.Stderr, "error during chown: %v", err) diff --git a/vendor/github.com/containers/storage/drivers/chown_unix.go b/vendor/github.com/containers/storage/drivers/chown_unix.go new file mode 100644 index 00000000000..5454657ec92 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/chown_unix.go @@ -0,0 +1,55 @@ +// +build !windows + +package graphdriver + +import ( + "fmt" + "os" + "syscall" + + "github.com/containers/storage/pkg/idtools" +) + +func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error { + sysinfo := info.Sys() + if st, ok := sysinfo.(*syscall.Stat_t); ok { + // Map an on-disk UID/GID pair from host to container + // using the first map, then back to the host using the + // second map. Skip that first step if they're 0, to + // compensate for cases where a parent layer should + // have had a mapped value, but didn't. + uid, gid := int(st.Uid), int(st.Gid) + if toContainer != nil { + pair := idtools.IDPair{ + UID: uid, + GID: gid, + } + mappedUid, mappedGid, err := toContainer.ToContainer(pair) + if err != nil { + if (uid != 0) || (gid != 0) { + return fmt.Errorf("error mapping host ID pair %#v for %q to container: %v", pair, path, err) + } + mappedUid, mappedGid = uid, gid + } + uid, gid = mappedUid, mappedGid + } + if toHost != nil { + pair := idtools.IDPair{ + UID: uid, + GID: gid, + } + mappedPair, err := toHost.ToHost(pair) + if err != nil { + return fmt.Errorf("error mapping container ID pair %#v for %q to host: %v", pair, path, err) + } + uid, gid = mappedPair.UID, mappedPair.GID + } + if uid != int(st.Uid) || gid != int(st.Gid) { + // Make the change. 
+ if err := syscall.Lchown(path, uid, gid); err != nil { + return fmt.Errorf("%s: chown(%q): %v", os.Args[0], path, err) + } + } + } + return nil +} diff --git a/vendor/github.com/containers/storage/drivers/chown_windows.go b/vendor/github.com/containers/storage/drivers/chown_windows.go new file mode 100644 index 00000000000..31bd5bb52dd --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/chown_windows.go @@ -0,0 +1,14 @@ +// +build windows + +package graphdriver + +import ( + "os" + "syscall" + + "github.com/containers/storage/pkg/idtools" +) + +func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error { + return &os.PathError{"lchown", path, syscall.EWINDOWS} +} diff --git a/vendor/github.com/containers/storage/drivers/chroot_windows.go b/vendor/github.com/containers/storage/drivers/chroot_windows.go index 1df0317898d..f4dc22a9615 100644 --- a/vendor/github.com/containers/storage/drivers/chroot_windows.go +++ b/vendor/github.com/containers/storage/drivers/chroot_windows.go @@ -1,7 +1,7 @@ package graphdriver import ( - "os" + "fmt" "syscall" ) diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go index 6b5f6912f73..5d5d61d9c83 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -24,6 +24,7 @@ import ( "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/locker" "github.com/containers/storage/pkg/mount" + "github.com/containers/storage/pkg/ostree" "github.com/containers/storage/pkg/parsers" "github.com/containers/storage/pkg/system" units "github.com/docker/go-units" @@ -84,6 +85,9 @@ type overlayOptions struct { overrideKernelCheck bool imageStores []string quota quota.Quota + fuseProgram string + ostreeRepo string + skipMountHome bool } // Driver contains information about the home directory and the list of active mounts that are created using this driver. 
@@ -98,6 +102,7 @@ type Driver struct { naiveDiff graphdriver.DiffDriver supportsDType bool locker *locker.Locker + convert map[string]bool } var ( @@ -147,15 +152,28 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap return nil, err } - supportsDType, err := supportsOverlay(home, fsMagic, rootUID, rootGID) - if err != nil { - os.Remove(filepath.Join(home, linkDir)) - os.Remove(home) - return nil, errors.Wrap(err, "kernel does not support overlay fs") + var supportsDType bool + if opts.fuseProgram != "" { + supportsDType = true + } else { + supportsDType, err = supportsOverlay(home, fsMagic, rootUID, rootGID) + if err != nil { + os.Remove(filepath.Join(home, linkDir)) + os.Remove(home) + return nil, errors.Wrap(err, "kernel does not support overlay fs") + } } - if err := mount.MakePrivate(home); err != nil { - return nil, err + if !opts.skipMountHome { + if err := mount.MakePrivate(home); err != nil { + return nil, err + } + } + + if opts.ostreeRepo != "" { + if err := ostree.CreateOSTreeRepository(opts.ostreeRepo, rootUID, rootGID); err != nil { + return nil, err + } } d := &Driver{ @@ -167,6 +185,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap supportsDType: supportsDType, locker: locker.New(), options: *opts, + convert: make(map[string]bool), } d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, d) @@ -227,6 +246,25 @@ func parseOptions(options []string) (*overlayOptions, error) { } o.imageStores = append(o.imageStores, store) } + case ".fuse_program", "overlay.fuse_program", "overlay2.fuse_program": + logrus.Debugf("overlay: fuse_program=%s", val) + _, err := os.Stat(val) + if err != nil { + return nil, fmt.Errorf("overlay: can't stat FUSE program %s: %v", val, err) + } + o.fuseProgram = val + case "overlay2.ostree_repo", "overlay.ostree_repo", ".ostree_repo": + logrus.Debugf("overlay: ostree_repo=%s", val) + if !ostree.OstreeSupport() { + return nil, fmt.Errorf("overlay: ostree_repo specified but support for ostree is missing") + } + o.ostreeRepo = val + case "overlay2.skip_mount_home", "overlay.skip_mount_home", ".skip_mount_home": + logrus.Debugf("overlay: skip_mount_home=%s", val) + o.skipMountHome, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } default: return nil, fmt.Errorf("overlay: Unknown option %s", key) } @@ -236,6 +274,7 @@ func parseOptions(options []string) (*overlayOptions, error) { func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGID int) (supportsDType bool, err error) { // We can try to modprobe overlay first + exec.Command("modprobe", "overlay").Run() layerDir, err := ioutil.TempDir(home, "compat") @@ -380,6 +419,11 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers") } } + + if d.options.ostreeRepo != "" { + d.convert[id] = true + } + return d.create(id, parent, opts) } @@ -547,6 +591,12 @@ func (d *Driver) getLowerDirs(id string) ([]string, error) { func (d *Driver) Remove(id string) error { d.locker.Lock(id) defer d.locker.Unlock(id) + + // Ignore errors, we don't want to fail if the ostree branch doesn't exist, + if d.options.ostreeRepo != "" { + ostree.DeleteOSTree(d.options.ostreeRepo, id) + } + dir := d.dir(id) lid, err := ioutil.ReadFile(path.Join(dir, "link")) if err == nil { @@ -663,7 +713,7 @@ func (d *Driver) Get(id, mountLabel string) (_ string, retErr error) { // the page size. 
The mount syscall fails if the mount data cannot // fit within a page and relative links make the mount data much // smaller at the expense of requiring a fork exec to chroot. - if len(mountData) > pageSize { + if len(mountData) > pageSize || d.options.fuseProgram != "" { //FIXME: We need to figure out to get this to work with additional stores opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(relLowers, ":"), path.Join(id, "diff"), path.Join(id, "work")) mountData = label.FormatMountLabel(opts, mountLabel) @@ -671,8 +721,16 @@ func (d *Driver) Get(id, mountLabel string) (_ string, retErr error) { return "", fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData)) } - mount = func(source string, target string, mType string, flags uintptr, label string) error { - return mountFrom(d.home, source, target, mType, flags, label) + if d.options.fuseProgram != "" { + mount = func(source string, target string, mType string, flags uintptr, label string) error { + cmdRootless := exec.Command(d.options.fuseProgram, "-o", label, target) + cmdRootless.Dir = d.home + return cmdRootless.Run() + } + } else { + mount = func(source string, target string, mType string, flags uintptr, label string) error { + return mountFrom(d.home, source, target, mType, flags, label) + } } mountTarget = path.Join(id, "merged") } @@ -764,6 +822,13 @@ func (d *Driver) ApplyDiff(id string, idMappings *idtools.IDMappings, parent str return 0, err } + _, convert := d.convert[id] + if convert { + if err := ostree.ConvertToOSTree(d.options.ostreeRepo, applyDir, id); err != nil { + return 0, err + } + } + return directory.Size(applyDir) } diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go index cee55f8d164..9314cb87603 100644 --- a/vendor/github.com/containers/storage/drivers/vfs/driver.go +++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go @@ -9,6 +9,7 @@ import ( "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/chrootarchive" "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/ostree" "github.com/containers/storage/pkg/system" "github.com/opencontainers/selinux/go-selinux/label" ) @@ -42,6 +43,27 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap d.homes = append(d.homes, strings.Split(option[12:], ",")...) 
continue } + if strings.HasPrefix(option, "vfs.ostree_repo=") { + if !ostree.OstreeSupport() { + return nil, fmt.Errorf("vfs: ostree_repo specified but support for ostree is missing") + } + d.ostreeRepo = option[16:] + } + if strings.HasPrefix(option, ".ostree_repo=") { + if !ostree.OstreeSupport() { + return nil, fmt.Errorf("vfs: ostree_repo specified but support for ostree is missing") + } + d.ostreeRepo = option[13:] + } + } + if d.ostreeRepo != "" { + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + if err := ostree.CreateOSTreeRepository(d.ostreeRepo, rootUID, rootGID); err != nil { + return nil, err + } } return graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d)), nil } @@ -53,6 +75,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap type Driver struct { homes []string idMappings *idtools.IDMappings + ostreeRepo string } func (d *Driver) String() string { @@ -77,11 +100,15 @@ func (d *Driver) Cleanup() error { // CreateReadWrite creates a layer that is writable for use as a container // file system. func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { - return d.Create(id, parent, opts) + return d.create(id, parent, opts, false) } // Create prepares the filesystem for the VFS driver and copies the directory for the given id under the parent. func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + return d.create(id, parent, opts, true) +} + +func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool) error { if opts != nil && len(opts.StorageOpt) != 0 { return fmt.Errorf("--storage-opt is not supported for vfs") } @@ -106,14 +133,23 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { if _, mountLabel, err := label.InitLabels(labelOpts); err == nil { label.SetFileLabel(dir, mountLabel) } - if parent == "" { - return nil + if parent != "" { + parentDir, err := d.Get(parent, "") + if err != nil { + return fmt.Errorf("%s: %s", parent, err) + } + if err := CopyWithTar(parentDir, dir); err != nil { + return err + } } - parentDir, err := d.Get(parent, "") - if err != nil { - return fmt.Errorf("%s: %s", parent, err) + + if ro && d.ostreeRepo != "" { + if err := ostree.ConvertToOSTree(d.ostreeRepo, dir, id); err != nil { + return err + } } - return CopyWithTar(parentDir, dir) + return nil + } func (d *Driver) dir(id string) string { @@ -132,6 +168,10 @@ func (d *Driver) dir(id string) string { // Remove deletes the content from the directory for a given id. 
func (d *Driver) Remove(id string) error { + if d.ostreeRepo != "" { + // Ignore errors, we don't want to fail if the ostree branch doesn't exist, + ostree.DeleteOSTree(d.ostreeRepo, id) + } return system.EnsureRemoveAll(d.dir(id)) } diff --git a/vendor/github.com/containers/storage/drivers/windows/windows.go b/vendor/github.com/containers/storage/drivers/windows/windows.go index b750715bfee..1d84b0b6af5 100644 --- a/vendor/github.com/containers/storage/drivers/windows/windows.go +++ b/vendor/github.com/containers/storage/drivers/windows/windows.go @@ -940,11 +940,6 @@ func (d *Driver) AdditionalImageStores() []string { return nil } -// AdditionalImageStores returns additional image stores supported by the driver -func (d *Driver) AdditionalImageStores() []string { - return nil -} - // UpdateLayerIDMap changes ownerships in the layer's filesystem tree from // matching those in toContainer to matching those in toHost. func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error { diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go index 76a11fb0cd4..80fae6dcee5 100644 --- a/vendor/github.com/containers/storage/images.go +++ b/vendor/github.com/containers/storage/images.go @@ -40,6 +40,11 @@ type Image struct { // same top layer. TopLayer string `json:"layer,omitempty"` + // MappedTopLayers are the IDs of alternate versions of the top layer + // which have the same contents and parent, and which differ from + // TopLayer only in which ID mappings they use. + MappedTopLayers []string `json:"mapped-layers,omitempty"` + // Metadata is data we keep for the convenience of the caller. It is not // expected to be large, since it is kept in memory. Metadata string `json:"metadata,omitempty"` @@ -126,16 +131,17 @@ type imageStore struct { func copyImage(i *Image) *Image { return &Image{ - ID: i.ID, - Digest: i.Digest, - Names: copyStringSlice(i.Names), - TopLayer: i.TopLayer, - Metadata: i.Metadata, - BigDataNames: copyStringSlice(i.BigDataNames), - BigDataSizes: copyStringInt64Map(i.BigDataSizes), - BigDataDigests: copyStringDigestMap(i.BigDataDigests), - Created: i.Created, - Flags: copyStringInterfaceMap(i.Flags), + ID: i.ID, + Digest: i.Digest, + Names: copyStringSlice(i.Names), + TopLayer: i.TopLayer, + MappedTopLayers: copyStringSlice(i.MappedTopLayers), + Metadata: i.Metadata, + BigDataNames: copyStringSlice(i.BigDataNames), + BigDataSizes: copyStringInt64Map(i.BigDataSizes), + BigDataDigests: copyStringDigestMap(i.BigDataDigests), + Created: i.Created, + Flags: copyStringInterfaceMap(i.Flags), } } @@ -362,6 +368,14 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c return image, err } +func (r *imageStore) addMappedTopLayer(id, layer string) error { + if image, ok := r.lookup(id); ok { + image.MappedTopLayers = append(image.MappedTopLayers, layer) + return r.Save() + } + return ErrImageUnknown +} + func (r *imageStore) Metadata(id string) (string, error) { if image, ok := r.lookup(id); ok { return image.Metadata, nil diff --git a/vendor/github.com/containers/storage/images_ffjson.go b/vendor/github.com/containers/storage/images_ffjson.go index f91ee6d4f94..6b40ebd5978 100644 --- a/vendor/github.com/containers/storage/images_ffjson.go +++ b/vendor/github.com/containers/storage/images_ffjson.go @@ -1,5 +1,5 @@ // Code generated by ffjson . DO NOT EDIT. 
-// source: ./images.go +// source: images.go package storage @@ -64,6 +64,22 @@ func (j *Image) MarshalJSONBuf(buf fflib.EncodingBuffer) error { fflib.WriteJsonString(buf, string(j.TopLayer)) buf.WriteByte(',') } + if len(j.MappedTopLayers) != 0 { + buf.WriteString(`"mapped-layers":`) + if j.MappedTopLayers != nil { + buf.WriteString(`[`) + for i, v := range j.MappedTopLayers { + if i != 0 { + buf.WriteString(`,`) + } + fflib.WriteJsonString(buf, string(v)) + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } if len(j.Metadata) != 0 { buf.WriteString(`"metadata":`) fflib.WriteJsonString(buf, string(j.Metadata)) @@ -157,6 +173,8 @@ const ( ffjtImageTopLayer + ffjtImageMappedTopLayers + ffjtImageMetadata ffjtImageBigDataNames @@ -178,6 +196,8 @@ var ffjKeyImageNames = []byte("names") var ffjKeyImageTopLayer = []byte("layer") +var ffjKeyImageMappedTopLayers = []byte("mapped-layers") + var ffjKeyImageMetadata = []byte("metadata") var ffjKeyImageBigDataNames = []byte("big-data-names") @@ -311,7 +331,12 @@ mainparse: case 'm': - if bytes.Equal(ffjKeyImageMetadata, kn) { + if bytes.Equal(ffjKeyImageMappedTopLayers, kn) { + currentKey = ffjtImageMappedTopLayers + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyImageMetadata, kn) { currentKey = ffjtImageMetadata state = fflib.FFParse_want_colon goto mainparse @@ -363,6 +388,12 @@ mainparse: goto mainparse } + if fflib.EqualFoldRight(ffjKeyImageMappedTopLayers, kn) { + currentKey = ffjtImageMappedTopLayers + state = fflib.FFParse_want_colon + goto mainparse + } + if fflib.SimpleLetterEqualFold(ffjKeyImageTopLayer, kn) { currentKey = ffjtImageTopLayer state = fflib.FFParse_want_colon @@ -416,6 +447,9 @@ mainparse: case ffjtImageTopLayer: goto handle_TopLayer + case ffjtImageMappedTopLayers: + goto handle_MappedTopLayers + case ffjtImageMetadata: goto handle_Metadata @@ -600,6 +634,80 @@ handle_TopLayer: state = fflib.FFParse_after_value goto mainparse +handle_MappedTopLayers: + + /* handler: j.MappedTopLayers type=[]string kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.MappedTopLayers = nil + } else { + + j.MappedTopLayers = []string{} + + wantVal := true + + for { + + var tmpJMappedTopLayers string + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmpJMappedTopLayers type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + tmpJMappedTopLayers = string(string(outBuf)) + + } + } + + j.MappedTopLayers = append(j.MappedTopLayers, tmpJMappedTopLayers) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + handle_Metadata: /* handler: j.Metadata type=string kind=string quoted=false*/ diff --git a/vendor/github.com/containers/storage/layers_ffjson.go b/vendor/github.com/containers/storage/layers_ffjson.go index 09b5d0f33e5..125b5d8c971 100644 --- a/vendor/github.com/containers/storage/layers_ffjson.go +++ b/vendor/github.com/containers/storage/layers_ffjson.go @@ -1,5 +1,5 @@ // Code generated by ffjson . DO NOT EDIT. -// source: ./layers.go +// source: layers.go package storage diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go index dcc5227feba..4c43826256d 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive.go @@ -56,6 +56,11 @@ type ( // replaced with the matching name from this map. RebaseNames map[string]string InUserNS bool + // CopyPass indicates that the contents of any archive we're creating + // will instantly be extracted and written to disk, so we can deviate + // from the traditional behavior/format to get features like subsecond + // precision in timestamps. + CopyPass bool } ) @@ -396,6 +401,11 @@ type tarAppender struct { // by the AUFS standard are used as the tar whiteout // standard. WhiteoutConverter tarWhiteoutConverter + // CopyPass indicates that the contents of any archive we're creating + // will instantly be extracted and written to disk, so we can deviate + // from the traditional behavior/format to get features like subsecond + // precision in timestamps. + CopyPass bool } func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair) *tarAppender { @@ -446,6 +456,9 @@ func (ta *tarAppender) addTarFile(path, name string) error { if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { return err } + if ta.CopyPass { + copyPassHeader(hdr) + } // if it's not a directory and has more than 1 link, // it's hard linked, so set the type flag accordingly @@ -710,6 +723,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) options.ChownOpts, ) ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData) + ta.CopyPass = options.CopyPass defer func() { // Make sure to check the error on Close. 
@@ -1039,6 +1053,7 @@ func (archiver *Archiver) TarUntar(src, dst string) error { UIDMaps: tarMappings.UIDs(), GIDMaps: tarMappings.GIDs(), Compression: Uncompressed, + CopyPass: true, } archive, err := TarWithOptions(src, options) if err != nil { @@ -1145,6 +1160,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { } hdr.Name = filepath.Base(dst) hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + copyPassHeader(hdr) if err := remapIDs(archiver.TarIDMappings, nil, archiver.ChownOpts, hdr); err != nil { return err diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_110.go b/vendor/github.com/containers/storage/pkg/archive/archive_110.go new file mode 100644 index 00000000000..22b8b48cc10 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_110.go @@ -0,0 +1,11 @@ +// +build go1.10 + +package archive + +import ( + "archive/tar" +) + +func copyPassHeader(hdr *tar.Header) { + hdr.Format = tar.FormatPAX +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_19.go b/vendor/github.com/containers/storage/pkg/archive/archive_19.go new file mode 100644 index 00000000000..d10d595fa8b --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_19.go @@ -0,0 +1,10 @@ +// +build !go1.10 + +package archive + +import ( + "archive/tar" +) + +func copyPassHeader(hdr *tar.Header) { +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go b/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go index 211f4e92b51..9b8103e4da5 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go @@ -1,5 +1,5 @@ // Code generated by ffjson . DO NOT EDIT. 
-// source: ./pkg/archive/archive.go +// source: pkg/archive/archive.go package archive @@ -491,6 +491,11 @@ func (j *TarOptions) MarshalJSONBuf(buf fflib.EncodingBuffer) error { } else { buf.WriteString(`,"InUserNS":false`) } + if j.CopyPass { + buf.WriteString(`,"CopyPass":true`) + } else { + buf.WriteString(`,"CopyPass":false`) + } buf.WriteByte('}') return nil } @@ -524,6 +529,8 @@ const ( ffjtTarOptionsRebaseNames ffjtTarOptionsInUserNS + + ffjtTarOptionsCopyPass ) var ffjKeyTarOptionsIncludeFiles = []byte("IncludeFiles") @@ -552,6 +559,8 @@ var ffjKeyTarOptionsRebaseNames = []byte("RebaseNames") var ffjKeyTarOptionsInUserNS = []byte("InUserNS") +var ffjKeyTarOptionsCopyPass = []byte("CopyPass") + // UnmarshalJSON umarshall json - template of ffjson func (j *TarOptions) UnmarshalJSON(input []byte) error { fs := fflib.NewFFLexer(input) @@ -624,6 +633,11 @@ mainparse: currentKey = ffjtTarOptionsChownOpts state = fflib.FFParse_want_colon goto mainparse + + } else if bytes.Equal(ffjKeyTarOptionsCopyPass, kn) { + currentKey = ffjtTarOptionsCopyPass + state = fflib.FFParse_want_colon + goto mainparse } case 'E': @@ -704,6 +718,12 @@ mainparse: } + if fflib.EqualFoldRight(ffjKeyTarOptionsCopyPass, kn) { + currentKey = ffjtTarOptionsCopyPass + state = fflib.FFParse_want_colon + goto mainparse + } + if fflib.EqualFoldRight(ffjKeyTarOptionsInUserNS, kn) { currentKey = ffjtTarOptionsInUserNS state = fflib.FFParse_want_colon @@ -838,6 +858,9 @@ mainparse: case ffjtTarOptionsInUserNS: goto handle_InUserNS + case ffjtTarOptionsCopyPass: + goto handle_CopyPass + case ffjtTarOptionsnosuchkey: err = fs.SkipField(tok) if err != nil { @@ -1481,6 +1504,41 @@ handle_InUserNS: state = fflib.FFParse_after_value goto mainparse +handle_CopyPass: + + /* handler: j.CopyPass type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + } else { + tmpb := fs.Output.Bytes() + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + j.CopyPass = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + j.CopyPass = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + wantedvalue: return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) wrongtokenerror: @@ -1773,6 +1831,11 @@ func (j *tarAppender) MarshalJSONBuf(buf fflib.EncodingBuffer) error { if err != nil { return err } + if j.CopyPass { + buf.WriteString(`,"CopyPass":true`) + } else { + buf.WriteString(`,"CopyPass":false`) + } buf.WriteByte('}') return nil } @@ -1792,6 +1855,8 @@ const ( ffjttarAppenderChownOpts ffjttarAppenderWhiteoutConverter + + ffjttarAppenderCopyPass ) var ffjKeytarAppenderTarWriter = []byte("TarWriter") @@ -1806,6 +1871,8 @@ var ffjKeytarAppenderChownOpts = []byte("ChownOpts") var ffjKeytarAppenderWhiteoutConverter = []byte("WhiteoutConverter") +var ffjKeytarAppenderCopyPass = []byte("CopyPass") + // UnmarshalJSON umarshall json - template of ffjson func (j *tarAppender) UnmarshalJSON(input []byte) error { fs := fflib.NewFFLexer(input) @@ -1881,6 +1948,11 @@ mainparse: currentKey = ffjttarAppenderChownOpts state = fflib.FFParse_want_colon goto mainparse + + } else if bytes.Equal(ffjKeytarAppenderCopyPass, kn) { + currentKey = ffjttarAppenderCopyPass + state = fflib.FFParse_want_colon + goto mainparse } 
case 'I': @@ -1917,6 +1989,12 @@ mainparse: } + if fflib.EqualFoldRight(ffjKeytarAppenderCopyPass, kn) { + currentKey = ffjttarAppenderCopyPass + state = fflib.FFParse_want_colon + goto mainparse + } + if fflib.SimpleLetterEqualFold(ffjKeytarAppenderWhiteoutConverter, kn) { currentKey = ffjttarAppenderWhiteoutConverter state = fflib.FFParse_want_colon @@ -1988,6 +2066,9 @@ mainparse: case ffjttarAppenderWhiteoutConverter: goto handle_WhiteoutConverter + case ffjttarAppenderCopyPass: + goto handle_CopyPass + case ffjttarAppendernosuchkey: err = fs.SkipField(tok) if err != nil { @@ -2211,6 +2292,41 @@ handle_WhiteoutConverter: state = fflib.FFParse_after_value goto mainparse +handle_CopyPass: + + /* handler: j.CopyPass type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + } else { + tmpb := fs.Output.Bytes() + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + j.CopyPass = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + j.CopyPass = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + wantedvalue: return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) wrongtokenerror: diff --git a/vendor/github.com/containers/storage/pkg/archive/example_changes.go b/vendor/github.com/containers/storage/pkg/archive/example_changes.go deleted file mode 100644 index 70f9c5564ad..00000000000 --- a/vendor/github.com/containers/storage/pkg/archive/example_changes.go +++ /dev/null @@ -1,97 +0,0 @@ -// +build ignore - -// Simple tool to create an archive stream from an old and new directory -// -// By default it will stream the comparison of two temporary directories with junk files -package main - -import ( - "flag" - "fmt" - "io" - "io/ioutil" - "os" - "path" - - "github.com/containers/storage/pkg/archive" - "github.com/sirupsen/logrus" -) - -var ( - flDebug = flag.Bool("D", false, "debugging output") - flNewDir = flag.String("newdir", "", "") - flOldDir = flag.String("olddir", "", "") - log = logrus.New() -) - -func main() { - flag.Usage = func() { - fmt.Println("Produce a tar from comparing two directory paths. 
By default a demo tar is created of around 200 files (including hardlinks)") - fmt.Printf("%s [OPTIONS]\n", os.Args[0]) - flag.PrintDefaults() - } - flag.Parse() - log.Out = os.Stderr - if (len(os.Getenv("DEBUG")) > 0) || *flDebug { - logrus.SetLevel(logrus.DebugLevel) - } - var newDir, oldDir string - - if len(*flNewDir) == 0 { - var err error - newDir, err = ioutil.TempDir("", "storage-test-newDir") - if err != nil { - log.Fatal(err) - } - defer os.RemoveAll(newDir) - if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil { - log.Fatal(err) - } - } else { - newDir = *flNewDir - } - - if len(*flOldDir) == 0 { - oldDir, err := ioutil.TempDir("", "storage-test-oldDir") - if err != nil { - log.Fatal(err) - } - defer os.RemoveAll(oldDir) - } else { - oldDir = *flOldDir - } - - changes, err := archive.ChangesDirs(newDir, oldDir) - if err != nil { - log.Fatal(err) - } - - a, err := archive.ExportChanges(newDir, changes) - if err != nil { - log.Fatal(err) - } - defer a.Close() - - i, err := io.Copy(os.Stdout, a) - if err != nil && err != io.EOF { - log.Fatal(err) - } - fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i) -} - -func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { - fileData := []byte("fooo") - for n := 0; n < numberOfFiles; n++ { - fileName := fmt.Sprintf("file-%d", n) - if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { - return 0, err - } - if makeLinks { - if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { - return 0, err - } - } - } - totalSize := numberOfFiles * len(fileData) - return totalSize, nil -} diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_unix.go b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go index 397251bdb8b..05522d68bb2 100644 --- a/vendor/github.com/containers/storage/pkg/directory/directory_unix.go +++ b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go @@ -1,4 +1,4 @@ -// +build linux freebsd solaris +// +build linux darwin freebsd solaris package directory diff --git a/vendor/github.com/containers/storage/pkg/ostree/no_ostree.go b/vendor/github.com/containers/storage/pkg/ostree/no_ostree.go new file mode 100644 index 00000000000..0a8e7d67964 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ostree/no_ostree.go @@ -0,0 +1,19 @@ +// +build !ostree + +package ostree + +func OstreeSupport() bool { + return false +} + +func DeleteOSTree(repoLocation, id string) error { + return nil +} + +func CreateOSTreeRepository(repoLocation string, rootUID int, rootGID int) error { + return nil +} + +func ConvertToOSTree(repoLocation, root, id string) error { + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/ostree/ostree.go b/vendor/github.com/containers/storage/pkg/ostree/ostree.go new file mode 100644 index 00000000000..e9b57a0fb4f --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ostree/ostree.go @@ -0,0 +1,198 @@ +// +build ostree + +package ostree + +import ( + "fmt" + "golang.org/x/sys/unix" + "os" + "path/filepath" + "runtime" + "syscall" + "time" + "unsafe" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" + glib "github.com/ostreedev/ostree-go/pkg/glibobject" + "github.com/ostreedev/ostree-go/pkg/otbuiltin" + "github.com/pkg/errors" +) + 
+// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1 +// #include +// #include +// #include +// #include +// #include +// #include +import "C" + +func OstreeSupport() bool { + return true +} + +func fixFiles(dir string, usermode bool) (bool, []string, error) { + var SkipOstree = errors.New("skip ostree deduplication") + + var whiteouts []string + + err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if info.Mode()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 { + if !usermode { + stat, ok := info.Sys().(*syscall.Stat_t) + if !ok { + return errors.New("not syscall.Stat_t") + } + + if stat.Rdev == 0 && (stat.Mode&unix.S_IFCHR) != 0 { + whiteouts = append(whiteouts, path) + return nil + } + } + // Skip the ostree deduplication if we encounter a file type that + // ostree does not manage. + return SkipOstree + } + if info.IsDir() { + if usermode { + if err := os.Chmod(path, info.Mode()|0700); err != nil { + return err + } + } + } else if usermode && (info.Mode().IsRegular()) { + if err := os.Chmod(path, info.Mode()|0600); err != nil { + return err + } + } + return nil + }) + if err == SkipOstree { + return true, nil, nil + } + if err != nil { + return false, nil, err + } + return false, whiteouts, nil +} + +// Create prepares the filesystem for the OSTREE driver and copies the directory for the given id under the parent. +func ConvertToOSTree(repoLocation, root, id string) error { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + repo, err := otbuiltin.OpenRepo(repoLocation) + if err != nil { + return errors.Wrap(err, "could not open the OSTree repository") + } + + skip, whiteouts, err := fixFiles(root, os.Getuid() != 0) + if err != nil { + return errors.Wrap(err, "could not prepare the OSTree directory") + } + if skip { + return nil + } + + if _, err := repo.PrepareTransaction(); err != nil { + return errors.Wrap(err, "could not prepare the OSTree transaction") + } + + if skip { + return nil + } + + commitOpts := otbuiltin.NewCommitOptions() + commitOpts.Timestamp = time.Now() + commitOpts.LinkCheckoutSpeedup = true + commitOpts.Parent = "0000000000000000000000000000000000000000000000000000000000000000" + branch := fmt.Sprintf("containers-storage/%s", id) + + for _, w := range whiteouts { + if err := os.Remove(w); err != nil { + return errors.Wrap(err, "could not delete whiteout file") + } + } + + if _, err := repo.Commit(root, branch, commitOpts); err != nil { + return errors.Wrap(err, "could not commit the layer") + } + + if _, err := repo.CommitTransaction(); err != nil { + return errors.Wrap(err, "could not complete the OSTree transaction") + } + + if err := system.EnsureRemoveAll(root); err != nil { + return errors.Wrap(err, "could not delete layer") + } + + checkoutOpts := otbuiltin.NewCheckoutOptions() + checkoutOpts.RequireHardlinks = true + checkoutOpts.Whiteouts = false + if err := otbuiltin.Checkout(repoLocation, root, branch, checkoutOpts); err != nil { + return errors.Wrap(err, "could not checkout from OSTree") + } + + for _, w := range whiteouts { + if err := unix.Mknod(w, unix.S_IFCHR, 0); err != nil { + return errors.Wrap(err, "could not recreate whiteout file") + } + } + return nil +} + +func CreateOSTreeRepository(repoLocation string, rootUID int, rootGID int) error { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + _, err := os.Stat(repoLocation) + if err != nil && !os.IsNotExist(err) { + return err + } else if err != nil { + if err := idtools.MkdirAllAs(repoLocation, 0700, rootUID, rootGID); err != nil { + return 
errors.Wrap(err, "could not create OSTree repository directory: %v") + } + + if _, err := otbuiltin.Init(repoLocation, otbuiltin.NewInitOptions()); err != nil { + return errors.Wrap(err, "could not create OSTree repository") + } + } + return nil +} + +func openRepo(path string) (*C.struct_OstreeRepo, error) { + var cerr *C.GError + cpath := C.CString(path) + defer C.free(unsafe.Pointer(cpath)) + pathc := C.g_file_new_for_path(cpath) + defer C.g_object_unref(C.gpointer(pathc)) + repo := C.ostree_repo_new(pathc) + r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(repo, nil, &cerr))) + if !r { + C.g_object_unref(C.gpointer(repo)) + return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) + } + return repo, nil +} + +func DeleteOSTree(repoLocation, id string) error { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + repo, err := openRepo(repoLocation) + if err != nil { + return err + } + defer C.g_object_unref(C.gpointer(repo)) + + branch := fmt.Sprintf("containers-storage/%s", id) + + cbranch := C.CString(branch) + defer C.free(unsafe.Pointer(cbranch)) + + var cerr *C.GError + r := glib.GoBool(glib.GBoolean(C.ostree_repo_set_ref_immediate(repo, nil, cbranch, nil, nil, &cerr))) + if !r { + return glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/stringutils/README.md b/vendor/github.com/containers/storage/pkg/stringutils/README.md new file mode 100644 index 00000000000..b3e454573c3 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/stringutils/README.md @@ -0,0 +1 @@ +This package provides helper functions for dealing with strings diff --git a/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go b/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go new file mode 100644 index 00000000000..8c4c39875eb --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go @@ -0,0 +1,99 @@ +// Package stringutils provides helper functions for dealing with strings. +package stringutils + +import ( + "bytes" + "math/rand" + "strings" +) + +// GenerateRandomAlphaOnlyString generates an alphabetical random string with length n. +func GenerateRandomAlphaOnlyString(n int) string { + // make a really long string + letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + b := make([]byte, n) + for i := range b { + b[i] = letters[rand.Intn(len(letters))] + } + return string(b) +} + +// GenerateRandomASCIIString generates an ASCII random string with length n. +func GenerateRandomASCIIString(n int) string { + chars := "abcdefghijklmnopqrstuvwxyz" + + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + + "~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` " + res := make([]byte, n) + for i := 0; i < n; i++ { + res[i] = chars[rand.Intn(len(chars))] + } + return string(res) +} + +// Ellipsis truncates a string to fit within maxlen, and appends ellipsis (...). +// For maxlen of 3 and lower, no ellipsis is appended. +func Ellipsis(s string, maxlen int) string { + r := []rune(s) + if len(r) <= maxlen { + return s + } + if maxlen <= 3 { + return string(r[:maxlen]) + } + return string(r[:maxlen-3]) + "..." +} + +// Truncate truncates a string to maxlen. +func Truncate(s string, maxlen int) string { + r := []rune(s) + if len(r) <= maxlen { + return s + } + return string(r[:maxlen]) +} + +// InSlice tests whether a string is contained in a slice of strings or not. 
+// Comparison is case insensitive +func InSlice(slice []string, s string) bool { + for _, ss := range slice { + if strings.ToLower(s) == strings.ToLower(ss) { + return true + } + } + return false +} + +func quote(word string, buf *bytes.Buffer) { + // Bail out early for "simple" strings + if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! \t\n") { + buf.WriteString(word) + return + } + + buf.WriteString("'") + + for i := 0; i < len(word); i++ { + b := word[i] + if b == '\'' { + // Replace literal ' with a close ', a \', and an open ' + buf.WriteString("'\\''") + } else { + buf.WriteByte(b) + } + } + + buf.WriteString("'") +} + +// ShellQuoteArguments takes a list of strings and escapes them so they will be +// handled right when passed as arguments to a program via a shell +func ShellQuoteArguments(args []string) string { + var buf bytes.Buffer + for i, arg := range args { + if i != 0 { + buf.WriteByte(' ') + } + quote(arg, &buf) + } + return buf.String() +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_windows.go b/vendor/github.com/containers/storage/pkg/system/stat_windows.go index 6c63972682a..d306360520b 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_windows.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_windows.go @@ -28,6 +28,20 @@ func (s StatT) Mtim() time.Time { return time.Time(s.mtim) } +// UID returns file's user id of owner. +// +// on windows this is always 0 because there is no concept of UID +func (s StatT) UID() uint32 { + return 0 +} + +// GID returns file's group id of owner. +// +// on windows this is always 0 because there is no concept of GID +func (s StatT) GID() uint32 { + return 0 +} + // Stat takes a path to a file and returns // a system.StatT type pertaining to that file. // diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index 088d9c0c5b7..870ccf7b0ab 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -7,6 +7,7 @@ import ( "io/ioutil" "os" "path/filepath" + "reflect" "strconv" "strings" "sync" @@ -18,9 +19,11 @@ import ( "github.com/BurntSushi/toml" drivers "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/directory" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/stringid" + "github.com/containers/storage/pkg/stringutils" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) @@ -346,6 +349,9 @@ type Store interface { // with an image. SetImageBigData(id, key string, data []byte) error + // ImageSize computes the size of the image's layers and ancillary data. + ImageSize(id string) (int64, error) + // ListContainerBigData retrieves a list of the (possibly large) chunks of // named data associated with a container. ListContainerBigData(id string) ([]string, error) @@ -366,6 +372,10 @@ type Store interface { // associated with a container. SetContainerBigData(id, key string, data []byte) error + // ContainerSize computes the size of the container's layer and ancillary + // data. Warning: this is a potentially expensive operation. + ContainerSize(id string) (int64, error) + // Layer returns a specific layer. 
Layer(id string) (*Layer, error) @@ -949,6 +959,106 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, o return ristore.Create(id, names, layer, metadata, creationDate, options.Digest) } +func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, readWrite bool, rlstore LayerStore, lstores []ROLayerStore, options IDMappingOptions) (*Layer, error) { + layerMatchesMappingOptions := func(layer *Layer, options IDMappingOptions) bool { + // If we want host mapping, and the layer uses mappings, it's not the best match. + if options.HostUIDMapping && len(layer.UIDMap) != 0 { + return false + } + if options.HostGIDMapping && len(layer.GIDMap) != 0 { + return false + } + // If we don't care about the mapping, it's fine. + if len(options.UIDMap) == 0 && len(options.GIDMap) == 0 { + return true + } + // Compare the maps. + return reflect.DeepEqual(layer.UIDMap, options.UIDMap) && reflect.DeepEqual(layer.GIDMap, options.GIDMap) + } + var layer, parentLayer *Layer + var layerHomeStore ROLayerStore + // Locate the image's top layer and its parent, if it has one. + for _, store := range append([]ROLayerStore{rlstore}, lstores...) { + if store != rlstore { + store.Lock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + store.Load() + } + } + // Walk the top layer list. + for _, candidate := range append([]string{image.TopLayer}, image.MappedTopLayers...) { + if cLayer, err := store.Get(candidate); err == nil { + // We want the layer's parent, too, if it has one. + var cParentLayer *Layer + if cLayer.Parent != "" { + // Its parent should be around here, somewhere. + if cParentLayer, err = store.Get(cLayer.Parent); err != nil { + // Nope, couldn't find it. We're not going to be able + // to diff this one properly. + continue + } + } + // If the layer matches the desired mappings, it's a perfect match, + // so we're actually done here. + if layerMatchesMappingOptions(cLayer, options) { + return cLayer, nil + } + // Record the first one that we found, even if it's not ideal, so that + // we have a starting point. + if layer == nil { + layer = cLayer + parentLayer = cParentLayer + layerHomeStore = store + } + } + } + } + if layer == nil { + return nil, ErrLayerUnknown + } + // The top layer's mappings don't match the ones we want, but it's in a read-only + // image store, so we can't create and add a mapped copy of the layer to the image. + if !readWrite { + return layer, nil + } + // The top layer's mappings don't match the ones we want, and it's in an image store + // that lets us edit image metadata... + if istore, ok := ristore.(*imageStore); ok { + // ... so extract the layer's contents, create a new copy of it with the + // desired mappings, and register it as an alternate top layer in the image. 
+ noCompression := archive.Uncompressed + diffOptions := DiffOptions{ + Compression: &noCompression, + } + rc, err := layerHomeStore.Diff("", layer.ID, &diffOptions) + if err != nil { + return nil, errors.Wrapf(err, "error reading layer %q to create an ID-mapped version of it") + } + defer rc.Close() + layerOptions := LayerOptions{ + IDMappingOptions: IDMappingOptions{ + HostUIDMapping: options.HostUIDMapping, + HostGIDMapping: options.HostGIDMapping, + UIDMap: copyIDMap(options.UIDMap), + GIDMap: copyIDMap(options.GIDMap), + }, + } + mappedLayer, _, err := rlstore.Put("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil, rc) + if err != nil { + return nil, errors.Wrapf(err, "error creating ID-mapped copy of layer %q") + } + if err = istore.addMappedTopLayer(image.ID, mappedLayer.ID); err != nil { + if err2 := rlstore.Delete(mappedLayer.ID); err2 != nil { + err = errors.WithMessage(err, fmt.Sprintf("error deleting layer %q: %v", mappedLayer.ID, err2)) + } + return nil, errors.Wrapf(err, "error registering ID-mapped layer with image %q", image.ID) + } + layer = mappedLayer + } + return layer, nil +} + func (s *store) CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) { if options == nil { options = &ContainerOptions{} @@ -977,6 +1087,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat uidMap := options.UIDMap gidMap := options.GIDMap if image != "" { + var imageHomeStore ROImageStore istore, err := s.ImageStore() if err != nil { return nil, err @@ -994,6 +1105,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat } cimage, err = store.Get(image) if err == nil { + imageHomeStore = store break } } @@ -1006,22 +1118,9 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat if err != nil { return nil, err } - var ilayer *Layer - for _, store := range append([]ROLayerStore{rlstore}, lstores...) { - if store != rlstore { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() - } - } - ilayer, err = store.Get(cimage.TopLayer) - if err == nil { - break - } - } - if ilayer == nil { - return nil, ErrLayerUnknown + ilayer, err := s.imageTopLayerForMapping(cimage, imageHomeStore, imageHomeStore == istore, rlstore, lstores, options.IDMappingOptions) + if err != nil { + return nil, err } imageTopLayer = ilayer if !options.HostUIDMapping && len(options.UIDMap) == 0 { @@ -1279,6 +1378,200 @@ func (s *store) SetImageBigData(id, key string, data []byte) error { return ristore.SetBigData(id, key, data) } +func (s *store) ImageSize(id string) (int64, error) { + var image *Image + + lstore, err := s.LayerStore() + if err != nil { + return -1, errors.Wrapf(err, "error loading primary layer store data") + } + lstores, err := s.ROLayerStores() + if err != nil { + return -1, errors.Wrapf(err, "error loading additional layer stores") + } + for _, store := range append([]ROLayerStore{lstore}, lstores...) { + store.Lock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + store.Load() + } + } + + var imageStore ROBigDataStore + istore, err := s.ImageStore() + if err != nil { + return -1, errors.Wrapf(err, "error loading primary image store data") + } + istores, err := s.ROImageStores() + if err != nil { + return -1, errors.Wrapf(err, "error loading additional image stores") + } + + // Look for the image's record. 
+ for _, store := range append([]ROImageStore{istore}, istores...) { + store.Lock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + store.Load() + } + if image, err = store.Get(id); err == nil { + imageStore = store + break + } + } + if image == nil { + return -1, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + } + + // Start with a list of the image's top layers. + queue := make(map[string]struct{}) + for _, layerID := range append([]string{image.TopLayer}, image.MappedTopLayers...) { + queue[layerID] = struct{}{} + } + visited := make(map[string]struct{}) + // Walk all of the layers. + var size int64 + for len(visited) < len(queue) { + for layerID := range queue { + // Visit each layer only once. + if _, ok := visited[layerID]; ok { + continue + } + visited[layerID] = struct{}{} + // Look for the layer and the store that knows about it. + var layerStore ROLayerStore + var layer *Layer + for _, store := range append([]ROLayerStore{lstore}, lstores...) { + if layer, err = store.Get(layerID); err == nil { + layerStore = store + break + } + } + if layer == nil { + return -1, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", layerID) + } + // The UncompressedSize is only valid if there's a digest to go with it. + n := layer.UncompressedSize + if layer.UncompressedDigest == "" { + // Compute the size. + n, err = layerStore.DiffSize("", layer.ID) + if err != nil { + return -1, errors.Wrapf(err, "size/digest of layer with ID %q could not be calculated", layerID) + } + } + // Count this layer. + size += n + // Make a note to visit the layer's parent if we haven't already. + if layer.Parent != "" { + queue[layer.Parent] = struct{}{} + } + } + } + + // Count big data items. + names, err := imageStore.BigDataNames(id) + if err != nil { + return -1, errors.Wrapf(err, "error reading list of big data items for image %q", id) + } + for _, name := range names { + n, err := imageStore.BigDataSize(id, name) + if err != nil { + return -1, errors.Wrapf(err, "error reading size of big data item %q for image %q", name, id) + } + size += n + } + + return size, nil +} + +func (s *store) ContainerSize(id string) (int64, error) { + lstore, err := s.LayerStore() + if err != nil { + return -1, err + } + lstores, err := s.ROLayerStores() + if err != nil { + return -1, err + } + for _, store := range append([]ROLayerStore{lstore}, lstores...) { + store.Lock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + store.Load() + } + } + + // Get the location of the container directory and container run directory. + // Do it before we lock the container store because they do, too. + cdir, err := s.ContainerDirectory(id) + if err != nil { + return -1, err + } + rdir, err := s.ContainerRunDirectory(id) + if err != nil { + return -1, err + } + + rcstore, err := s.ContainerStore() + if err != nil { + return -1, err + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + // Read the container record. + container, err := rcstore.Get(id) + if err != nil { + return -1, err + } + + // Read the container's layer's size. + var layer *Layer + var size int64 + for _, store := range append([]ROLayerStore{lstore}, lstores...) 
{ + if layer, err = store.Get(container.LayerID); err == nil { + size, err = store.DiffSize("", layer.ID) + if err != nil { + return -1, errors.Wrapf(err, "error determining size of layer with ID %q", layer.ID) + } + break + } + } + if layer == nil { + return -1, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", container.LayerID) + } + + // Count big data items. + names, err := rcstore.BigDataNames(id) + if err != nil { + return -1, errors.Wrapf(err, "error reading list of big data items for container %q", container.ID) + } + for _, name := range names { + n, err := rcstore.BigDataSize(id, name) + if err != nil { + return -1, errors.Wrapf(err, "error reading size of big data item %q for container %q", name, id) + } + size += n + } + + // Count the size of our container directory and container run directory. + n, err := directory.Size(cdir) + if err != nil { + return -1, err + } + size += n + n, err = directory.Size(rdir) + if err != nil { + return -1, err + } + size += n + + return size, nil +} + func (s *store) ListContainerBigData(id string) ([]string, error) { rcstore, err := s.ContainerStore() if err != nil { @@ -1614,7 +1907,7 @@ func (s *store) DeleteLayer(id string) error { return err } for _, image := range images { - if image.TopLayer == id { + if image.TopLayer == id || stringutils.InSlice(image.MappedTopLayers, id) { return errors.Wrapf(ErrLayerUsedByImage, "Layer %v used by image %v", id, image.ID) } } @@ -1697,10 +1990,13 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) childrenByParent[parent] = &([]string{layer.ID}) } } - anyImageByTopLayer := make(map[string]string) + otherImagesByTopLayer := make(map[string]string) for _, img := range images { if img.ID != id { - anyImageByTopLayer[img.TopLayer] = img.ID + otherImagesByTopLayer[img.TopLayer] = img.ID + for _, layerID := range img.MappedTopLayers { + otherImagesByTopLayer[layerID] = img.ID + } } } if commit { @@ -1714,27 +2010,38 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) if rcstore.Exists(layer) { break } - if _, ok := anyImageByTopLayer[layer]; ok { + if _, ok := otherImagesByTopLayer[layer]; ok { break } parent := "" if l, err := rlstore.Get(layer); err == nil { parent = l.Parent } - otherRefs := 0 - if childList, ok := childrenByParent[layer]; ok && childList != nil { - children := *childList - for _, child := range children { - if child != lastRemoved { - otherRefs++ + hasOtherRefs := func() bool { + layersToCheck := []string{layer} + if layer == image.TopLayer { + layersToCheck = append(layersToCheck, image.MappedTopLayers...) + } + for _, layer := range layersToCheck { + if childList, ok := childrenByParent[layer]; ok && childList != nil { + children := *childList + for _, child := range children { + if child != lastRemoved { + return true + } + } } } + return false } - if otherRefs != 0 { + if hasOtherRefs() { break } lastRemoved = layer layersToRemove = append(layersToRemove, lastRemoved) + if layer == image.TopLayer { + layersToRemove = append(layersToRemove, image.MappedTopLayers...) 
+ } layer = parent } } else { @@ -2293,7 +2600,7 @@ func (s *store) ImagesByTopLayer(id string) ([]*Image, error) { return nil, err } for _, image := range imageList { - if image.TopLayer == layer.ID { + if image.TopLayer == layer.ID || stringutils.InSlice(image.MappedTopLayers, layer.ID) { images = append(images, &image) } } @@ -2680,6 +2987,11 @@ type OptionsConfig struct { RemapGroup string `toml:"remap-group"` // Thinpool container options to be handed to thinpool drivers Thinpool struct{ ThinpoolOptionsConfig } `toml:"thinpool"` + // OSTree repository + OstreeRepo string `toml:"ostree_repo"` + + // Do not create a bind mount on the storage home + SkipMountHome string `toml:"skip_mount_home"` } // TOML-friendly explicit tables used for conversions. @@ -2770,6 +3082,12 @@ func init() { if config.Storage.Options.Size != "" { DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("%s.size=%s", config.Storage.Driver, config.Storage.Options.Size)) } + if config.Storage.Options.OstreeRepo != "" { + DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("%s.ostree_repo=%s", config.Storage.Driver, config.Storage.Options.OstreeRepo)) + } + if config.Storage.Options.SkipMountHome != "" { + DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("%s.skip_mount_home=%s", config.Storage.Driver, config.Storage.Options.SkipMountHome)) + } if config.Storage.Options.OverrideKernelCheck != "" { DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("%s.override_kernel_check=%s", config.Storage.Driver, config.Storage.Options.OverrideKernelCheck)) } diff --git a/vendor/github.com/containers/storage/vendor.conf b/vendor/github.com/containers/storage/vendor.conf index 54c78ab1cab..c0498a02d1f 100644 --- a/vendor/github.com/containers/storage/vendor.conf +++ b/vendor/github.com/containers/storage/vendor.conf @@ -20,3 +20,4 @@ github.com/tchap/go-patricia v2.2.6 github.com/vbatts/tar-split v0.10.2 golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6 golang.org/x/sys 07c182904dbd53199946ba614a412c61d3c548f5 +github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
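
The containers/storage bump above adds several new graph-driver options — `overlay.fuse_program`, `overlay.ostree_repo`, `overlay.skip_mount_home`, and `vfs.ostree_repo` (see `parseOptions` in drivers/overlay/overlay.go and `Init` in drivers/vfs/driver.go) — and two new `Store` methods, `ImageSize` and `ContainerSize`. A minimal consumer-side sketch of how those options and methods could be exercised, assuming the store is constructed through `storage.GetStore` with explicit `StoreOptions`; every path, ID, and the FUSE helper binary below are placeholders, not values taken from this change:

package main

import (
	"fmt"

	"github.com/containers/storage"
)

func main() {
	// Option names come from the vendored parseOptions switch above;
	// the paths and the FUSE helper are placeholders.
	opts := storage.StoreOptions{
		RunRoot:         "/var/run/containers/storage",
		GraphRoot:       "/var/lib/containers/storage",
		GraphDriverName: "overlay",
		GraphDriverOptions: []string{
			// Deduplicate committed layer content into an OSTree repository.
			"overlay.ostree_repo=/var/lib/containers/storage/ostree",
			// Do not create the private bind mount on the storage home.
			"overlay.skip_mount_home=true",
			// Mount layers through a FUSE helper instead of kernel overlayfs.
			"overlay.fuse_program=/usr/local/bin/fuse-overlay",
		},
	}
	store, err := storage.GetStore(opts)
	if err != nil {
		panic(err)
	}
	// New in this revision: size accounting for images and containers.
	if size, err := store.ImageSize("<image-id>"); err == nil {
		fmt.Printf("image size: %d bytes\n", size)
	}
	if size, err := store.ContainerSize("<container-id>"); err == nil {
		fmt.Printf("container size: %d bytes\n", size)
	}
}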
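
The same two storage options can also come from storage.conf: `OptionsConfig` in store.go gains `ostree_repo` and `skip_mount_home` TOML keys, which `init()` forwards to the active driver as `<driver>.ostree_repo=` and `<driver>.skip_mount_home=` graph-driver options. OSTree deduplication itself is compiled in only when building with the `ostree` build tag; otherwise the stubs in pkg/ostree/no_ostree.go make `OstreeSupport()` return false, and both the overlay and vfs drivers reject an `ostree_repo` option at parse time.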