From a4bad171cf5de8a34e348481522280b8ef798fa0 Mon Sep 17 00:00:00 2001 From: Valentin Rothberg Date: Fri, 5 Apr 2019 15:30:44 +0200 Subject: [PATCH] update containers/image ImageDestination.{PutBlob,TryReusingBlob} are extended to receive the index of the layer in the corresponding image. Some transports, containers storage in particular, require the layers to be committed in sequence. The new sequencing allows containers storage to commit the layers in PutBlob. Signed-off-by: Valentin Rothberg --- pkg/blobcache/blobcache.go | 12 +- pkg/blobcache/blobcache_test.go | 6 +- vendor.conf | 2 +- .../github.com/containers/image/copy/copy.go | 189 ++++--- .../image/directory/directory_dest.go | 19 +- .../image/docker/docker_image_dest.go | 23 +- .../containers/image/docker/tarfile/dest.go | 21 +- .../containers/image/image/docker_schema2.go | 2 +- .../containers/image/oci/archive/oci_dest.go | 20 +- .../containers/image/oci/layout/oci_dest.go | 16 +- .../containers/image/openshift/openshift.go | 23 +- .../containers/image/ostree/ostree_dest.go | 16 +- .../image/pkg/blobinfocache/boltdb/boltdb.go | 2 +- .../image/pkg/blobinfocache/default.go | 12 +- .../image/pkg/docker/config/config.go | 15 - .../containers/image/pkg/progress/progress.go | 154 ++++++ .../containers/image/storage/storage_image.go | 463 ++++++++++++++---- .../image/storage/storage_transport.go | 11 +- .../containers/image/types/types.go | 18 +- .../github.com/containers/image/vendor.conf | 7 +- .../containers/image/version/version.go | 6 +- 21 files changed, 781 insertions(+), 256 deletions(-) create mode 100644 vendor/github.com/containers/image/pkg/progress/progress.go diff --git a/pkg/blobcache/blobcache.go b/pkg/blobcache/blobcache.go index 31e6a428c38..81faf2bdd59 100644 --- a/pkg/blobcache/blobcache.go +++ b/pkg/blobcache/blobcache.go @@ -18,7 +18,7 @@ import ( "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/ioutils" digest 
"github.com/opencontainers/go-digest" - "github.com/opencontainers/image-spec/specs-go/v1" + v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -416,7 +416,7 @@ func (s *blobCacheDestination) HasThreadSafePutBlob() bool { return s.destination.HasThreadSafePutBlob() } -func (d *blobCacheDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { +func (d *blobCacheDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, layerIndexInImage int, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { var tempfile *os.File var err error var n int @@ -479,7 +479,7 @@ func (d *blobCacheDestination) PutBlob(ctx context.Context, stream io.Reader, in } } } - newBlobInfo, err := d.destination.PutBlob(ctx, stream, inputInfo, cache, isConfig) + newBlobInfo, err := d.destination.PutBlob(ctx, stream, inputInfo, layerIndexInImage, cache, isConfig) if err != nil { return newBlobInfo, errors.Wrapf(err, "error storing blob to image destination for cache %q", transports.ImageName(d.reference)) } @@ -491,8 +491,8 @@ func (d *blobCacheDestination) PutBlob(ctx context.Context, stream io.Reader, in return newBlobInfo, nil } -func (d *blobCacheDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { - present, reusedInfo, err := d.destination.TryReusingBlob(ctx, info, cache, canSubstitute) +func (d *blobCacheDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, layerIndexInImage int, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { + present, reusedInfo, err := d.destination.TryReusingBlob(ctx, info, layerIndexInImage, cache, canSubstitute) if err != nil || present { return present, reusedInfo, err } @@ -502,7 +502,7 @@ 
func (d *blobCacheDestination) TryReusingBlob(ctx context.Context, info types.Bl f, err := os.Open(filename) if err == nil { defer f.Close() - uploadedInfo, err := d.destination.PutBlob(ctx, f, info, cache, isConfig) + uploadedInfo, err := d.destination.PutBlob(ctx, f, info, layerIndexInImage, cache, isConfig) if err != nil { return false, types.BlobInfo{}, err } diff --git a/pkg/blobcache/blobcache_test.go b/pkg/blobcache/blobcache_test.go index 7000050a295..d8619db15fd 100644 --- a/pkg/blobcache/blobcache_test.go +++ b/pkg/blobcache/blobcache_test.go @@ -21,7 +21,7 @@ import ( "github.com/containers/storage/pkg/archive" digest "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/specs-go" - "github.com/opencontainers/image-spec/specs-go/v1" + v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -128,11 +128,11 @@ func TestBlobCache(t *testing.T) { if err != nil { t.Fatalf("error opening source image for writing: %v", err) } - _, err = destImage.PutBlob(context.TODO(), bytes.NewReader(blobBytes), blobInfo, none.NoCache, false) + _, err = destImage.PutBlob(context.TODO(), bytes.NewReader(blobBytes), blobInfo, 0, none.NoCache, false) if err != nil { t.Fatalf("error writing layer blob to source image: %v", err) } - _, err = destImage.PutBlob(context.TODO(), bytes.NewReader(configBytes), configInfo, none.NoCache, true) + _, err = destImage.PutBlob(context.TODO(), bytes.NewReader(configBytes), configInfo, 0, none.NoCache, true) if err != nil { t.Fatalf("error writing config blob to source image: %v", err) } diff --git a/vendor.conf b/vendor.conf index a77130acb5a..9f729202a09 100644 --- a/vendor.conf +++ b/vendor.conf @@ -3,7 +3,7 @@ github.com/blang/semver v3.5.0 github.com/BurntSushi/toml v0.2.0 github.com/containerd/continuity 004b46473808b3e7a4a3049c20e4376c91eb966d 
github.com/containernetworking/cni v0.7.0-rc2 -github.com/containers/image f52cf78ebfa1916da406f8b6210d8f7764ec1185 +github.com/containers/image commit https://github.com/vrothberg/image github.com/vbauerster/mpb v3.3.4 github.com/mattn/go-isatty v0.0.4 github.com/VividCortex/ewma v1.1.1 diff --git a/vendor/github.com/containers/image/copy/copy.go b/vendor/github.com/containers/image/copy/copy.go index 3ed8a2b8243..8c6f7595aa6 100644 --- a/vendor/github.com/containers/image/copy/copy.go +++ b/vendor/github.com/containers/image/copy/copy.go @@ -18,7 +18,9 @@ import ( "github.com/containers/image/manifest" "github.com/containers/image/pkg/blobinfocache" "github.com/containers/image/pkg/compression" + "github.com/containers/image/pkg/progress" "github.com/containers/image/signature" + "github.com/containers/image/storage" "github.com/containers/image/transports" "github.com/containers/image/types" "github.com/klauspost/pgzip" @@ -26,7 +28,6 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/vbauerster/mpb" - "github.com/vbauerster/mpb/decor" "golang.org/x/crypto/ssh/terminal" "golang.org/x/sync/semaphore" ) @@ -160,7 +161,6 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, // If reportWriter is not a TTY (e.g., when piping to a file), do not // print the progress bars to avoid long and hard to parse output. - // createProgressBar() will print a single line instead. 
progressOutput := reportWriter if !isTTY(reportWriter) { progressOutput = ioutil.Discard } @@ -454,10 +454,6 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error { err error } - // copyGroup is used to determine if all layers are copied - copyGroup := sync.WaitGroup{} - copyGroup.Add(numLayers) - // copySemaphore is used to limit the number of parallel downloads to // avoid malicious images causing troubles and to be nice to servers. var copySemaphore *semaphore.Weighted @@ -467,43 +463,69 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error { copySemaphore = semaphore.NewWeighted(int64(1)) } - data := make([]copyLayerData, numLayers) - copyLayerHelper := func(index int, srcLayer types.BlobInfo, pool *mpb.Progress) { - defer copySemaphore.Release(1) - defer copyGroup.Done() - cld := copyLayerData{} + // copyGroup is used to determine if all layers are copied + copyGroup := sync.WaitGroup{} + + // progressPool for creating progress bars + progressPool := progress.NewPool(ctx, ic.c.progressOutput) + defer progressPool.CleanUp() + + // A context.WithCancel is needed when encountering an error while + // copying/downloading layers in parallel. + cancelCtx, cancelCopyLayer := context.WithCancel(ctx) + defer cancelCopyLayer() + + layerIndex := 0 // some layers might be skipped, so we need a dedicated counter + digestToCopyData := make(map[digest.Digest]*copyLayerData) + for _, srcLayer := range srcInfos { + // Check if we're already copying the layer + if _, ok := digestToCopyData[srcLayer.Digest]; ok { + continue + } + + cld := &copyLayerData{} + digestToCopyData[srcLayer.Digest] = cld if ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 { // DiffIDs are, currently, needed only when converting from schema1. // In which case src.LayerInfos will not have URLs because schema1 // does not support them. 
if ic.diffIDsAreNeeded { - cld.err = errors.New("getting DiffID for foreign layers is unimplemented") - } else { - cld.destInfo = srcLayer - logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name()) + return errors.New("getting DiffID for foreign layers is unimplemented") } - } else { - cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, pool) - } - data[index] = cld - } - - func() { // A scope for defer - progressPool, progressCleanup := ic.c.newProgressPool(ctx) - defer progressCleanup() + logrus.Debugf("Skipping foreign layer %q copy to %s", srcLayer.Digest, ic.c.dest.Reference().Transport().Name()) + cld.destInfo = srcLayer + continue // skip copying + } + + copySemaphore.Acquire(cancelCtx, 1) // limits parallel copy operations + copyGroup.Add(1) // allows the main routine to wait for all copy operations to finish + + // Copy the layer. Note that cld is a pointer; changes to it are + // propagated implicitly. + go func(index int, srcLayer types.BlobInfo, cld *copyLayerData) { + defer copySemaphore.Release(1) + defer copyGroup.Done() + cld.destInfo, cld.diffID, cld.err = ic.copyLayer(cancelCtx, srcLayer, index, progressPool) + if cld.err != nil { + logrus.Errorf("copying layer %d failed: %v", index, cld.err) + // Stop all other running goroutines as we can't recover from an error + cancelCopyLayer() + } + }(layerIndex, srcLayer, cld) - for i, srcLayer := range srcInfos { - copySemaphore.Acquire(ctx, 1) - go copyLayerHelper(i, srcLayer, progressPool) - } + layerIndex++ + } - // Wait for all layers to be copied - copyGroup.Wait() - }() + // Wait for all layer-copy operations to finish + copyGroup.Wait() destInfos := make([]types.BlobInfo, numLayers) diffIDs := make([]digest.Digest, numLayers) - for i, cld := range data { + for i, srcLayer := range srcInfos { + cld, ok := digestToCopyData[srcLayer.Digest] + if !ok { + return errors.Errorf("no copy data for layer %q", srcLayer.Digest) + } if 
cld.err != nil { return cld.err } @@ -584,34 +606,6 @@ func (c *copier) newProgressPool(ctx context.Context) (*mpb.Progress, func()) { } } -// createProgressBar creates a mpb.Bar in pool. Note that if the copier's reportWriter -// is ioutil.Discard, the progress bar's output will be discarded -func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind string, onComplete string) *mpb.Bar { - // shortDigestLen is the length of the digest used for blobs. - const shortDigestLen = 12 - - prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded()) - // Truncate the prefix (chopping of some part of the digest) to make all progress bars aligned in a column. - maxPrefixLen := len("Copying blob ") + shortDigestLen - if len(prefix) > maxPrefixLen { - prefix = prefix[:maxPrefixLen] - } - - bar := pool.AddBar(info.Size, - mpb.BarClearOnComplete(), - mpb.PrependDecorators( - decor.Name(prefix), - ), - mpb.AppendDecorators( - decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), " "+onComplete), - ), - ) - if c.progressOutput == ioutil.Discard { - c.Printf("Copying %s %s\n", kind, info.Digest) - } - return bar -} - // copyConfig copies config.json, if any, from src to dest. 
func (c *copier) copyConfig(ctx context.Context, src types.Image) error { srcInfo := src.ConfigInfo() @@ -622,14 +616,21 @@ func (c *copier) copyConfig(ctx context.Context, src types.Image) error { } destInfo, err := func() (types.BlobInfo, error) { // A scope for defer - progressPool, progressCleanup := c.newProgressPool(ctx) - defer progressCleanup() - bar := c.createProgressBar(progressPool, srcInfo, "config", "done") - destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, bar) + pool := progress.NewPool(ctx, c.progressOutput) + defer pool.CleanUp() + + bar := pool.AddBar( + progress.DigestToCopyAction(srcInfo.Digest, "config"), + 0, + progress.BarOptions{ + RemoveOnCompletion: true, + StaticMessage: " ", + }) + + destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, -1, nil, false, true, bar) if err != nil { return types.BlobInfo{}, err } - bar.SetTotal(int64(len(configBlob)), true) return destInfo, nil }() if err != nil { @@ -651,20 +652,37 @@ type diffIDResult struct { // copyLayer copies a layer with srcInfo (with known Digest and possibly known Size) in src to dest, perhaps compressing it if canCompress, // and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded -func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, pool *mpb.Progress) (types.BlobInfo, digest.Digest, error) { +func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, layerIndexInImage int, pool *progress.Pool) (types.BlobInfo, digest.Digest, error) { cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be "" diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == "" + progressBarAction := progress.DigestToCopyAction(srcInfo.Digest, "blob") + bar := pool.AddBar( + progressBarAction, + 0, + progress.BarOptions{ + RemoveOnCompletion: true, + StaticMessage: " ", + }) // If we already have the blob, and we don't 
need to compute the diffID, then we don't need to read it from the source. if !diffIDIsNeeded { - reused, blobInfo, err := ic.c.dest.TryReusingBlob(ctx, srcInfo, ic.c.blobInfoCache, ic.canSubstituteBlobs) + reused, blobInfo, err := ic.c.dest.TryReusingBlob(ctx, srcInfo, layerIndexInImage, ic.c.blobInfoCache, ic.canSubstituteBlobs, bar) if err != nil { return types.BlobInfo{}, "", errors.Wrapf(err, "Error trying to reuse blob %s at destination", srcInfo.Digest) } if reused { logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest) - bar := ic.c.createProgressBar(pool, srcInfo, "blob", "skipped: already exists") - bar.SetTotal(0, true) + // Instead of adding boilerplate code to ALL TryReusingBlob()s, just + // special case the storage transport, which is the only transport + // where we need control over the progress.Bar. + if ic.c.dest.Reference().Transport().Name() != storage.Name() { + bar.ReplaceBar( + progressBarAction, + 0, + progress.BarOptions{ + StaticMessage: "skipped: already exists", + }) + } return blobInfo, cachedDiffID, nil } } @@ -676,9 +694,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, po } defer srcStream.Close() - bar := ic.c.createProgressBar(pool, srcInfo, "blob", "done") - - blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize}, diffIDIsNeeded, bar) + blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize}, layerIndexInImage, diffIDIsNeeded, bar) if err != nil { return types.BlobInfo{}, "", err } @@ -700,7 +716,6 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, po } } - bar.SetTotal(srcInfo.Size, true) return blobInfo, diffID, nil } @@ -709,7 +724,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, po // perhaps compressing the stream if canCompress, // and returns a complete blobInfo of the 
copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller. func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, - diffIDIsNeeded bool, bar *mpb.Bar) (types.BlobInfo, <-chan diffIDResult, error) { + layerIndexInImage int, diffIDIsNeeded bool, bar *progress.Bar) (types.BlobInfo, <-chan diffIDResult, error) { var getDiffIDRecorder func(compression.DecompressorFunc) io.Writer // = nil var diffIDChan chan diffIDResult @@ -733,7 +748,7 @@ func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Rea return pipeWriter } } - blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false, bar) // Sets err to nil on success + blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, layerIndexInImage, getDiffIDRecorder, ic.canModifyManifest, false, bar) // Sets err to nil on success return blobInfo, diffIDChan, err // We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan } @@ -768,9 +783,9 @@ func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc) // perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil, // perhaps compressing it if canCompress, // and returns a complete blobInfo of the copied blob. -func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, +func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, layerIndexInImage int, getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer, - canModifyBlob bool, isConfig bool, bar *mpb.Bar) (types.BlobInfo, error) { + canModifyBlob bool, isConfig bool, bar *progress.Bar) (types.BlobInfo, error) { // The copying happens through a pipeline of connected io.Readers. 
// === Input: srcStream @@ -793,7 +808,6 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr return types.BlobInfo{}, errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest) } isCompressed := decompressor != nil - destStream = bar.ProxyReader(destStream) // === Send a copy of the original, uncompressed, stream, to a separate path if necessary. var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so. @@ -846,8 +860,25 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr } } + // Instead of adding boilerplate code to ALL PutBlob()s, just special case + // the storage transport, which is the only transport where we need control + // over the progress.Bar. + if c.dest.Reference().Transport().Name() != storage.Name() { + kind := "blob" + if isConfig { + kind = "config" + } + bar = bar.ReplaceBar( + progress.DigestToCopyAction(srcInfo.Digest, kind), + srcInfo.Size, + progress.BarOptions{ + OnCompletionMessage: "done", + }) + destStream = bar.ProxyReader(destStream) + } + // === Finally, send the layer stream to dest. 
- uploadedInfo, err := c.dest.PutBlob(ctx, destStream, inputInfo, c.blobInfoCache, isConfig) + uploadedInfo, err := c.dest.PutBlob(ctx, destStream, inputInfo, layerIndexInImage, c.blobInfoCache, isConfig, bar) if err != nil { return types.BlobInfo{}, errors.Wrap(err, "Error writing blob") } diff --git a/vendor/github.com/containers/image/directory/directory_dest.go b/vendor/github.com/containers/image/directory/directory_dest.go index 4b2ab022e24..e0b5b94e6d8 100644 --- a/vendor/github.com/containers/image/directory/directory_dest.go +++ b/vendor/github.com/containers/image/directory/directory_dest.go @@ -7,6 +7,7 @@ import ( "os" "path/filepath" + "github.com/containers/image/pkg/progress" "github.com/containers/image/types" "github.com/opencontainers/go-digest" "github.com/pkg/errors" @@ -129,14 +130,20 @@ func (d *dirImageDestination) HasThreadSafePutBlob() bool { return false } -// PutBlob writes contents of stream and returns data representing the result (with all data filled in). +// PutBlob writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. // May update cache. +// layerIndexInImage must be properly set to the layer index of the corresponding blob in the image. This value is required to allow parallel executions of +// PutBlob() and TryReusingBlob() where the layers must be written to the backend storage in sequential order. A value >= 0 indicates that the blob is a layer. +// Non-layer blobs (e.g., configs or throwaway layers) must have a value < 0. +// Note that only the containers-storage destination is sensitive to the layerIndexInImage parameter. Other transport destinations ignore it. 
+// Same applies to bar, which is used in the containers-storage destination to update the progress bars displayed in the terminal. If it's nil, it will be ignored. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { +func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, layerIndexInImage int, cache types.BlobInfoCache, isConfig bool, bar *progress.Bar) (types.BlobInfo, error) { blobFile, err := ioutil.TempFile(d.ref.path, "dir-put-blob") if err != nil { return types.BlobInfo{}, err @@ -178,11 +185,15 @@ func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. +// layerIndexInImage must be properly set to the layer index of the corresponding blob in the image. This value is required to allow parallel executions of +// PutBlob() and TryReusingBlob() where the layers must be written to the backend storage in sequential order. A value >= 0 indicates that the blob is a layer. +// Note that only the containers-storage destination is sensitive to the layerIndexInImage parameter. Other transport destinations ignore it. 
+// Same applies to bar, which is used in the containers-storage destination to update the progress bars displayed in the terminal. If it's nil, it will be ignored. // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. -// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache. -func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { +func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, layerIndexInImage int, cache types.BlobInfoCache, canSubstitute bool, bar *progress.Bar) (bool, types.BlobInfo, error) { if info.Digest == "" { return false, types.BlobInfo{}, errors.Errorf(`"Can not check for a blob with unknown digest`) } diff --git a/vendor/github.com/containers/image/docker/docker_image_dest.go b/vendor/github.com/containers/image/docker/docker_image_dest.go index c116cbec32b..3fdc396cff9 100644 --- a/vendor/github.com/containers/image/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/docker/docker_image_dest.go @@ -17,9 +17,10 @@ import ( "github.com/containers/image/docker/reference" "github.com/containers/image/manifest" "github.com/containers/image/pkg/blobinfocache/none" + "github.com/containers/image/pkg/progress" "github.com/containers/image/types" "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" + v2 
"github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/client" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -117,19 +118,25 @@ func (d *dockerImageDestination) HasThreadSafePutBlob() bool { return true } -// PutBlob writes contents of stream and returns data representing the result (with all data filled in). +// PutBlob writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. // May update cache. +// layerIndexInImage must be properly set to the layer index of the corresponding blob in the image. This value is required to allow parallel executions of +// PutBlob() and TryReusingBlob() where the layers must be written to the backend storage in sequential order. A value >= 0 indicates that the blob is a layer. +// Non-layer blobs (e.g., configs or throwaway layers) must have a value < 0. +// Note that only the containers-storage destination is sensitive to the layerIndexInImage parameter. Other transport destinations ignore it. +// Same applies to bar, which is used in the containers-storage destination to update the progress bars displayed in the terminal. If it's nil, it will be ignored. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. 
-func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { +func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, layerIndexInImage int, cache types.BlobInfoCache, isConfig bool, bar *progress.Bar) (types.BlobInfo, error) { if inputInfo.Digest.String() != "" { // This should not really be necessary, at least the copy code calls TryReusingBlob automatically. // Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value. // But we do that with NoCache, so that it _only_ checks the primary destination, instead of trying all mount candidates _again_. - haveBlob, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, none.NoCache, false) + haveBlob, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, layerIndexInImage, none.NoCache, false, nil) if err != nil { return types.BlobInfo{}, err } @@ -267,11 +274,15 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. +// layerIndexInImage must be properly set to the layer index of the corresponding blob in the image. This value is required to allow parallel executions of +// PutBlob() and TryReusingBlob() where the layers must be written to the backend storage in sequential order. A value >= 0 indicates that the blob is a layer. +// Note that only the containers-storage destination is sensitive to the layerIndexInImage parameter. Other transport destinations ignore it. 
+// Same applies to bar, which is used in the containers-storage destination to update the progress bars displayed in the terminal. If it's nil, it will be ignored. // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. -// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache. -func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { +func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, layerIndexInImage int, cache types.BlobInfoCache, canSubstitute bool, bar *progress.Bar) (bool, types.BlobInfo, error) { if info.Digest == "" { return false, types.BlobInfo{}, errors.Errorf(`"Can not check for a blob with unknown digest`) } diff --git a/vendor/github.com/containers/image/docker/tarfile/dest.go b/vendor/github.com/containers/image/docker/tarfile/dest.go index 5f30eddbc72..1be485e7c17 100644 --- a/vendor/github.com/containers/image/docker/tarfile/dest.go +++ b/vendor/github.com/containers/image/docker/tarfile/dest.go @@ -15,6 +15,7 @@ import ( "github.com/containers/image/docker/reference" "github.com/containers/image/internal/tmpdir" "github.com/containers/image/manifest" + "github.com/containers/image/pkg/progress" "github.com/containers/image/types" "github.com/opencontainers/go-digest" "github.com/pkg/errors" @@ -87,14 +88,20 @@ func (d *Destination) HasThreadSafePutBlob() bool { 
return false } -// PutBlob writes contents of stream and returns data representing the result (with all data filled in). +// PutBlob writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. // May update cache. +// layerIndexInImage must be properly set to the layer index of the corresponding blob in the image. This value is required to allow parallel executions of +// PutBlob() and TryReusingBlob() where the layers must be written to the backend storage in sequential order. A value >= 0 indicates that the blob is a layer. +// Non-layer blobs (e.g., configs or throwaway layers) must have a value < 0. +// Note that only the containers-storage destination is sensitive to the layerIndexInImage parameter. Other transport destinations ignore it. +// Same applies to bar, which is used in the containers-storage destination to update the progress bars displayed in the terminal. If it's nil, it will be ignored. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { +func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, layerIndexInImage int, cache types.BlobInfoCache, isConfig bool, bar *progress.Bar) (types.BlobInfo, error) { // Ouch, we need to stream the blob into a temporary file just to determine the size. 
// When the layer is decompressed, we also have to generate the digest on uncompressed datas. if inputInfo.Size == -1 || inputInfo.Digest.String() == "" { @@ -126,7 +133,7 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t } // Maybe the blob has been already sent - ok, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, cache, false) + ok, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, layerIndexInImage, cache, false, nil) if err != nil { return types.BlobInfo{}, err } @@ -160,11 +167,15 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. +// layerIndexInImage must be properly set to the layer index of the corresponding blob in the image. This value is required to allow parallel executions of +// PutBlob() and TryReusingBlob() where the layers must be written to the backend storage in sequential order. A value >= indicates that the blob a layer. +// Note that only the containers-storage destination is sensitive to the layerIndexInImage parameter. Other transport destinations ignore it. +// Same applies to bar, which is used in the containers-storage destination to update the progress bars displayed in the terminal. If it's nil, it will be ignored. // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. -// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. 
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache. -func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { +func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, layerIndexInImage int, cache types.BlobInfoCache, canSubstitute bool, bar *progress.Bar) (bool, types.BlobInfo, error) { if info.Digest == "" { return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest") } diff --git a/vendor/github.com/containers/image/image/docker_schema2.go b/vendor/github.com/containers/image/image/docker_schema2.go index 351e73ea1da..924ec3671a3 100644 --- a/vendor/github.com/containers/image/image/docker_schema2.go +++ b/vendor/github.com/containers/image/image/docker_schema2.go @@ -252,7 +252,7 @@ func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, dest typ logrus.Debugf("Uploading empty layer during conversion to schema 1") // Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here, // and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it. 
- info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, none.NoCache, false) + info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, -1, none.NoCache, false, nil) if err != nil { return nil, errors.Wrap(err, "Error uploading empty layer") } diff --git a/vendor/github.com/containers/image/oci/archive/oci_dest.go b/vendor/github.com/containers/image/oci/archive/oci_dest.go index 9571c37e2ba..2596e53deae 100644 --- a/vendor/github.com/containers/image/oci/archive/oci_dest.go +++ b/vendor/github.com/containers/image/oci/archive/oci_dest.go @@ -5,6 +5,7 @@ import ( "io" "os" + "github.com/containers/image/pkg/progress" "github.com/containers/image/types" "github.com/containers/storage/pkg/archive" "github.com/pkg/errors" @@ -87,22 +88,31 @@ func (d *ociArchiveImageDestination) HasThreadSafePutBlob() bool { // inputInfo.Size is the expected length of stream, if known. // inputInfo.MediaType describes the blob format, if known. // May update cache. +// layerIndexInImage must be properly set to the layer index of the corresponding blob in the image. This value is required to allow parallel executions of +// PutBlob() and TryReusingBlob() where the layers must be written to the backend storage in sequential order. A value >= indicates that the blob a layer. +// Non-layer blobs (e.g., configs or throwaway layers) must have a value < 0. +// Note that only the containers-storage destination is sensitive to the layerIndexInImage parameter. Other transport destinations ignore it. +// Same applies to bar, which is used in the containers-storage destination to update the progress bars displayed in the terminal. If it's nil, it will be ignored. // WARNING: The contents of stream are being verified on the fly. 
Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (d *ociArchiveImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { - return d.unpackedDest.PutBlob(ctx, stream, inputInfo, cache, isConfig) +func (d *ociArchiveImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, layerIndexInImage int, cache types.BlobInfoCache, isConfig bool, bar *progress.Bar) (types.BlobInfo, error) { + return d.unpackedDest.PutBlob(ctx, stream, inputInfo, layerIndexInImage, cache, isConfig, bar) } // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. +// layerIndexInImage must be properly set to the layer index of the corresponding blob in the image. This value is required to allow parallel executions of +// PutBlob() and TryReusingBlob() where the layers must be written to the backend storage in sequential order. A value >= indicates that the blob a layer. +// Note that only the containers-storage destination is sensitive to the layerIndexInImage parameter. Other transport destinations ignore it. +// Same applies to bar, which is used in the containers-storage destination to update the progress bars displayed in the terminal. If it's nil, it will be ignored. // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. 
-// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache. -func (d *ociArchiveImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { - return d.unpackedDest.TryReusingBlob(ctx, info, cache, canSubstitute) +func (d *ociArchiveImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, layerIndexInImage int, cache types.BlobInfoCache, canSubstitute bool, bar *progress.Bar) (bool, types.BlobInfo, error) { + return d.unpackedDest.TryReusingBlob(ctx, info, layerIndexInImage, cache, canSubstitute, bar) } // PutManifest writes manifest to the destination diff --git a/vendor/github.com/containers/image/oci/layout/oci_dest.go b/vendor/github.com/containers/image/oci/layout/oci_dest.go index db102184dbc..39ae0277305 100644 --- a/vendor/github.com/containers/image/oci/layout/oci_dest.go +++ b/vendor/github.com/containers/image/oci/layout/oci_dest.go @@ -10,6 +10,7 @@ import ( "runtime" "github.com/containers/image/manifest" + "github.com/containers/image/pkg/progress" "github.com/containers/image/types" digest "github.com/opencontainers/go-digest" imgspec "github.com/opencontainers/image-spec/specs-go" @@ -117,10 +118,15 @@ func (d *ociImageDestination) HasThreadSafePutBlob() bool { // inputInfo.Size is the expected length of stream, if known. // inputInfo.MediaType describes the blob format, if known. // May update cache. +// layerIndexInImage must be properly set to the layer index of the corresponding blob in the image. 
This value is required to allow parallel executions of +// PutBlob() and TryReusingBlob() where the layers must be written to the backend storage in sequential order. A value >= indicates that the blob a layer. +// Non-layer blobs (e.g., configs or throwaway layers) must have a value < 0. +// Note that only the containers-storage destination is sensitive to the layerIndexInImage parameter. Other transport destinations ignore it. +// Same applies to bar, which is used in the containers-storage destination to update the progress bars displayed in the terminal. If it's nil, it will be ignored. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { +func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, layerIndexInImage int, cache types.BlobInfoCache, isConfig bool, bar *progress.Bar) (types.BlobInfo, error) { blobFile, err := ioutil.TempFile(d.ref.dir, "oci-put-blob") if err != nil { return types.BlobInfo{}, err @@ -183,11 +189,15 @@ func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. +// layerIndexInImage must be properly set to the layer index of the corresponding blob in the image. 
This value is required to allow parallel executions of +// PutBlob() and TryReusingBlob() where the layers must be written to the backend storage in sequential order. A value >= 0 indicates that the blob is a layer. +// Note that only the containers-storage destination is sensitive to the layerIndexInImage parameter. Other transport destinations ignore it. +// Same applies to bar, which is used in the containers-storage destination to update the progress bars displayed in the terminal. If it's nil, it will be ignored. // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. -// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache.
-func (d *ociImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { +func (d *ociImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, layerIndexInImage int, cache types.BlobInfoCache, canSubstitute bool, bar *progress.Bar) (bool, types.BlobInfo, error) { if info.Digest == "" { return false, types.BlobInfo{}, errors.Errorf(`"Can not check for a blob with unknown digest`) } diff --git a/vendor/github.com/containers/image/openshift/openshift.go b/vendor/github.com/containers/image/openshift/openshift.go index 814c3eea1de..3b50501e272 100644 --- a/vendor/github.com/containers/image/openshift/openshift.go +++ b/vendor/github.com/containers/image/openshift/openshift.go @@ -15,6 +15,7 @@ import ( "github.com/containers/image/docker" "github.com/containers/image/docker/reference" "github.com/containers/image/manifest" + "github.com/containers/image/pkg/progress" "github.com/containers/image/types" "github.com/containers/image/version" "github.com/opencontainers/go-digest" @@ -388,26 +389,36 @@ func (d *openshiftImageDestination) HasThreadSafePutBlob() bool { return false } -// PutBlob writes contents of stream and returns data representing the result (with all data filled in). +// PutBlob writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. // May update cache. +// layerIndexInImage must be properly set to the layer index of the corresponding blob in the image. 
This value is required to allow parallel executions of +// PutBlob() and TryReusingBlob() where the layers must be written to the backend storage in sequential order. A value >= indicates that the blob a layer. +// Non-layer blobs (e.g., configs or throwaway layers) must have a value < 0. +// Note that only the containers-storage destination is sensitive to the layerIndexInImage parameter. Other transport destinations ignore it. +// Same applies to bar, which is used in the containers-storage destination to update the progress bars displayed in the terminal. If it's nil, it will be ignored. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (d *openshiftImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { - return d.docker.PutBlob(ctx, stream, inputInfo, cache, isConfig) +func (d *openshiftImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, layerIndexInImage int, cache types.BlobInfoCache, isConfig bool, bar *progress.Bar) (types.BlobInfo, error) { + return d.docker.PutBlob(ctx, stream, inputInfo, layerIndexInImage, cache, isConfig, bar) } // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. +// layerIndexInImage must be properly set to the layer index of the corresponding blob in the image. 
This value is required to allow parallel executions of +// PutBlob() and TryReusingBlob() where the layers must be written to the backend storage in sequential order. A value >= indicates that the blob a layer. +// Note that only the containers-storage destination is sensitive to the layerIndexInImage parameter. Other transport destinations ignore it. +// Same applies to bar, which is used in the containers-storage destination to update the progress bars displayed in the terminal. If it's nil, it will be ignored. // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. -// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache. -func (d *openshiftImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { - return d.docker.TryReusingBlob(ctx, info, cache, canSubstitute) +func (d *openshiftImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, layerIndexInImage int, cache types.BlobInfoCache, canSubstitute bool, bar *progress.Bar) (bool, types.BlobInfo, error) { + return d.docker.TryReusingBlob(ctx, info, layerIndexInImage, cache, canSubstitute, bar) } // PutManifest writes manifest to the destination. 
diff --git a/vendor/github.com/containers/image/ostree/ostree_dest.go b/vendor/github.com/containers/image/ostree/ostree_dest.go index d69f4fa331a..292a1cb2c66 100644 --- a/vendor/github.com/containers/image/ostree/ostree_dest.go +++ b/vendor/github.com/containers/image/ostree/ostree_dest.go @@ -21,6 +21,7 @@ import ( "unsafe" "github.com/containers/image/manifest" + "github.com/containers/image/pkg/progress" "github.com/containers/image/types" "github.com/containers/storage/pkg/archive" "github.com/klauspost/pgzip" @@ -142,10 +143,15 @@ func (d *ostreeImageDestination) HasThreadSafePutBlob() bool { // inputInfo.Size is the expected length of stream, if known. // inputInfo.MediaType describes the blob format, if known. // May update cache. +// layerIndexInImage must be properly set to the layer index of the corresponding blob in the image. This value is required to allow parallel executions of +// PutBlob() and TryReusingBlob() where the layers must be written to the backend storage in sequential order. A value >= indicates that the blob a layer. +// Non-layer blobs (e.g., configs or throwaway layers) must have a value < 0. +// Note that only the containers-storage destination is sensitive to the layerIndexInImage parameter. Other transport destinations ignore it. +// Same applies to bar, which is used in the containers-storage destination to update the progress bars displayed in the terminal. If it's nil, it will be ignored. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. 
-func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { +func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, layerIndexInImage int, cache types.BlobInfoCache, isConfig bool, bar *progress.Bar) (types.BlobInfo, error) { tmpDir, err := ioutil.TempDir(d.tmpDirPath, "blob") if err != nil { return types.BlobInfo{}, err @@ -338,11 +344,15 @@ func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobTo // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. +// layerIndexInImage must be properly set to the layer index of the corresponding blob in the image. This value is required to allow parallel executions of +// PutBlob() and TryReusingBlob() where the layers must be written to the backend storage in sequential order. A value >= indicates that the blob a layer. +// Note that only the containers-storage destination is sensitive to the layerIndexInImage parameter. Other transport destinations ignore it. +// Same applies to bar, which is used in the containers-storage destination to update the progress bars displayed in the terminal. If it's nil, it will be ignored. // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. -// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. 
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache. -func (d *ostreeImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { +func (d *ostreeImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, layerIndexInImage int, cache types.BlobInfoCache, canSubstitute bool, bar *progress.Bar) (bool, types.BlobInfo, error) { if d.repo == nil { repo, err := openRepo(d.ref.repo) if err != nil { diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/boltdb/boltdb.go b/vendor/github.com/containers/image/pkg/blobinfocache/boltdb/boltdb.go index 91d4e913750..19d0a6c80e5 100644 --- a/vendor/github.com/containers/image/pkg/blobinfocache/boltdb/boltdb.go +++ b/vendor/github.com/containers/image/pkg/blobinfocache/boltdb/boltdb.go @@ -7,9 +7,9 @@ import ( "sync" "time" - "github.com/boltdb/bolt" "github.com/containers/image/pkg/blobinfocache/internal/prioritize" "github.com/containers/image/types" + bolt "github.com/etcd-io/bbolt" "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" ) diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/default.go b/vendor/github.com/containers/image/pkg/blobinfocache/default.go index 1e6e543b2fe..357333215df 100644 --- a/vendor/github.com/containers/image/pkg/blobinfocache/default.go +++ b/vendor/github.com/containers/image/pkg/blobinfocache/default.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "path/filepath" + "strconv" "github.com/containers/image/pkg/blobinfocache/boltdb" "github.com/containers/image/pkg/blobinfocache/memory" @@ -47,9 +48,18 @@ func blobInfoCacheDir(sys *types.SystemContext, euid int) (string, error) { return 
filepath.Join(dataDir, "containers", "cache"), nil } +func getRootlessUID() int { + uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID") + if uidEnv != "" { + u, _ := strconv.Atoi(uidEnv) + return u + } + return os.Geteuid() +} + // DefaultCache returns the default BlobInfoCache implementation appropriate for sys. func DefaultCache(sys *types.SystemContext) types.BlobInfoCache { - dir, err := blobInfoCacheDir(sys, os.Geteuid()) + dir, err := blobInfoCacheDir(sys, getRootlessUID()) if err != nil { logrus.Debugf("Error determining a location for %s, using a memory-only cache", blobInfoCacheFilename) return memory.New() diff --git a/vendor/github.com/containers/image/pkg/docker/config/config.go b/vendor/github.com/containers/image/pkg/docker/config/config.go index 1f576253dc1..57b548e2605 100644 --- a/vendor/github.com/containers/image/pkg/docker/config/config.go +++ b/vendor/github.com/containers/image/pkg/docker/config/config.go @@ -85,21 +85,6 @@ func GetAuthentication(sys *types.SystemContext, registry string) (string, strin return "", "", nil } -// GetUserLoggedIn returns the username logged in to registry from either -// auth.json or XDG_RUNTIME_DIR -// Used to tell the user if someone is logged in to the registry when logging in -func GetUserLoggedIn(sys *types.SystemContext, registry string) (string, error) { - path, err := getPathToAuth(sys) - if err != nil { - return "", err - } - username, _, _ := findAuthentication(registry, path, false) - if username != "" { - return username, nil - } - return "", nil -} - // RemoveAuthentication deletes the credentials stored in auth.json func RemoveAuthentication(sys *types.SystemContext, registry string) error { return modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) { diff --git a/vendor/github.com/containers/image/pkg/progress/progress.go b/vendor/github.com/containers/image/pkg/progress/progress.go new file mode 100644 index 00000000000..2972e489e0d --- /dev/null 
+++ b/vendor/github.com/containers/image/pkg/progress/progress.go @@ -0,0 +1,154 @@ +// Package progress is exposes a convenience API and abstractions for creating +// and managing pools of multi-progress bars. +package progress + +import ( + "context" + "fmt" + "io" + + "github.com/opencontainers/go-digest" + "github.com/vbauerster/mpb" + "github.com/vbauerster/mpb/decor" +) + +// Pool allows managing progress bars. +type Pool struct { + pool *mpb.Progress + cancel context.CancelFunc + writer io.Writer +} + +// Bar represents a progress bar. +type Bar struct { + bar *mpb.Bar + pool *Pool +} + +// BarOptions includes various options to control AddBar. +type BarOptions struct { + // RemoveOnCompletion the bar on completion. This must be true if the bar + // will be replaced by another one. + RemoveOnCompletion bool + // OnCompletionMessage will be shown on completion and replace the progress + // bar. Note that setting OnCompletionMessage will cause the progress bar + // (or the static message) to be cleared on completion. + OnCompletionMessage string + // ReplaceBar is the bar to replace. + ReplaceBar *Bar + // StaticMessage, if set, will replace displaying the progress bar. Use this + // field for static progress bars that do not show progress. + StaticMessage string +} + +// NewPool returns a Pool. The caller must eventually call ProgressPoo.CleanUp() +// when the pool will no longer be updated. +func NewPool(ctx context.Context, writer io.Writer) *Pool { + ctx, cancelFunc := context.WithCancel(ctx) + return &Pool{ + pool: mpb.New(mpb.WithWidth(40), mpb.WithOutput(writer), mpb.WithContext(ctx)), + cancel: cancelFunc, + writer: writer, + } +} + +// CleanUp cleans up resources, such as remaining progress bars, and stops them +// if necessary. +func (p *Pool) CleanUp() { + p.cancel() + p.pool.Wait() +} + +// getWriter returns the Pool's writer. 
+func (p *Pool) getWriter() io.Writer { + return p.writer +} + +// DigestToCopyAction returns a string based on the blobinfo and kind. +// It's a convenience function for the c/image library when copying images. +func DigestToCopyAction(digest digest.Digest, kind string) string { + // shortDigestLen is the length of the digest used for blobs. + const shortDigestLen = 12 + const maxLen = len("Copying blob ") + shortDigestLen + // Truncate the string (chopping off some part of the digest) to make all + // progress bars aligned in a column. + copyAction := fmt.Sprintf("Copying %s %s", kind, digest.Encoded()) + if len(copyAction) > maxLen { + copyAction = copyAction[:maxLen] + } + return copyAction +} + +// AddBar adds a new Bar to the Pool. Use options to control the behavior and +// appearance of the bar. +func (p *Pool) AddBar(action string, size int64, options BarOptions) *Bar { + var bar *mpb.Bar + + // First decorator showing action (e.g., "Copying blob 123456abcd") + mpbOptions := []mpb.BarOption{ + mpb.PrependDecorators( + decor.Name(action), + ), + } + + if options.RemoveOnCompletion { + mpbOptions = append(mpbOptions, mpb.BarRemoveOnComplete()) + } + + if options.ReplaceBar != nil { + mpbOptions = append(mpbOptions, mpb.BarReplaceOnComplete(options.ReplaceBar.bar)) + // bar.SetTotal(0, true) will make sure that the bar is stopped + defer options.ReplaceBar.bar.SetTotal(0, true) + } + + // If no static message is set, we display the progress bar. Otherwise, + // we'll display the message only. + if options.StaticMessage == "" { + mpbOptions = append(mpbOptions, + mpb.AppendDecorators( + decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), " "+options.OnCompletionMessage), + ), + ) + mpbOptions = append(mpbOptions, mpb.BarClearOnComplete()) + bar = p.pool.AddBar(size, mpbOptions...)
+ return &Bar{ + bar: bar, + pool: p, + } + } + + barFiller := mpb.FillerFunc( + func(w io.Writer, width int, st *decor.Statistics) { + fmt.Fprint(w, options.StaticMessage) + }) + + // If OnCompletionMessage is set, we need to add the decorator and clear + // the bar on completion. + if options.OnCompletionMessage != "" { + mpbOptions = append(mpbOptions, + mpb.AppendDecorators( + decor.OnComplete(decor.Name(""), " "+options.OnCompletionMessage), + ), + ) + mpbOptions = append(mpbOptions, mpb.BarClearOnComplete()) + } + + bar = p.pool.Add(size, barFiller, mpbOptions...) + return &Bar{ + bar: bar, + pool: p, + } +} + +// ReplaceBar is like Pool.AddBar but replaces the bar in its pool. Note that +// the bar will be terminated and should have been created with +// options.RemoveOnCompletion. +func (b *Bar) ReplaceBar(action string, size int64, options BarOptions) *Bar { + options.ReplaceBar = b + return b.pool.AddBar(action, size, options) +} + +// ProxyReader wraps the reader with metrics for progress tracking.
+func (b *Bar) ProxyReader(reader io.Reader) io.ReadCloser { + return b.bar.ProxyReader(reader) +} diff --git a/vendor/github.com/containers/image/storage/storage_image.go b/vendor/github.com/containers/image/storage/storage_image.go index b39d2bcc04a..268d102a944 100644 --- a/vendor/github.com/containers/image/storage/storage_image.go +++ b/vendor/github.com/containers/image/storage/storage_image.go @@ -13,12 +13,14 @@ import ( "path/filepath" "sync" "sync/atomic" + "time" "github.com/containers/image/docker/reference" "github.com/containers/image/image" "github.com/containers/image/internal/tmpdir" "github.com/containers/image/manifest" "github.com/containers/image/pkg/blobinfocache/none" + "github.com/containers/image/pkg/progress" "github.com/containers/image/types" "github.com/containers/storage" "github.com/containers/storage/pkg/archive" @@ -54,16 +56,19 @@ type storageImageSource struct { } type storageImageDestination struct { - imageRef storageReference - directory string // Temporary directory where we store blobs until Commit() time - nextTempFileID int32 // A counter that we use for computing filenames to assign to blobs - manifest []byte // Manifest contents, temporary - signatures []byte // Signature contents, temporary - putBlobMutex sync.Mutex // Mutex to sync state for parallel PutBlob executions - blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs - fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes - filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them - SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice + imageRef storageReference + directory string // Temporary directory where we store blobs until Commit() time + nextTempFileID int32 // A counter that we use for 
computing filenames to assign to blobs + manifest []byte // Manifest contents, temporary + signatures []byte // Signature contents, temporary + putBlobMutex sync.Mutex // Mutex to sync state for parallel PutBlob executions + blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs + blobLayerIDs map[digest.Digest]string // Mapping from layer blobsums to their corresponding storage layer ID + fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes + filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them + indexToStorageID map[int]string // Mapping from layer index to the layer IDs in the storage + indexToDoneChannel map[int]chan bool // Mapping from layer index to a channel to indicate the layer has been written to storage + SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice } type storageImageCloser struct { @@ -323,16 +328,31 @@ func newImageDestination(imageRef storageReference) (*storageImageDestination, e return nil, errors.Wrapf(err, "error creating a temporary directory") } image := &storageImageDestination{ - imageRef: imageRef, - directory: directory, - blobDiffIDs: make(map[digest.Digest]digest.Digest), - fileSizes: make(map[digest.Digest]int64), - filenames: make(map[digest.Digest]string), - SignatureSizes: []int{}, + imageRef: imageRef, + directory: directory, + blobDiffIDs: make(map[digest.Digest]digest.Digest), + blobLayerIDs: make(map[digest.Digest]string), + fileSizes: make(map[digest.Digest]int64), + filenames: make(map[digest.Digest]string), + indexToStorageID: make(map[int]string), + indexToDoneChannel: make(map[int]chan bool), + SignatureSizes: []int{}, } return image, nil } +func (s *storageImageDestination) getChannelForLayer(layerIndexInImage int) chan bool { + s.putBlobMutex.Lock() + defer s.putBlobMutex.Unlock() + channel, ok := s.indexToDoneChannel[layerIndexInImage] + if 
!ok { + // A buffered channel to allow non-blocking sends + channel = make(chan bool, 1) + s.indexToDoneChannel[layerIndexInImage] = channel + } + return channel +} + // Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, // e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. func (s *storageImageDestination) Reference() types.ImageReference { @@ -360,15 +380,181 @@ func (s *storageImageDestination) HasThreadSafePutBlob() bool { return true } +// tryReusingBlobFromOtherProcess is implementing a mechanism to detect if +// another process is already copying the blobinfo by making use of the +// blob-digest locks from containers/storage. The caller is expected to send a +// message through the done channel once the lock has been acquired. If we +// detect that another process is copying the blob, we wait until we own the +// lockfile and call tryReusingBlob() to check if we can reuse the committed +// layer. +func (s *storageImageDestination) tryReusingBlobFromOtherProcess(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, layerIndexInImage int, cache types.BlobInfoCache, done chan bool, bar *progress.Bar) (bool, types.BlobInfo, error) { + copiedByAnotherProcess := false + select { + case <-done: + case <-time.After(200 * time.Millisecond): + logrus.Debugf("blob %q is being copied by another process", blobinfo.Digest) + copiedByAnotherProcess = true + if bar != nil { + bar = bar.ReplaceBar( + progress.DigestToCopyAction(blobinfo.Digest, "blob"), + blobinfo.Size, + progress.BarOptions{ + StaticMessage: "paused: being copied by another process", + RemoveOnCompletion: true, + }) + } + // Wait until we acquired the lock or encountered an error. + <-done + } + + // Now, we own the lock. + // + // In case another process copied the blob, we should attempt reusing the + // blob. 
If it's not available, either the copy-detection heuristic failed + // (i.e., waiting 200 ms was not enough) or the other process failed to copy + // the blob. + if copiedByAnotherProcess { + reusable, blob, err := s.tryReusingBlob(ctx, blobinfo, layerIndexInImage, cache, false) + // If we can reuse the blob or hit an error trying to do so, we need to + // signal the result through the channel as another Goroutine is potentially + // waiting for it. If we can't reuse the blob and encountered no error, we + // need to copy it and will send the signal in PutBlob(). + if reusable { + logrus.Debugf("another process successfully copied blob %q", blobinfo.Digest) + if bar != nil { + bar = bar.ReplaceBar( + progress.DigestToCopyAction(blobinfo.Digest, "blob"), + blobinfo.Size, + progress.BarOptions{ + StaticMessage: "done: copied by another process", + }) + } + } else { + logrus.Debugf("another process must have failed copying blob %q", blobinfo.Digest) + } + return reusable, blob, err + } + + return false, types.BlobInfo{}, nil +} + // PutBlob writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. // inputInfo.MediaType describes the blob format, if known. // May update cache. +// layerIndexInImage must be properly set to the layer index of the corresponding blob in the image. This value is required to allow parallel executions of +// PutBlob() and TryReusingBlob() where the layers must be written to the backend storage in sequential order. A value >= 0 indicates that the blob is a layer. +// Non-layer blobs (e.g., configs or throwaway layers) must have a value < 0. +// Note that only the containers-storage destination is sensitive to the layerIndexInImage parameter. Other transport destinations ignore it. 
+// Same applies to bar, which is used in the containers-storage destination to update the progress bars displayed in the terminal. If it's nil, it will be ignored. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { +func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, layerIndexInImage int, cache types.BlobInfoCache, isConfig bool, bar *progress.Bar) (blob types.BlobInfo, err error) { + // Deferred call to an anonymous func to signal potentially waiting + // goroutines via the index-specific channel. 
+ defer func() { + // No need to wait + if layerIndexInImage >= 0 { + // It's a buffered channel, so we don't wait for the message to be + // received + channel := s.getChannelForLayer(layerIndexInImage) + channel <- err == nil + if err != nil { + logrus.Errorf("error while committing blob %d: %v", layerIndexInImage, err) + } + } + }() + + waitAndCommit := func(blob types.BlobInfo, err error) (types.BlobInfo, error) { + // First, wait for the previous layer to be committed + previousID := "" + if layerIndexInImage > 0 { + channel := s.getChannelForLayer(layerIndexInImage - 1) + if committed := <-channel; !committed { + err := fmt.Errorf("committing previous layer %d failed", layerIndexInImage-1) + logrus.Errorf(err.Error()) + return types.BlobInfo{}, err + } + var ok bool + s.putBlobMutex.Lock() + previousID, ok = s.indexToStorageID[layerIndexInImage-1] + s.putBlobMutex.Unlock() + if !ok { + return types.BlobInfo{}, fmt.Errorf("error committing blob %q: could not find parent layer ID", blob.Digest.String()) + } + } + + // Commit the blob + if layerIndexInImage >= 0 { + id, err := s.commitBlob(ctx, blob, previousID) + if err == nil { + s.putBlobMutex.Lock() + s.blobLayerIDs[blob.Digest] = id + s.indexToStorageID[layerIndexInImage] = id + s.putBlobMutex.Unlock() + } else { + return types.BlobInfo{}, err + } + } + return blob, nil + } + + // Check if another process is already copying the blob. Please refer to the + // doc of tryReusingBlobFromOtherProcess for further information. + if layerIndexInImage >= 0 { + // The reasoning for getting the locker here is to make the code paths + // of locking and unlocking it more obvious and simple. 
+ locker, err := s.imageRef.transport.store.GetDigestLock(blobinfo.Digest) + if err != nil { + return types.BlobInfo{}, errors.Wrapf(err, "error acquiring lock for blob %q", blobinfo.Digest) + } + done := make(chan bool, 1) + defer locker.Unlock() + go func() { + locker.Lock() + done <- true + }() + reusable, blob, err := s.tryReusingBlobFromOtherProcess(ctx, stream, blobinfo, layerIndexInImage, cache, done, bar) + if err != nil { + return blob, err + } + if reusable { + return waitAndCommit(blob, err) + } + } + + if bar != nil { + kind := "blob" + message := "" + if isConfig { + kind = "config" + // Setting the StaticMessage for the config avoids displaying a + // progress bar. Configs are comparatively small and quickly + // downloaded, such that displaying a progress is more a distraction + // than an indicator. + message = " " + } + bar = bar.ReplaceBar( + progress.DigestToCopyAction(blobinfo.Digest, kind), + blobinfo.Size, + progress.BarOptions{ + StaticMessage: message, + OnCompletionMessage: "done", + }) + stream = bar.ProxyReader(stream) + } + + blob, err = s.putBlob(ctx, stream, blobinfo, layerIndexInImage, cache, isConfig) + if err != nil { + return types.BlobInfo{}, err + } + + return waitAndCommit(blob, err) +} + +func (s *storageImageDestination) putBlob(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, layerIndexInImage int, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { // Stores a layer or data blob in our temporary directory, checking that any information // in the blobinfo matches the incoming data. errorBlobInfo := types.BlobInfo{ @@ -425,21 +611,17 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, } // This is safe because we have just computed both values ourselves. 
cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest()) - return types.BlobInfo{ + blob := types.BlobInfo{ Digest: blobDigest, Size: blobSize, MediaType: blobinfo.MediaType, - }, nil + } + + return blob, nil } -// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination -// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). -// info.Digest must not be empty. -// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. -// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size. -// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. -// May use and/or update cache. -func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { +// tryReusingBlob is a helper method for TryReusingBlob to wrap it +func (s *storageImageDestination) tryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, layerIndexInImage int, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { // lock the entire method as it executes fairly quickly s.putBlobMutex.Lock() defer s.putBlobMutex.Unlock() @@ -467,6 +649,10 @@ func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo t if len(layers) > 0 { // Save this for completeness. 
s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest + s.blobLayerIDs[blobinfo.Digest] = layers[0].ID + if layerIndexInImage >= 0 { + s.indexToStorageID[layerIndexInImage] = layers[0].ID + } return true, types.BlobInfo{ Digest: blobinfo.Digest, Size: layers[0].UncompressedSize, @@ -482,6 +668,10 @@ func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo t if len(layers) > 0 { // Record the uncompressed value so that we can use it to calculate layer IDs. s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest + s.blobLayerIDs[blobinfo.Digest] = layers[0].ID + if layerIndexInImage >= 0 { + s.indexToStorageID[layerIndexInImage] = layers[0].ID + } return true, types.BlobInfo{ Digest: blobinfo.Digest, Size: layers[0].CompressedSize, @@ -500,6 +690,10 @@ func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo t } if len(layers) > 0 { s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest + s.blobLayerIDs[blobinfo.Digest] = layers[0].ID + if layerIndexInImage >= 0 { + s.indexToStorageID[layerIndexInImage] = layers[0].ID + } return true, types.BlobInfo{ Digest: uncompressedDigest, Size: layers[0].UncompressedSize, @@ -513,6 +707,38 @@ func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo t return false, types.BlobInfo{}, nil } +// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). +// info.Digest must not be empty. +// layerIndexInImage must be properly set to the layer index of the corresponding blob in the image. This value is required to allow parallel executions of +// PutBlob() and TryReusingBlob() where the layers must be written to the backend storage in sequential order. A value >= 0 indicates that the blob is a layer. 
+// Note that only the containers-storage destination is sensitive to the layerIndexInImage parameter. Other transport destinations ignore it. +// Same applies to bar, which is used in the containers-storage destination to update the progress bars displayed in the terminal. If it's nil, it will be ignored. +// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. +// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. +// May use and/or update cache. +func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, layerIndexInImage int, cache types.BlobInfoCache, canSubstitute bool, bar *progress.Bar) (bool, types.BlobInfo, error) { + reusable, blob, err := s.tryReusingBlob(ctx, blobinfo, layerIndexInImage, cache, canSubstitute) + // If we can reuse the blob or hit an error trying to do so, we need to + // signal the result through the channel as another Goroutine is potentially + // waiting for it. If we can't reuse the blob and encountered no error, we + // need to copy it and will send the signal in PutBlob(). + if (layerIndexInImage >= 0) && (err != nil || reusable) { + channel := s.getChannelForLayer(layerIndexInImage) + channel <- (err == nil) + } + if bar != nil && reusable { + bar.ReplaceBar( + progress.DigestToCopyAction(blobinfo.Digest, "blob"), + 0, + progress.BarOptions{ + StaticMessage: "skipped: already exists", + }) + } + return reusable, blob, err +} + // computeID computes a recommended image ID based on information we have so far. 
If // the manifest is not of a type that we recognize, we return an empty value, indicating // that since we don't have a recommendation, a random ID should be used if one needs @@ -572,6 +798,102 @@ func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, er return nil, errors.New("blob not found") } +// commitBlob commits the specified blob to the storage. If not already done, +// it will block until the parent layer is committed to the storage. Note that +// the only caller of commitBlob() is PutBlob(), which is recording the results +// and sends the result through the corresponding channel in s.indexToDoneChannel. +func (s *storageImageDestination) commitBlob(ctx context.Context, blob types.BlobInfo, previousID string) (string, error) { + logrus.Debugf("committing blob %q", blob.Digest) + // Check if there's already a layer with the ID that we'd give to the result of applying + // this layer blob to its parent, if it has one, or the blob's hex value otherwise. + s.putBlobMutex.Lock() + diffID, haveDiffID := s.blobDiffIDs[blob.Digest] + s.putBlobMutex.Unlock() + if !haveDiffID { + return "", errors.Errorf("we have blob %q, but don't know its uncompressed digest", blob.Digest.String()) + } + + id := diffID.Hex() + if previousID != "" { + id = digest.Canonical.FromBytes([]byte(previousID + "+" + diffID.Hex())).Hex() + } + if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil { + logrus.Debugf("layer for blob %q already found in storage", blob.Digest) + return layer.ID, nil + } + // Check if we previously cached a file with that blob's contents. If we didn't, + // then we need to read the desired contents from a layer. + s.putBlobMutex.Lock() + filename, ok := s.filenames[blob.Digest] + s.putBlobMutex.Unlock() + if !ok { + // Try to find the layer with contents matching that blobsum. 
+ layer := "" + layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(blob.Digest) + if err2 == nil && len(layers) > 0 { + layer = layers[0].ID + } else { + layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(blob.Digest) + if err2 == nil && len(layers) > 0 { + layer = layers[0].ID + } + } + if layer == "" { + return "", errors.Wrapf(err2, "error locating layer for blob %q", blob.Digest) + } + // Read the layer's contents. + noCompression := archive.Uncompressed + diffOptions := &storage.DiffOptions{ + Compression: &noCompression, + } + diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions) + if err2 != nil { + return "", errors.Wrapf(err2, "error reading layer %q for blob %q", layer, blob.Digest) + } + // Copy the layer diff to a file. Diff() takes a lock that it holds + // until the ReadCloser that it returns is closed, and PutLayer() wants + // the same lock, so the diff can't just be directly streamed from one + // to the other. + filename = s.computeNextBlobCacheFile() + file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600) + if err != nil { + diff.Close() + return "", errors.Wrapf(err, "error creating temporary file %q", filename) + } + // Copy the data to the file. + // TODO: This can take quite some time, and should ideally be cancellable using + // ctx.Done(). + _, err = io.Copy(file, diff) + diff.Close() + file.Close() + if err != nil { + return "", errors.Wrapf(err, "error storing blob to file %q", filename) + } + // Make sure that we can find this file later, should we need the layer's + // contents again. + s.putBlobMutex.Lock() + s.filenames[blob.Digest] = filename + s.putBlobMutex.Unlock() + } + // Read the cached blob and use it as a diff. + file, err := os.Open(filename) + if err != nil { + return "", errors.Wrapf(err, "error opening file %q", filename) + } + defer file.Close() + // Build the new layer using the diff, regardless of where it came from. 
+ // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). + layer, _, err := s.imageRef.transport.store.PutLayer(id, previousID, nil, "", false, nil, file) + if err != nil && errors.Cause(err) != storage.ErrDuplicateID { + return "", errors.Wrapf(err, "error adding layer with blob %q", blob.Digest) + } + return layer.ID, nil +} + +// Commit marks the process of storing the image as successful and asks for the image to be persisted. +// WARNING: This does not have any transactional semantics: +// - Uploaded data MAY be visible to others before Commit() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) func (s *storageImageDestination) Commit(ctx context.Context) error { // Find the list of layer blobs. if len(s.manifest) == 0 { @@ -581,6 +903,7 @@ func (s *storageImageDestination) Commit(ctx context.Context) error { if err != nil { return errors.Wrapf(err, "error parsing manifest") } + layerBlobs := man.LayerInfos() // Extract or find the layers. lastLayer := "" @@ -588,10 +911,7 @@ func (s *storageImageDestination) Commit(ctx context.Context) error { if blob.EmptyLayer { continue } - - // Check if there's already a layer with the ID that we'd give to the result of applying - // this layer blob to its parent, if it has one, or the blob's hex value otherwise. - diffID, haveDiffID := s.blobDiffIDs[blob.Digest] + _, haveDiffID := s.blobDiffIDs[blob.Digest] if !haveDiffID { // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(), // or to even check if we had it. @@ -600,90 +920,27 @@ func (s *storageImageDestination) Commit(ctx context.Context) error { // TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only // so far we are going to accommodate that (if we should be doing that at all). 
logrus.Debugf("looking for diffID for blob %+v", blob.Digest) - has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, none.NoCache, false) + has, _, err := s.tryReusingBlob(ctx, blob.BlobInfo, -1, none.NoCache, false) if err != nil { return errors.Wrapf(err, "error checking for a layer based on blob %q", blob.Digest.String()) } if !has { return errors.Errorf("error determining uncompressed digest for blob %q", blob.Digest.String()) } - diffID, haveDiffID = s.blobDiffIDs[blob.Digest] + _, haveDiffID = s.blobDiffIDs[blob.Digest] if !haveDiffID { return errors.Errorf("we have blob %q, but don't know its uncompressed digest", blob.Digest.String()) } } - id := diffID.Hex() - if lastLayer != "" { - id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex() - } - if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil { - // There's already a layer that should have the right contents, just reuse it. - lastLayer = layer.ID - continue - } - // Check if we previously cached a file with that blob's contents. If we didn't, - // then we need to read the desired contents from a layer. - filename, ok := s.filenames[blob.Digest] - if !ok { - // Try to find the layer with contents matching that blobsum. - layer := "" - layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(blob.Digest) - if err2 == nil && len(layers) > 0 { - layer = layers[0].ID - } else { - layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(blob.Digest) - if err2 == nil && len(layers) > 0 { - layer = layers[0].ID - } - } - if layer == "" { - return errors.Wrapf(err2, "error locating layer for blob %q", blob.Digest) - } - // Read the layer's contents. 
- noCompression := archive.Uncompressed - diffOptions := &storage.DiffOptions{ - Compression: &noCompression, - } - diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions) - if err2 != nil { - return errors.Wrapf(err2, "error reading layer %q for blob %q", layer, blob.Digest) - } - // Copy the layer diff to a file. Diff() takes a lock that it holds - // until the ReadCloser that it returns is closed, and PutLayer() wants - // the same lock, so the diff can't just be directly streamed from one - // to the other. - filename = s.computeNextBlobCacheFile() - file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600) - if err != nil { - diff.Close() - return errors.Wrapf(err, "error creating temporary file %q", filename) - } - // Copy the data to the file. - // TODO: This can take quite some time, and should ideally be cancellable using - // ctx.Done(). - _, err = io.Copy(file, diff) - diff.Close() - file.Close() - if err != nil { - return errors.Wrapf(err, "error storing blob to file %q", filename) - } - // Make sure that we can find this file later, should we need the layer's - // contents again. - s.filenames[blob.Digest] = filename - } - // Read the cached blob and use it as a diff. - file, err := os.Open(filename) + newID, err := s.commitBlob(ctx, blob.BlobInfo, lastLayer) if err != nil { - return errors.Wrapf(err, "error opening file %q", filename) - } - defer file.Close() - // Build the new layer using the diff, regardless of where it came from. - // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). 
- layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, nil, file) - if err != nil && errors.Cause(err) != storage.ErrDuplicateID { - return errors.Wrapf(err, "error adding layer with blob %q", blob.Digest) + return err } - lastLayer = layer.ID + lastLayer = newID + } + + if lastLayer == "" { + return fmt.Errorf("could not find top layer") } // If one of those blobs was a configuration blob, then we can try to dig out the date when the image diff --git a/vendor/github.com/containers/image/storage/storage_transport.go b/vendor/github.com/containers/image/storage/storage_transport.go index 3a6be6e0017..70c18640e4d 100644 --- a/vendor/github.com/containers/image/storage/storage_transport.go +++ b/vendor/github.com/containers/image/storage/storage_transport.go @@ -4,7 +4,6 @@ package storage import ( "fmt" - "os" "path/filepath" "strings" @@ -39,6 +38,12 @@ var ( ErrPathNotAbsolute = errors.New("path name is not absolute") ) +// Name returns the name of the transport and can be used for type detection of +// a Transport object. +func Name() string { + return "containers-storage" +} + // StoreTransport is an ImageTransport that uses a storage.Store to parse // references, either its own default or one that it's told to use. type StoreTransport interface { @@ -72,7 +77,7 @@ type storageTransport struct { func (s *storageTransport) Name() string { // Still haven't really settled on a name. - return "containers-storage" + return Name() } // SetStore sets the Store object which the Transport will use for parsing @@ -181,7 +186,7 @@ func (s *storageTransport) GetStore() (storage.Store, error) { // Return the transport's previously-set store. If we don't have one // of those, initialize one now. 
if s.store == nil { - options, err := storage.DefaultStoreOptions(os.Getuid() != 0, os.Getuid()) + options, err := storage.DefaultStoreOptionsAutoDetectUID() if err != nil { return nil, err } diff --git a/vendor/github.com/containers/image/types/types.go b/vendor/github.com/containers/image/types/types.go index 9fdab2314a4..447542b42bf 100644 --- a/vendor/github.com/containers/image/types/types.go +++ b/vendor/github.com/containers/image/types/types.go @@ -6,8 +6,9 @@ import ( "time" "github.com/containers/image/docker/reference" + "github.com/containers/image/pkg/progress" "github.com/opencontainers/go-digest" - "github.com/opencontainers/image-spec/specs-go/v1" + v1 "github.com/opencontainers/image-spec/specs-go/v1" ) // ImageTransport is a top-level namespace for ways to to store/load an image. @@ -262,20 +263,29 @@ type ImageDestination interface { // inputInfo.Size is the expected length of stream, if known. // inputInfo.MediaType describes the blob format, if known. // May update cache. + // layerIndexInImage must be properly set to the layer index of the corresponding blob in the image. This value is required to allow parallel executions of + // PutBlob() and TryReusingBlob() where the layers must be written to the backend storage in sequential order. A value >= 0 indicates that the blob is a layer. + // Non-layer blobs (e.g., configs or throwaway layers) must have a value < 0. + // Note that only the containers-storage destination is sensitive to the layerIndexInImage parameter. Other transport destinations ignore it. + // Same applies to bar, which is used in the containers-storage destination to update the progress bars displayed in the terminal. If it's nil, it will be ignored. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. 
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. - PutBlob(ctx context.Context, stream io.Reader, inputInfo BlobInfo, cache BlobInfoCache, isConfig bool) (BlobInfo, error) + PutBlob(ctx context.Context, stream io.Reader, inputInfo BlobInfo, layerIndexInImage int, cache BlobInfoCache, isConfig bool, bar *progress.Bar) (BlobInfo, error) // HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. HasThreadSafePutBlob() bool // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. + // layerIndexInImage must be properly set to the layer index of the corresponding blob in the image. This value is required to allow parallel executions of + // PutBlob() and TryReusingBlob() where the layers must be written to the backend storage in sequential order. A value >= 0 indicates that the blob is a layer. + // Note that only the containers-storage destination is sensitive to the layerIndexInImage parameter. Other transport destinations ignore it. + // Same applies to bar, which is used in the containers-storage destination to update the progress bars displayed in the terminal. If it's nil, it will be ignored. // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. - // If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size. + // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. 
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache. - TryReusingBlob(ctx context.Context, info BlobInfo, cache BlobInfoCache, canSubstitute bool) (bool, BlobInfo, error) + TryReusingBlob(ctx context.Context, info BlobInfo, layerIndexInImage int, cache BlobInfoCache, canSubstitute bool, bar *progress.Bar) (bool, BlobInfo, error) // PutManifest writes manifest to the destination. // FIXME? This should also receive a MIME type if known, to differentiate between schema versions. // If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), diff --git a/vendor/github.com/containers/image/vendor.conf b/vendor/github.com/containers/image/vendor.conf index 89b29722b98..f8ad0f2496c 100644 --- a/vendor/github.com/containers/image/vendor.conf +++ b/vendor/github.com/containers/image/vendor.conf @@ -1,7 +1,7 @@ github.com/containers/image github.com/sirupsen/logrus v1.0.0 -github.com/containers/storage v1.12.1 +github.com/containers/storage v1.12.2 github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76 github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1 github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716 @@ -13,7 +13,6 @@ github.com/containerd/continuity d8fb8589b0e8e85b8c8bbaa8840226d0dfeb7371 github.com/ghodss/yaml 04f313413ffd65ce25f2541bfd2b2ceec5c0908c github.com/gorilla/mux 94e7d24fd285520f3d12ae998f7fdd6b5393d453 github.com/imdario/mergo 50d4dbd4eb0e84778abe37cefef140271d96fade -github.com/mattn/go-runewidth 14207d285c6c197daabb5c9793d63e7af9ab2d50 github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062 github.com/mtrmac/gpgme b2432428689ca58c2b8e8dea9449d3295cf96fc9 
github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7 @@ -43,10 +42,10 @@ github.com/syndtr/gocapability master github.com/Microsoft/go-winio ab35fc04b6365e8fcb18e6e9e41ea4a02b10b175 github.com/Microsoft/hcsshim eca7177590cdcbd25bbc5df27e3b693a54b53a6a github.com/ulikunitz/xz v0.5.4 -github.com/boltdb/bolt master +github.com/etcd-io/bbolt v1.3.2 github.com/klauspost/pgzip v1.2.1 github.com/klauspost/compress v1.4.1 github.com/klauspost/cpuid v1.2.0 -github.com/vbauerster/mpb v3.3.4 +github.com/vbauerster/mpb v3.4.0 github.com/mattn/go-isatty v0.0.4 github.com/VividCortex/ewma v1.1.1 diff --git a/vendor/github.com/containers/image/version/version.go b/vendor/github.com/containers/image/version/version.go index 9915cb2faed..184274736df 100644 --- a/vendor/github.com/containers/image/version/version.go +++ b/vendor/github.com/containers/image/version/version.go @@ -4,11 +4,11 @@ import "fmt" const ( // VersionMajor is for an API incompatible changes - VersionMajor = 0 + VersionMajor = 1 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 1 + VersionMinor = 7 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 6 + VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. VersionDev = "-dev"