diff --git a/vendor.conf b/vendor.conf index a323a7ede2..51c51ca075 100644 --- a/vendor.conf +++ b/vendor.conf @@ -1,5 +1,5 @@ github.com/urfave/cli v1.17.0 -github.com/containers/image master +github.com/containers/image storage-update https://github.com/nalind/image github.com/opencontainers/go-digest master gopkg.in/cheggaaa/pb.v1 ad4efe000aa550bb54918c06ebbadc0ff17687b9 https://github.com/cheggaaa/pb github.com/containers/storage master diff --git a/vendor/github.com/containers/image/docker/archive/dest.go b/vendor/github.com/containers/image/docker/archive/dest.go index 1436069951..9fc85bd85b 100644 --- a/vendor/github.com/containers/image/docker/archive/dest.go +++ b/vendor/github.com/containers/image/docker/archive/dest.go @@ -19,15 +19,24 @@ func newImageDestination(ctx *types.SystemContext, ref archiveReference) (types. if ref.destinationRef == nil { return nil, errors.Errorf("docker-archive: destination reference not supplied (must be of form :)") } - fh, err := os.OpenFile(ref.path, os.O_WRONLY|os.O_EXCL|os.O_CREATE, 0644) + + // ref.path can be either a pipe or a regular file + // in the case of a pipe, we require that we can open it for write + // in the case of a regular file, we don't want to overwrite any pre-existing file + // so we check for Size() == 0 below (This is racy, but using O_EXCL would also be racy, + // only in a different way. Either way, it’s up to the user to not have two writers to the same path.) + fh, err := os.OpenFile(ref.path, os.O_WRONLY|os.O_CREATE, 0644) + if err != nil { + return nil, errors.Wrapf(err, "error opening file %q", ref.path) + } + + fhStat, err := fh.Stat() if err != nil { - // FIXME: It should be possible to modify archives, but the only really - // sane way of doing it is to create a copy of the image, modify - // it and then do a rename(2). 
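// A standalone sketch of the open-then-stat check above, using a made-up path:
// O_EXCL would always fail for a pre-existing FIFO, so the code opens the path
// unconditionally and then refuses only non-empty regular files.
package main

import (
	"fmt"
	"os"
)

func openDest(path string) (*os.File, error) {
	fh, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0644)
	if err != nil {
		return nil, fmt.Errorf("error opening file %q: %v", path, err)
	}
	st, err := fh.Stat()
	if err != nil {
		fh.Close()
		return nil, fmt.Errorf("error statting file %q: %v", path, err)
	}
	// A pipe reports !IsRegular(), so it is accepted; a regular file is
	// accepted only while it is empty (racily, as the comment above concedes).
	if st.Mode().IsRegular() && st.Size() != 0 {
		fh.Close()
		return nil, fmt.Errorf("%q already contains data", path)
	}
	return fh, nil
}

func main() {
	if f, err := openDest("/tmp/example-output.tar"); err != nil {
		fmt.Println(err)
	} else {
		f.Close()
	}
}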
- if os.IsExist(err) { - err = errors.New("docker-archive doesn't support modifying existing images") - } - return nil, err + return nil, errors.Wrapf(err, "error statting file %q", ref.path) + } + + if fhStat.Mode().IsRegular() && fhStat.Size() != 0 { + return nil, errors.New("docker-archive doesn't support modifying existing images") } return &archiveImageDestination{ diff --git a/vendor/github.com/containers/image/signature/mechanism_openpgp.go b/vendor/github.com/containers/image/signature/mechanism_openpgp.go index e383bde3e6..eccd610c9d 100644 --- a/vendor/github.com/containers/image/signature/mechanism_openpgp.go +++ b/vendor/github.com/containers/image/signature/mechanism_openpgp.go @@ -132,11 +132,17 @@ func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents [ if md.SignedBy == nil { return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", md.Signature)} } - if md.Signature.SigLifetimeSecs != nil { - expiry := md.Signature.CreationTime.Add(time.Duration(*md.Signature.SigLifetimeSecs) * time.Second) - if time.Now().After(expiry) { - return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Signature expired on %s", expiry)} + if md.Signature != nil { + if md.Signature.SigLifetimeSecs != nil { + expiry := md.Signature.CreationTime.Add(time.Duration(*md.Signature.SigLifetimeSecs) * time.Second) + if time.Now().After(expiry) { + return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Signature expired on %s", expiry)} + } } + } else if md.SignatureV3 == nil { + // Coverage: If md.SignedBy != nil, the final md.UnverifiedBody.Read() either sets one of md.Signature or md.SignatureV3, + // or sets md.SignatureError. + return nil, "", InvalidSignatureError{msg: "Unexpected openpgp.MessageDetails: neither Signature nor SignatureV3 is set"} } // Uppercase the fingerprint to be compatible with gpgme diff --git a/vendor/github.com/containers/image/storage/storage_image.go b/vendor/github.com/containers/image/storage/storage_image.go index 8b708bdf94..7ff7c72eb7 100644 --- a/vendor/github.com/containers/image/storage/storage_image.go +++ b/vendor/github.com/containers/image/storage/storage_image.go @@ -3,20 +3,25 @@ package storage import ( "bytes" "encoding/json" + "fmt" "io" "io/ioutil" - "time" - - "github.com/pkg/errors" + "os" + "path/filepath" + "sync/atomic" "github.com/Sirupsen/logrus" + "github.com/containers/image/docker/reference" "github.com/containers/image/image" "github.com/containers/image/manifest" "github.com/containers/image/types" "github.com/containers/storage" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/ioutils" - ddigest "github.com/opencontainers/go-digest" + digest "github.com/opencontainers/go-digest" + imgspec "github.com/opencontainers/image-spec/specs-go" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" ) var ( @@ -26,8 +31,7 @@ var ( // ErrBlobSizeMismatch is returned when PutBlob() is given a blob // with an expected size that doesn't match the reader. ErrBlobSizeMismatch = errors.New("blob size mismatch") - // ErrNoManifestLists is returned when GetTargetManifest() is - // called. + // ErrNoManifestLists is returned when GetTargetManifest() is called. 
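// A standalone sketch of the expiry arithmetic in the openpgp change above,
// with invented values standing in for md.Signature.CreationTime and
// *md.Signature.SigLifetimeSecs.
package main

import (
	"fmt"
	"time"
)

func main() {
	creation := time.Date(2017, 6, 1, 0, 0, 0, 0, time.UTC)
	lifetime := uint32(86400) // like *md.Signature.SigLifetimeSecs: seconds of validity
	expiry := creation.Add(time.Duration(lifetime) * time.Second)
	if time.Now().After(expiry) {
		fmt.Printf("Signature expired on %s\n", expiry)
	}
}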
ErrNoManifestLists = errors.New("manifest lists are not supported by this transport") // ErrNoSuchImage is returned when we attempt to access an image which // doesn't exist in the storage area. @@ -35,53 +39,54 @@ var ( ) type storageImageSource struct { - imageRef storageReference - Tag string `json:"tag,omitempty"` - Created time.Time `json:"created-time,omitempty"` - ID string `json:"id"` - BlobList []types.BlobInfo `json:"blob-list,omitempty"` // Ordered list of every blob the image has been told to handle - Layers map[ddigest.Digest][]string `json:"layers,omitempty"` // Map from digests of blobs to lists of layer IDs - LayerPosition map[ddigest.Digest]int `json:"-"` // Where we are in reading a blob's layers - SignatureSizes []int `json:"signature-sizes"` // List of sizes of each signature slice + image *storageImage } type storageImageDestination struct { + image types.Image imageRef storageReference - Tag string `json:"tag,omitempty"` - Created time.Time `json:"created-time,omitempty"` - ID string `json:"id"` - BlobList []types.BlobInfo `json:"blob-list,omitempty"` // Ordered list of every blob the image has been told to handle - Layers map[ddigest.Digest][]string `json:"layers,omitempty"` // Map from digests of blobs to lists of layer IDs - BlobData map[ddigest.Digest][]byte `json:"-"` // Map from names of blobs that aren't layers to contents, temporary - Manifest []byte `json:"-"` // Manifest contents, temporary - Signatures []byte `json:"-"` // Signature contents, temporary - SignatureSizes []int `json:"signature-sizes"` // List of sizes of each signature slice + directory string // Temporary directory where we store blobs until Commit() time + counter int32 // A counter that we use for computing filenames to assign to blobs + manifest []byte // Manifest contents, temporary + signatures []byte // Signature contents, temporary + blobOrder []digest.Digest // List of layer blobsums, in the order they were put + blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs + fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes + filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them + SignatureSizes []int `json:"signature-sizes"` // List of sizes of each signature slice } -type storageLayerMetadata struct { - Digest string `json:"digest,omitempty"` - Size int64 `json:"size"` - CompressedSize int64 `json:"compressed-size,omitempty"` +type storageImageUnnamedDestination struct { + named *storageImageDestination } type storageImage struct { types.Image - size int64 + imageRef storageReference + reader *storageUnnamedImageReader + size int64 +} + +type storageUnnamedImageReader struct { + ID string + transport storageTransport + layerPosition map[digest.Digest]int // Where we are in reading a blob's layers + SignatureSizes []int `json:"signature-sizes"` // List of sizes of each signature slice } -// newImageSource sets us up to read out an image, which needs to already exist. -func newImageSource(imageRef storageReference) (*storageImageSource, error) { +// newUnnamedImageReader sets us up to read out an image without making any changes to what we read before +// handing it back to the caller, without caring about the image's name. +func newUnnamedImageReader(imageRef storageReference) (*storageUnnamedImageReader, error) { + // First, locate the image using the original reference. 
img, err := imageRef.resolveImage() if err != nil { return nil, err } - image := &storageImageSource{ - imageRef: imageRef, - Created: time.Now(), + // Build the reader object, reading the image referred to by a possibly-updated reference. + image := &storageUnnamedImageReader{ ID: img.ID, - BlobList: []types.BlobInfo{}, - Layers: make(map[ddigest.Digest][]string), - LayerPosition: make(map[ddigest.Digest]int), + transport: imageRef.transport, + layerPosition: make(map[digest.Digest]int), SignatureSizes: []int{}, } if err := json.Unmarshal([]byte(img.Metadata), image); err != nil { @@ -90,202 +95,378 @@ func newImageSource(imageRef storageReference) (*storageImageSource, error) { return image, nil } -// newImageDestination sets us up to write a new image. +// Reference returns the image reference that we used to find this image. Since we're officially +// unnamed, return a reference with no naming information. +func (s storageUnnamedImageReader) Reference() types.ImageReference { + return newReference(s.transport, "", s.ID, nil, "", "") +} + +// Close cleans up any resources we tied up while reading the image. +func (s storageUnnamedImageReader) Close() error { + return nil +} + +// GetBlob reads the data blob or filesystem layer which matches the digest and size, if given. +func (s *storageUnnamedImageReader) GetBlob(info types.BlobInfo) (rc io.ReadCloser, n int64, err error) { + rc, n, _, err = s.getBlobAndLayerID(info, false) + return rc, n, err +} + +// getBlobAndLayerID reads the data blob or filesystem layer which matches the digest and size, if given, and returns +// the layer ID for the layer, if it was a layer. +func (s *storageUnnamedImageReader) getBlobAndLayerID(info types.BlobInfo, decompress bool) (rc io.ReadCloser, n int64, layerID string, err error) { + var layer storage.Layer + var diffOptions *storage.DiffOptions + // We need a valid digest value. + err = info.Digest.Validate() + if err != nil { + return nil, -1, "", err + } + // Check if the blob corresponds to a diff that was used to initialize any layers, either + // before or after decompression, since we don't care. + uncompressedLayers, err := s.transport.store.LayersByUncompressedDigest(info.Digest) + compressedLayers, err := s.transport.store.LayersByCompressedDigest(info.Digest) + // If it's not a layer, then it must be a data item. + if len(uncompressedLayers) == 0 && len(compressedLayers) == 0 { + b, err := s.transport.store.ImageBigData(s.ID, info.Digest.String()) + if err != nil { + return nil, -1, "", err + } + r := bytes.NewReader(b) + logrus.Debugf("exporting opaque data as blob %q", info.Digest.String()) + return ioutil.NopCloser(r), int64(r.Len()), "", nil + } + // Step through each list of matching layers. Tests may want to verify that if we have multiple layers + // which claim to have the same contents, that we actually do have multiple layers, otherwise we could + // just go ahead and use the first one every time. + i := s.layerPosition[info.Digest] + s.layerPosition[info.Digest] = i + 1 + if len(uncompressedLayers) > 0 { + layer = uncompressedLayers[i%len(uncompressedLayers)] + } else { + layer = compressedLayers[i%len(compressedLayers)] + } + layerID = layer.ID + // If we've been told to force decompression, do so.
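// A standalone sketch of the layerPosition round-robin above: repeated reads of
// the same digest cycle through every layer that claims those contents, with a
// toy []string standing in for the []storage.Layer results.
package main

import "fmt"

func main() {
	layers := []string{"layer-a", "layer-b"} // two layers with identical contents
	position := map[string]int{}             // like storageUnnamedImageReader.layerPosition
	blob := "sha256:1234"
	for read := 0; read < 4; read++ {
		i := position[blob]
		position[blob] = i + 1
		fmt.Println("read", read, "serviced by", layers[i%len(layers)])
	}
}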
+ if decompress { + noCompression := archive.Uncompressed + diffOptions = &storage.DiffOptions{ + Compression: &noCompression, + } + if layer.UncompressedSize < 0 { + n = -1 + } else { + n = layer.UncompressedSize + } + logrus.Debugf("exporting filesystem layer %q without compression for blob %q", layerID, info.Digest) + } else { + if layer.CompressedSize < 0 { + n = -1 + } else { + n = layer.CompressedSize + } + logrus.Debugf("exporting filesystem layer %q with default compression (%v) for blob %q", layerID, layer.CompressionType, info.Digest) + } + rc, err = s.transport.store.Diff("", layerID, diffOptions) + if err != nil { + return nil, -1, "", err + } + return rc, n, layerID, err +} + +// GetManifest() reads the image's manifest. +func (s *storageUnnamedImageReader) GetManifest() (manifestBlob []byte, MIMEType string, err error) { + manifestBlob, err = s.transport.store.ImageBigData(s.ID, "manifest") + return manifestBlob, manifest.GuessMIMEType(manifestBlob), err +} + +// GetTargetManifest() is not supported. +func (s *storageUnnamedImageReader) GetTargetManifest(d digest.Digest) (manifestBlob []byte, MIMEType string, err error) { + return nil, "", ErrNoManifestLists +} + +// GetSignatures() parses the image's signatures blob into a slice of byte slices. +func (s *storageUnnamedImageReader) GetSignatures() (signatures [][]byte, err error) { + var offset int + sigslice := [][]byte{} + signature, err := s.transport.store.ImageBigData(s.ID, "signatures") + if err != nil { + if !os.IsNotExist(errors.Cause(err)) { + logrus.Debugf("got error %v looking up signatures for image %q", err, s.ID) + } + return sigslice, nil + } + for _, length := range s.SignatureSizes { + sigslice = append(sigslice, signature[offset:offset+length]) + offset += length + } + if offset != len(signature) { + return nil, errors.Errorf("signatures data contained %d extra bytes", len(signature)-offset) + } + return sigslice, nil +} + +// getSize() adds up the sizes of the image's data blobs (which includes the configuration blob), the +// signatures, and the uncompressed sizes of all of the image's layers. +func (s *storageUnnamedImageReader) getSize() (int64, error) { + var sum int64 + // Size up the data blobs. + dataNames, err := s.transport.store.ListImageBigData(s.ID) + if err != nil { + return -1, errors.Wrapf(err, "error reading image %q", s.ID) + } + for _, dataName := range dataNames { + bigSize, err := s.transport.store.ImageBigDataSize(s.ID, dataName) + if err != nil { + return -1, errors.Wrapf(err, "error reading data blob size %q for %q", dataName, s.ID) + } + sum += bigSize + } + // Add the signature sizes. + for _, sigSize := range s.SignatureSizes { + sum += int64(sigSize) + } + // Prepare to walk the layer list. + img, err := s.transport.store.Image(s.ID) + if err != nil { + return -1, errors.Wrapf(err, "error reading image info %q", s.ID) + } + // Walk the layer list.
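// A standalone sketch of the SignatureSizes bookkeeping used by GetSignatures
// above: one concatenated blob plus a list of lengths round-trips back to the
// original slices, with sample data in place of the stored "signatures" item.
package main

import (
	"bytes"
	"fmt"
)

func main() {
	sigs := [][]byte{[]byte("sig-one"), []byte("signature-two")}
	// PutSignatures side: concatenate and remember each length.
	blob := []byte{}
	sizes := []int{}
	for _, sig := range sigs {
		blob = append(blob, sig...)
		sizes = append(sizes, len(sig))
	}
	// GetSignatures side: slice the blob back apart.
	offset := 0
	out := [][]byte{}
	for _, length := range sizes {
		out = append(out, blob[offset:offset+length])
		offset += length
	}
	fmt.Println(offset == len(blob), bytes.Equal(out[0], sigs[0]), bytes.Equal(out[1], sigs[1]))
}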
+ layerID := img.TopLayer + for layerID != "" { + layer, err := s.transport.store.Layer(layerID) + if err != nil { + return -1, err + } + if layer.UncompressedSize < 0 { + return -1, errors.Errorf("size for layer %q is unknown, failing getSize()", layerID) + } + sum += layer.UncompressedSize + if layer.Parent == "" { + break + } + layerID = layer.Parent + } + return sum, nil +} + +// newImage creates an image that knows its size and always refers to its layer blobs using +// uncompressed digests and sizes +func newImage(s storageReference) (*storageImage, error) { + reader, err := newUnnamedImageReader(s) + if err != nil { + return nil, err + } + // Compute the image's size. + size, err := reader.getSize() + if err != nil { + return nil, err + } + // Build the updated information that we want for the manifest. + simg, err := reader.transport.store.Image(reader.ID) + if err != nil { + return nil, err + } + updatedBlobInfos := []types.BlobInfo{} + diffIDs := []digest.Digest{} + layerID := simg.TopLayer + for layerID != "" { + layer, err := reader.transport.store.Layer(layerID) + if err != nil { + return nil, err + } + if layer.UncompressedDigest == "" { + return nil, errors.Errorf("uncompressed digest for layer %q is unknown", layerID) + } + blobInfo := types.BlobInfo{ + Digest: layer.UncompressedDigest, + Size: layer.UncompressedSize, + } + updatedBlobInfos = append([]types.BlobInfo{blobInfo}, updatedBlobInfos...) + diffIDs = append([]digest.Digest{layer.UncompressedDigest}, diffIDs...) + if layer.Parent == "" { + break + } + layerID = layer.Parent + } + info := types.ManifestUpdateInformation{ + Destination: nil, + LayerInfos: updatedBlobInfos, + LayerDiffIDs: diffIDs, + } + options := types.ManifestUpdateOptions{ + LayerInfos: updatedBlobInfos, + InformationOnly: info, + } + // Return a version of the image that uses the updated manifest. + img, err := image.FromSource(reader) + if err != nil { + return nil, err + } + updated, err := img.UpdatedImage(options) + if err != nil { + return nil, err + } + // Discard a digest in the image reference, if there is one, since the updated image manifest + // won't match it. + newName := s.name + if s.digest != "" { + newName = reference.TrimNamed(newName) + } + newRef := newReference(reader.transport, verboseName(newName), reader.ID, newName, "", "") + return &storageImage{Image: updated, imageRef: *newRef, reader: reader, size: size}, nil +} + +// Size returns the image's previously-computed size. +func (s *storageImage) Size() (int64, error) { + return s.size, nil +} + +// newImageSource reads an image that has been updated to not compress layers. +func newImageSource(s storageReference) (*storageImageSource, error) { + image, err := newImage(s) + if err != nil { + return nil, err + } + return &storageImageSource{image: image}, nil +} + +// GetBlob returns either a data blob or an uncompressed layer blob. In practice this avoids attempting +// to recompress any layers that were originally delivered in compressed form, since we know that we +// updated the manifest to refer to the blob using the digest of the uncompressed version. +func (s *storageImageSource) GetBlob(info types.BlobInfo) (rc io.ReadCloser, n int64, err error) { + rc, n, _, err = s.image.reader.getBlobAndLayerID(info, true) + return rc, n, err +} + +// GetManifest returns the updated manifest. +func (s *storageImageSource) GetManifest() ([]byte, string, error) { + return s.image.Manifest() +} + +// GetTargetManifest still returns an error. 
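// A standalone sketch of the top-down layer walk in newImage above: storage
// hands us the topmost layer, so prepending while following Parent links yields
// base-first ordering; a toy parent map stands in for store.Layer lookups.
package main

import "fmt"

func main() {
	parent := map[string]string{"top": "middle", "middle": "base", "base": ""}
	ordered := []string{}
	layerID := "top" // like img.TopLayer
	for layerID != "" {
		ordered = append([]string{layerID}, ordered...)
		layerID = parent[layerID]
	}
	fmt.Println(ordered) // [base middle top]
}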
+func (s *storageImageSource) GetTargetManifest(d digest.Digest) (manifestBlob []byte, MIMEType string, err error) { + return nil, "", ErrNoManifestLists +} + +// GetSignatures returns the original signatures. +func (s *storageImageSource) GetSignatures() ([][]byte, error) { + return s.image.reader.GetSignatures() +} + +// Reference returns a trimmed copy of the image reference that we used to find this image. +func (s storageImageSource) Reference() types.ImageReference { + return s.image.imageRef +} + +// Close cleans up any resources we tied up while reading the image. +func (s *storageImageSource) Close() error { + return s.image.Close() +} + +// newImageDestination sets us up to write a new image, caching blobs in a temporary directory until +// it's time to Commit() the image func newImageDestination(imageRef storageReference) (*storageImageDestination, error) { + directory, err := ioutil.TempDir("", "storage") + if err != nil { + return nil, errors.Wrapf(err, "error creating a temporary directory") + } image := &storageImageDestination{ imageRef: imageRef, - Tag: imageRef.reference, - Created: time.Now(), - ID: imageRef.id, - BlobList: []types.BlobInfo{}, - Layers: make(map[ddigest.Digest][]string), - BlobData: make(map[ddigest.Digest][]byte), + directory: directory, + blobDiffIDs: make(map[digest.Digest]digest.Digest), + fileSizes: make(map[digest.Digest]int64), + filenames: make(map[digest.Digest]string), SignatureSizes: []int{}, } return image, nil } -func (s storageImageSource) Reference() types.ImageReference { - return s.imageRef -} - +// Reference returns the image reference that we want the resulting image to match. func (s storageImageDestination) Reference() types.ImageReference { return s.imageRef } -func (s storageImageSource) Close() error { - return nil -} - -func (s storageImageDestination) Close() error { - return nil +// Close cleans up the temporary directory. +func (s *storageImageDestination) Close() error { + if s.image != nil { + img := s.image + s.image = nil + img.Close() + } + return os.RemoveAll(s.directory) } +// ShouldCompressLayers indicates whether or not a caller should compress not-already-compressed +// data when handing it to us. func (s storageImageDestination) ShouldCompressLayers() bool { - // We ultimately have to decompress layers to populate trees on disk, - // so callers shouldn't bother compressing them before handing them to - // us, if they're not already compressed. + // We ultimately have to decompress layers to populate trees on disk, so callers shouldn't + // bother compressing them before handing them to us, if they're not already compressed. return false } -// putBlob stores a layer or data blob, optionally enforcing that a digest in -// blobinfo matches the incoming data. -func (s *storageImageDestination) putBlob(stream io.Reader, blobinfo types.BlobInfo, enforceDigestAndSize bool) (types.BlobInfo, error) { +// PutBlob stores a layer or data blob in our temporary directory, checking that any information +// in the blobinfo matches the incoming data. +func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobInfo) (types.BlobInfo, error) { blobSize := blobinfo.Size - digest := blobinfo.Digest + blobDigest := blobinfo.Digest errorBlobInfo := types.BlobInfo{ Digest: "", Size: -1, } - // Try to read an initial snippet of the blob. 
- buf := [archive.HeaderSize]byte{} - n, err := io.ReadAtLeast(stream, buf[:], len(buf)) - if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { - return errorBlobInfo, err - } - // Set up to read the whole blob (the initial snippet, plus the rest) - // while digesting it with either the default, or the passed-in digest, - // if one was specified. - hasher := ddigest.Canonical.Digester() - if digest.Validate() == nil { - if a := digest.Algorithm(); a.Available() { + // Set up to digest the blob and count its size while saving it to a file. + hasher := digest.Canonical.Digester() + if blobDigest.Validate() == nil { + if a := blobDigest.Algorithm(); a.Available() { hasher = a.Digester() } } - hash := "" + diffID := digest.Canonical.Digester() + filename := filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.counter, 1))) + file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600) + if err != nil { + return errorBlobInfo, errors.Wrapf(err, "error creating temporary file %q", filename) + } + defer file.Close() counter := ioutils.NewWriteCounter(hasher.Hash()) - defragmented := io.MultiReader(bytes.NewBuffer(buf[:n]), stream) - multi := io.TeeReader(defragmented, counter) - if (n > 0) && archive.IsArchive(buf[:n]) { - // It's a filesystem layer. If it's not the first one in the - // image, we assume that the most recently added layer is its - // parent. - parentLayer := "" - for _, blob := range s.BlobList { - if layerList, ok := s.Layers[blob.Digest]; ok { - parentLayer = layerList[len(layerList)-1] - } - } - // If we have an expected content digest, generate a layer ID - // based on the parent's ID and the expected content digest. - id := "" - if digest.Validate() == nil { - id = ddigest.Canonical.FromBytes([]byte(parentLayer + "+" + digest.String())).Hex() - } - // Attempt to create the identified layer and import its contents. - layer, uncompressedSize, err := s.imageRef.transport.store.PutLayer(id, parentLayer, nil, "", true, multi) - if err != nil && errors.Cause(err) != storage.ErrDuplicateID { - logrus.Debugf("error importing layer blob %q as %q: %v", blobinfo.Digest, id, err) - return errorBlobInfo, err - } - if errors.Cause(err) == storage.ErrDuplicateID { - // We specified an ID, and there's already a layer with - // the same ID. Drain the input so that we can look at - // its length and digest. - _, err := io.Copy(ioutil.Discard, multi) - if err != nil && err != io.EOF { - logrus.Debugf("error digesting layer blob %q: %v", blobinfo.Digest, id, err) - return errorBlobInfo, err - } - hash = hasher.Digest().String() - } else { - // Applied the layer with the specified ID. Note the - // size info and computed digest. - hash = hasher.Digest().String() - layerMeta := storageLayerMetadata{ - Digest: hash, - CompressedSize: counter.Count, - Size: uncompressedSize, - } - if metadata, err := json.Marshal(&layerMeta); len(metadata) != 0 && err == nil { - s.imageRef.transport.store.SetMetadata(layer.ID, string(metadata)) - } - // Hang on to the new layer's ID. - id = layer.ID - } - // Check if the size looks right. - if enforceDigestAndSize && blobinfo.Size >= 0 && blobinfo.Size != counter.Count { - logrus.Debugf("layer blob %q size is %d, not %d, rejecting", blobinfo.Digest, counter.Count, blobinfo.Size) - if layer != nil { - // Something's wrong; delete the newly-created layer. - s.imageRef.transport.store.DeleteLayer(layer.ID) - } - return errorBlobInfo, ErrBlobSizeMismatch - } - // If the content digest was specified, verify it. 
- if enforceDigestAndSize && digest.Validate() == nil && digest.String() != hash { - logrus.Debugf("layer blob %q digests to %q, rejecting", blobinfo.Digest, hash) - if layer != nil { - // Something's wrong; delete the newly-created layer. - s.imageRef.transport.store.DeleteLayer(layer.ID) - } - return errorBlobInfo, ErrBlobDigestMismatch - } - // If we didn't get a blob size, return the one we calculated. - if blobSize == -1 { - blobSize = counter.Count - } - // If we didn't get a digest, construct one. - if digest == "" { - digest = ddigest.Digest(hash) - } - // Record that this layer blob is a layer, and the layer ID it - // ended up having. This is a list, in case the same blob is - // being applied more than once. - s.Layers[digest] = append(s.Layers[digest], id) - s.BlobList = append(s.BlobList, types.BlobInfo{Digest: digest, Size: counter.Count}) - if layer != nil { - logrus.Debugf("blob %q imported as a filesystem layer %q", blobinfo.Digest, id) - } else { - logrus.Debugf("layer blob %q already present as layer %q", blobinfo.Digest, id) - } - } else { - // It's just data. Finish scanning it in, check that our - // computed digest matches the passed-in digest, and store it, - // but leave it out of the blob-to-layer-ID map so that we can - // tell that it's not a layer. - blob, err := ioutil.ReadAll(multi) - if err != nil && err != io.EOF { - return errorBlobInfo, err - } - hash = hasher.Digest().String() - if enforceDigestAndSize && blobinfo.Size >= 0 && int64(len(blob)) != blobinfo.Size { - logrus.Debugf("blob %q size is %d, not %d, rejecting", blobinfo.Digest, int64(len(blob)), blobinfo.Size) - return errorBlobInfo, ErrBlobSizeMismatch - } - // If we were given a digest, verify that the content matches - // it. - if enforceDigestAndSize && digest.Validate() == nil && digest.String() != hash { - logrus.Debugf("blob %q digests to %q, rejecting", blobinfo.Digest, hash) - return errorBlobInfo, ErrBlobDigestMismatch - } - // If we didn't get a blob size, return the one we calculated. - if blobSize == -1 { - blobSize = int64(len(blob)) - } - // If we didn't get a digest, construct one. - if digest == "" { - digest = ddigest.Digest(hash) - } - // Save the blob for when we Commit(). - s.BlobData[digest] = blob - s.BlobList = append(s.BlobList, types.BlobInfo{Digest: digest, Size: int64(len(blob))}) - logrus.Debugf("blob %q imported as opaque data %q", blobinfo.Digest, digest) + reader := io.TeeReader(io.TeeReader(stream, counter), file) + decompressed, err := archive.DecompressStream(reader) + if err != nil { + return errorBlobInfo, errors.Wrap(err, "error setting up to decompress blob") + } + // Copy the data to the file. + _, err = io.Copy(diffID.Hash(), decompressed) + decompressed.Close() + if err != nil { + return errorBlobInfo, errors.Wrapf(err, "error storing blob to file %q", filename) + } + // Ensure that any information that we were given about the blob is correct. + if blobDigest.Validate() == nil && blobDigest != hasher.Digest() { + return errorBlobInfo, ErrBlobDigestMismatch + } + if blobSize >= 0 && blobSize != counter.Count { + return errorBlobInfo, ErrBlobSizeMismatch + } + // Record information about the blob. 
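// A standalone sketch of PutBlob's single-pass plumbing above: one TeeReader
// chain records the blob's compressed digest and size while a decompressor
// feeds a second digester for the DiffID; gzip stands in for
// archive.DecompressStream, and a bytes.Buffer stands in for the cache file.
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	// Build a small gzipped payload to stand in for an incoming layer blob.
	var compressed bytes.Buffer
	zw := gzip.NewWriter(&compressed)
	zw.Write([]byte("layer contents"))
	zw.Close()

	var file bytes.Buffer // where PutBlob would cache the raw bytes
	blobDigester := digest.Canonical.Digester()
	diffIDDigester := digest.Canonical.Digester()

	// Reading from this chain pulls compressed bytes, hashing and caching them as they pass.
	reader := io.TeeReader(io.TeeReader(&compressed, blobDigester.Hash()), &file)
	zr, err := gzip.NewReader(reader)
	if err != nil {
		panic(err)
	}
	// Draining the decompressor drives the whole chain and hashes the uncompressed stream.
	if _, err := io.Copy(diffIDDigester.Hash(), zr); err != nil {
		panic(err)
	}
	fmt.Println("blobsum:", blobDigester.Digest())
	fmt.Println("diffID: ", diffIDDigester.Digest())
	fmt.Println("cached :", file.Len(), "bytes")
}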
+ s.blobOrder = append(s.blobOrder, hasher.Digest()) + s.blobDiffIDs[hasher.Digest()] = diffID.Digest() + s.fileSizes[hasher.Digest()] = counter.Count + s.filenames[hasher.Digest()] = filename + if blobDigest.Validate() != nil { + blobDigest = hasher.Digest() + } + if blobSize < 0 { + blobSize = counter.Count + } return types.BlobInfo{ - Digest: digest, + Digest: blobDigest, Size: blobSize, }, nil } -// PutBlob is used to both store filesystem layers and binary data that is part -// of the image. Filesystem layers are assumed to be imported in order, as -// that is required by some of the underlying storage drivers. -func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobInfo) (types.BlobInfo, error) { - return s.putBlob(stream, blobinfo, true) -} - -// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob. +// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be +// reapplied using ReapplyBlob. +// // Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned. // If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil); // it returns a non-nil error only on an unexpected failure. @@ -293,93 +474,260 @@ func (s *storageImageDestination) HasBlob(blobinfo types.BlobInfo) (bool, int64, if blobinfo.Digest == "" { return false, -1, errors.Errorf(`Can not check for a blob with unknown digest`) } - for _, blob := range s.BlobList { - if blob.Digest == blobinfo.Digest { - return true, blob.Size, nil - } + if err := blobinfo.Digest.Validate(); err != nil { + return false, -1, errors.Wrapf(err, `Can not check for a blob with invalid digest`) } + // Check if we've already cached it in a file. + if size, ok := s.fileSizes[blobinfo.Digest]; ok { + return true, size, nil + } + // Check if we have a wasn't-compressed layer in storage that's based on that blob. + layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest) + if err != nil { + return false, -1, errors.Wrapf(err, `Error looking for layers with digest %q`, blobinfo.Digest) + } + if len(layers) > 0 { + // Save this for completeness. + s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest + return true, layers[0].UncompressedSize, nil + } + // Check if we have a was-compressed layer in storage that's based on that blob. + layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest) + if err != nil { + return false, -1, errors.Wrapf(err, `Error looking for compressed layers with digest %q`, blobinfo.Digest) + } + if len(layers) > 0 { + // Record the uncompressed value so that we can use it to calculate layer IDs. + s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest + return true, layers[0].CompressedSize, nil + } + // Nope, we don't have it. return false, -1, nil } +// ReapplyBlob is now a no-op apart from bookkeeping, assuming HasBlob() says we already have it.
func (s *storageImageDestination) ReapplyBlob(blobinfo types.BlobInfo) (types.BlobInfo, error) { - err := blobinfo.Digest.Validate() + present, size, err := s.HasBlob(blobinfo) if err != nil { - return types.BlobInfo{}, err + return types.BlobInfo{}, errors.Wrapf(err, "error reapplying blob %+v", blobinfo) } + if !present { + return types.BlobInfo{}, errors.Errorf("error reapplying blob %+v: blob was not previously applied", blobinfo) + } - if layerList, ok := s.Layers[blobinfo.Digest]; !ok || len(layerList) < 1 { - b, err := s.imageRef.transport.store.ImageBigData(s.ID, blobinfo.Digest.String()) + blobinfo.Size = size + s.blobOrder = append(s.blobOrder, blobinfo.Digest) + return blobinfo, nil +} + +// GetBlob() shouldn't really be called, but include an implementation in case other parts of the library +// start needing it. +func (s *storageImageUnnamedDestination) GetBlob(blobinfo types.BlobInfo) (rc io.ReadCloser, n int64, err error) { + if blobinfo.Digest == "" { + return nil, -1, errors.Errorf(`can't read a blob with unknown digest`) + } + if err := blobinfo.Digest.Validate(); err != nil { + return nil, -1, errors.Wrapf(err, `can't check for a blob with invalid digest`) + } + // Check if we've already cached the blob as a file. + if filename, ok := s.named.filenames[blobinfo.Digest]; ok { + f, err := os.Open(filename) if err != nil { - return types.BlobInfo{}, err + return nil, -1, errors.Wrapf(err, `can't read file %q`, filename) } - return types.BlobInfo{Digest: blobinfo.Digest, Size: int64(len(b))}, nil + return f, -1, nil + } + // Check if we have a wasn't-compressed layer in storage that's based on that blob. If we have one, + // start reading it. + layers, err := s.named.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest) + if err != nil { + return nil, -1, errors.Wrapf(err, `error looking for layers with digest %q`, blobinfo.Digest) } - layerList := s.Layers[blobinfo.Digest] - rc, _, err := diffLayer(s.imageRef.transport.store, layerList[len(layerList)-1]) + if len(layers) > 0 { + rc, err := s.named.imageRef.transport.store.Diff("", layers[0].ID, nil) + return rc, -1, err + } + // Check if we have a was-compressed layer in storage that's based on that blob. If we have one, + // start reading it. + layers, err = s.named.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest) if err != nil { - return types.BlobInfo{}, err + return nil, -1, errors.Wrapf(err, `error looking for compressed layers with digest %q`, blobinfo.Digest) + } + if len(layers) > 0 { + rc, err := s.named.imageRef.transport.store.Diff("", layers[0].ID, nil) + return rc, -1, err } - return s.putBlob(rc, blobinfo, false) + // Nope, we don't have it. + return nil, -1, errors.Errorf(`error locating blob with blobsum %q`, blobinfo.Digest.String()) } func (s *storageImageDestination) Commit() error { - // Create the image record. + // Find the list of layer blobs. We have to implement enough of an ImageSource to be able to + // parse the manifest to get a list of which blobs are filesystem layers, leaving any cached + // files that aren't filesystem layers to be saved as data items. + if s.image == nil { + img, err := image.FromSource(&storageImageUnnamedDestination{named: s}) + if err != nil { + return errors.Wrapf(err, "error locating manifest for layer blob list") + } + s.image = img + } + layerBlobs := s.image.LayerInfos() + // Extract or find the layers.
lastLayer := "" - for _, blob := range s.BlobList { - if layerList, ok := s.Layers[blob.Digest]; ok { - lastLayer = layerList[len(layerList)-1] + addedLayers := []string{} + for _, blob := range layerBlobs { + var diff io.ReadCloser + // Check if there's already a layer with the ID that we'd give to the result of applying + // this layer blob to its parent, if it has one, or the blob's hex value otherwise. + diffID, haveDiffID := s.blobDiffIDs[blob.Digest] + if !haveDiffID { + // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(), + // or to even check if we had it. + logrus.Debugf("looking for diffID for blob %+v", blob.Digest) + has, _, err := s.HasBlob(blob) + if err != nil { + return errors.Wrapf(err, "error checking for a layer based on blob %q", blob.Digest.String()) + } + if !has { + return errors.Errorf("error determining uncompressed digest for blob %q", blob.Digest.String()) + } + diffID, haveDiffID = s.blobDiffIDs[blob.Digest] + if !haveDiffID { + return errors.Errorf("we have blob %q, but don't know its uncompressed digest", blob.Digest.String()) + } + } + id := diffID.Hex() + if lastLayer != "" { + id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex() + } + if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil { + // There's already a layer that should have the right contents, just reuse it. + lastLayer = layer.ID + continue + } + // Check if we cached a file with that blobsum. If we didn't already have a layer with + // the blob's contents, we should have gotten a copy. + if filename, ok := s.filenames[blob.Digest]; ok { + // Use the file's contents to initialize the layer. + file, err2 := os.Open(filename) + if err2 != nil { + return errors.Wrapf(err2, "error opening file %q", filename) + } + defer file.Close() + diff = file + } + if diff == nil { + // Try to find a layer with contents matching that blobsum. + layer := "" + layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(blob.Digest) + if err2 == nil && len(layers) > 0 { + layer = layers[0].ID + } else { + layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(blob.Digest) + if err2 == nil && len(layers) > 0 { + layer = layers[0].ID + } + } + if layer == "" { + return errors.Wrapf(err2, "error locating layer for blob %q", blob.Digest) + } + // Use the layer's contents to initialize the new layer. + noCompression := archive.Uncompressed + diffOptions := &storage.DiffOptions{ + Compression: &noCompression, + } + diff, err2 = s.imageRef.transport.store.Diff("", layer, diffOptions) + if err2 != nil { + return errors.Wrapf(err2, "error reading layer %q for blob %q", layer, blob.Digest) + } + defer diff.Close() } + if diff == nil { + // This shouldn't have happened. + return errors.Errorf("error applying blob %q: content not found", blob.Digest) + } + // Build the new layer using the diff, regardless of where it came from. + layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, diff) + if err != nil { + return errors.Wrapf(err, "error adding layer with blob %q", blob.Digest) + } + lastLayer = layer.ID + addedLayers = append([]string{lastLayer}, addedLayers...) + } + // If one of those blobs was a configuration blob, then we can try to dig out the date when the image + // was originally created, in case we're just copying it. If not, no harm done. 
+ var options *storage.ImageOptions + if inspect, err := s.image.Inspect(); err == nil { + logrus.Debugf("setting image creation date to %s", inspect.Created) + options = &storage.ImageOptions{ + CreationDate: inspect.Created, + } + } + // Create the image record, pointing to the most-recently added layer. + intendedID := s.imageRef.id + if configInfo := s.image.ConfigInfo(); intendedID == "" && configInfo.Digest.Validate() == nil { + intendedID = configInfo.Digest.Hex() } - img, err := s.imageRef.transport.store.CreateImage(s.ID, nil, lastLayer, "", nil) + oldNames := []string{} + img, err := s.imageRef.transport.store.CreateImage(intendedID, nil, lastLayer, "", options) if err != nil { if errors.Cause(err) != storage.ErrDuplicateID { logrus.Debugf("error creating image: %q", err) - return errors.Wrapf(err, "error creating image %q", s.ID) + return errors.Wrapf(err, "error creating image %q", intendedID) } - img, err = s.imageRef.transport.store.Image(s.ID) + img, err = s.imageRef.transport.store.Image(intendedID) if err != nil { - return errors.Wrapf(err, "error reading image %q", s.ID) + return errors.Wrapf(err, "error reading image %q", intendedID) } if img.TopLayer != lastLayer { - logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", s.ID) - return errors.Wrapf(storage.ErrDuplicateID, "image with ID %q already exists, but uses a different top layer", s.ID) + logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", intendedID) + return errors.Wrapf(storage.ErrDuplicateID, "image with ID %q already exists, but uses a different top layer", intendedID) } logrus.Debugf("reusing image ID %q", img.ID) + oldNames = append(oldNames, img.Names...) } else { logrus.Debugf("created new image ID %q", img.ID) } - s.ID = img.ID - names := img.Names - if s.Tag != "" { - names = append(names, s.Tag) + // Add the non-layer blobs as data items. Since we only share layers, they should all be in files, so + // we just need to screen out the ones that are actually layers to get the list of non-layers. + dataBlobs := make(map[digest.Digest]struct{}) + for blob := range s.filenames { + dataBlobs[blob] = struct{}{} } - // We have names to set, so move those names to this image. - if len(names) > 0 { - if err := s.imageRef.transport.store.SetNames(img.ID, names); err != nil { + for _, layerBlob := range layerBlobs { + delete(dataBlobs, layerBlob.Digest) + } + for blob := range dataBlobs { + v, err := ioutil.ReadFile(s.filenames[blob]) + if err != nil { + return errors.Wrapf(err, "error copying non-layer blob %q to image", blob) + } + if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v); err != nil { if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) } - logrus.Debugf("error setting names on image %q: %v", img.ID, err) - return err + logrus.Debugf("error saving big data %q for image %q: %v", blob.String(), img.ID, err) + return errors.Wrapf(err, "error saving big data %q for image %q", blob.String(), img.ID) } - logrus.Debugf("set names of image %q to %v", img.ID, names) } - // Save the data blobs to disk, and drop their contents from memory. - keys := []ddigest.Digest{} - for k, v := range s.BlobData { - if err := s.imageRef.transport.store.SetImageBigData(img.ID, k.String(), v); err != nil { + // Set the reference's name on the image. 
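// A standalone sketch of the data-blob bookkeeping in Commit above: every
// cached file starts as a candidate, and everything the manifest lists as a
// layer is removed, leaving the config and other non-layer items; the digests
// here are toy strings.
package main

import "fmt"

func main() {
	filenames := map[string]string{ // like storageImageDestination.filenames
		"sha256:config": "/tmp/storage123/1",
		"sha256:layer1": "/tmp/storage123/2",
		"sha256:layer2": "/tmp/storage123/3",
	}
	layerBlobs := []string{"sha256:layer1", "sha256:layer2"}
	dataBlobs := make(map[string]struct{})
	for blob := range filenames {
		dataBlobs[blob] = struct{}{}
	}
	for _, layerBlob := range layerBlobs {
		delete(dataBlobs, layerBlob)
	}
	for blob := range dataBlobs {
		fmt.Println("non-layer blob:", blob) // only sha256:config remains
	}
}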
+ if name := s.imageRef.DockerReference(); name != nil { + names := append([]string{verboseName(name)}, oldNames...) + if err := s.imageRef.transport.store.SetNames(img.ID, names); err != nil { if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) } - logrus.Debugf("error saving big data %q for image %q: %v", k, img.ID, err) - return err + logrus.Debugf("error setting names %v on image %q: %v", names, img.ID, err) + return errors.Wrapf(err, "error setting names %v on image %q", names, img.ID) } - keys = append(keys, k) + logrus.Debugf("set names of image %q to %v", img.ID, names) } - for _, key := range keys { - delete(s.BlobData, key) + // Save the manifest. + manifest, _, err := s.image.Manifest() + if err != nil { + manifest = s.manifest } - // Save the manifest, if we have one. - if err := s.imageRef.transport.store.SetImageBigData(s.ID, "manifest", s.Manifest); err != nil { + if err := s.imageRef.transport.store.SetImageBigData(img.ID, "manifest", manifest); err != nil { if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) } @@ -387,12 +735,14 @@ func (s *storageImageDestination) Commit() error { return err } // Save the signatures, if we have any. - if err := s.imageRef.transport.store.SetImageBigData(s.ID, "signatures", s.Signatures); err != nil { - if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { - logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + if len(s.signatures) > 0 { + if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving signatures for image %q: %v", img.ID, err) + return err } - logrus.Debugf("error saving signatures for image %q: %v", img.ID, err) - return err } // Save our metadata. metadata, err := json.Marshal(s) @@ -404,7 +754,7 @@ func (s *storageImageDestination) Commit() error { return err } if len(metadata) != 0 { - if err = s.imageRef.transport.store.SetMetadata(s.ID, string(metadata)); err != nil { + if err = s.imageRef.transport.store.SetMetadata(img.ID, string(metadata)); err != nil { if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) } @@ -417,7 +767,7 @@ func (s *storageImageDestination) Commit() error { } var manifestMIMETypes = []string{ - // TODO(runcom): we'll add OCI as part of another PR here + imgspecv1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType, @@ -427,23 +777,58 @@ func (s *storageImageDestination) SupportedManifestMIMETypes() []string { return manifestMIMETypes } -// PutManifest writes manifest to the destination. -// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. -// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), -// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. +// GetManifest reads the manifest that we intend to store. If we haven't been given one (yet?), +// generate one. 
+func (s *storageImageUnnamedDestination) GetManifest() ([]byte, string, error) { + if len(s.named.manifest) == 0 { + m := imgspecv1.Manifest{ + Versioned: imgspec.Versioned{ + SchemaVersion: 2, + }, + Annotations: make(map[string]string), + } + for _, blob := range s.named.blobOrder { + desc := imgspecv1.Descriptor{ + MediaType: imgspecv1.MediaTypeImageLayer, + Digest: blob, + Size: -1, + } + m.Layers = append(m.Layers, desc) + } + encoded, err := json.Marshal(m) + if err != nil { + return nil, "", errors.Wrapf(err, "no manifest written yet, and got an error encoding a temporary one") + } + s.named.manifest = encoded + } + return s.named.manifest, manifest.GuessMIMEType(s.named.manifest), nil +} + +// GetTargetManifest reads a manifest among several that we might intend to store. +func (s *storageImageUnnamedDestination) GetTargetManifest(targetDigest digest.Digest) ([]byte, string, error) { + if len(s.named.manifest) == 0 { + return nil, "", errors.Errorf("no manifest written yet") + } + if digest.Canonical.FromBytes(s.named.manifest) != targetDigest { + return nil, "", errors.Errorf("no matching target manifest") + } + return s.named.manifest, manifest.GuessMIMEType(s.named.manifest), nil +} + +// PutManifest writes the manifest to the destination. func (s *storageImageDestination) PutManifest(manifest []byte) error { - s.Manifest = make([]byte, len(manifest)) - copy(s.Manifest, manifest) + s.manifest = make([]byte, len(manifest)) + copy(s.manifest, manifest) return nil } -// SupportsSignatures returns an error if we can't expect GetSignatures() to -// return data that was previously supplied to PutSignatures(). +// SupportsSignatures returns an error if we can't expect GetSignatures() to return data that was +// previously supplied to PutSignatures(). func (s *storageImageDestination) SupportsSignatures() error { return nil } -// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually +// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be // uploaded to the image destination, true otherwise. func (s *storageImageDestination) AcceptsForeignLayerURLs() bool { return false @@ -454,6 +839,7 @@ func (s *storageImageDestination) MustMatchRuntimeOS() bool { return true } +// PutSignatures records the image's signatures for committing as a single data blob. 
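// A standalone sketch of the fallback manifest above: when no manifest has
// been written yet, a minimal OCI manifest is synthesized from the recorded
// blob order; the layer digest here is invented.
package main

import (
	"encoding/json"
	"fmt"

	digest "github.com/opencontainers/go-digest"
	imgspec "github.com/opencontainers/image-spec/specs-go"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	blobOrder := []digest.Digest{digest.Canonical.FromString("layer one")}
	m := imgspecv1.Manifest{
		Versioned:   imgspec.Versioned{SchemaVersion: 2},
		Annotations: make(map[string]string),
	}
	for _, blob := range blobOrder {
		m.Layers = append(m.Layers, imgspecv1.Descriptor{
			MediaType: imgspecv1.MediaTypeImageLayer,
			Digest:    blob,
			Size:      -1, // sizes are unknown until Commit() time
		})
	}
	encoded, err := json.Marshal(m)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(encoded))
}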
func (s *storageImageDestination) PutSignatures(signatures [][]byte) error { sizes := []int{} sigblob := []byte{} @@ -464,152 +850,33 @@ func (s *storageImageDestination) PutSignatures(signatures [][]byte) error { copy(newblob[len(sigblob):], sig) sigblob = newblob } - s.Signatures = sigblob + s.signatures = sigblob s.SignatureSizes = sizes return nil } -func (s *storageImageSource) GetBlob(info types.BlobInfo) (rc io.ReadCloser, n int64, err error) { - rc, n, _, err = s.getBlobAndLayerID(info) - return rc, n, err -} - -func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) { - err = info.Digest.Validate() - if err != nil { - return nil, -1, "", err - } - if layerList, ok := s.Layers[info.Digest]; !ok || len(layerList) < 1 { - b, err := s.imageRef.transport.store.ImageBigData(s.ID, info.Digest.String()) - if err != nil { - return nil, -1, "", err - } - r := bytes.NewReader(b) - logrus.Debugf("exporting opaque data as blob %q", info.Digest.String()) - return ioutil.NopCloser(r), int64(r.Len()), "", nil - } - // If the blob was "put" more than once, we have multiple layer IDs - // which should all produce the same diff. For the sake of tests that - // want to make sure we created different layers each time the blob was - // "put", though, cycle through the layers. - layerList := s.Layers[info.Digest] - position, ok := s.LayerPosition[info.Digest] - if !ok { - position = 0 - } - s.LayerPosition[info.Digest] = (position + 1) % len(layerList) - logrus.Debugf("exporting filesystem layer %q for blob %q", layerList[position], info.Digest) - rc, n, err = diffLayer(s.imageRef.transport.store, layerList[position]) - return rc, n, layerList[position], err -} - -func diffLayer(store storage.Store, layerID string) (rc io.ReadCloser, n int64, err error) { - layer, err := store.Layer(layerID) - if err != nil { - return nil, -1, err - } - layerMeta := storageLayerMetadata{ - CompressedSize: -1, - } - if layer.Metadata != "" { - if err := json.Unmarshal([]byte(layer.Metadata), &layerMeta); err != nil { - return nil, -1, errors.Wrapf(err, "error decoding metadata for layer %q", layerID) - } - } - if layerMeta.CompressedSize <= 0 { - n = -1 - } else { - n = layerMeta.CompressedSize - } - diff, err := store.Diff("", layer.ID, nil) - if err != nil { - return nil, -1, err - } - return diff, n, nil -} - -func (s *storageImageSource) GetManifest() (manifestBlob []byte, MIMEType string, err error) { - manifestBlob, err = s.imageRef.transport.store.ImageBigData(s.ID, "manifest") - return manifestBlob, manifest.GuessMIMEType(manifestBlob), err -} - -func (s *storageImageSource) GetTargetManifest(digest ddigest.Digest) (manifestBlob []byte, MIMEType string, err error) { - return nil, "", ErrNoManifestLists -} - -func (s *storageImageSource) GetSignatures() (signatures [][]byte, err error) { - var offset int - signature, err := s.imageRef.transport.store.ImageBigData(s.ID, "signatures") - if err != nil { - return nil, err - } - sigslice := [][]byte{} - for _, length := range s.SignatureSizes { - sigslice = append(sigslice, signature[offset:offset+length]) - offset += length - } - if offset != len(signature) { - return nil, errors.Errorf("signatures data contained %d extra bytes", len(signatures)-offset) - } - return sigslice, nil +// Close cleans up any resources we tied up while writing the image. 
+func (s *storageImageUnnamedDestination) Close() error { + return s.named.Close() } -func (s *storageImageSource) getSize() (int64, error) { - var sum int64 - names, err := s.imageRef.transport.store.ListImageBigData(s.imageRef.id) - if err != nil { - return -1, errors.Wrapf(err, "error reading image %q", s.imageRef.id) - } - for _, name := range names { - bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.imageRef.id, name) - if err != nil { - return -1, errors.Wrapf(err, "error reading data blob size %q for %q", name, s.imageRef.id) - } - sum += bigSize - } - for _, sigSize := range s.SignatureSizes { - sum += int64(sigSize) - } - for _, layerList := range s.Layers { - for _, layerID := range layerList { - layer, err := s.imageRef.transport.store.Layer(layerID) - if err != nil { - return -1, err - } - layerMeta := storageLayerMetadata{ - Size: -1, - } - if layer.Metadata != "" { - if err := json.Unmarshal([]byte(layer.Metadata), &layerMeta); err != nil { - return -1, errors.Wrapf(err, "error decoding metadata for layer %q", layerID) - } - } - if layerMeta.Size < 0 { - return -1, errors.Errorf("size for layer %q is unknown, failing getSize()", layerID) - } - sum += layerMeta.Size - } - } - return sum, nil +// Reference returns the image reference that we used to initialize this image. Since we're officially +// unnamed, return a reference with no naming information. +func (s *storageImageUnnamedDestination) Reference() types.ImageReference { + ref := newReference(s.named.imageRef.transport, "", "", nil, "", "") + return ref } -func (s *storageImage) Size() (int64, error) { - return s.size, nil -} - -// newImage creates an image that also knows its size -func newImage(s storageReference) (types.Image, error) { - src, err := newImageSource(s) - if err != nil { - return nil, err +// GetSignatures splits up the signature blob and returns a slice of byte slices. 
+func (s *storageImageUnnamedDestination) GetSignatures() ([][]byte, error) { + sigs := [][]byte{} + first := 0 + for _, length := range s.named.SignatureSizes { + sigs = append(sigs, s.named.signatures[first:first+length]) + first += length } - img, err := image.FromSource(src) - if err != nil { - return nil, err - } - size, err := src.getSize() - if err != nil { - return nil, err + if first == 0 { + return nil, nil } - return &storageImage{Image: img, size: size}, nil + return sigs, nil } diff --git a/vendor/github.com/containers/image/storage/storage_reference.go b/vendor/github.com/containers/image/storage/storage_reference.go index 9aee75be02..1a8c1632f0 100644 --- a/vendor/github.com/containers/image/storage/storage_reference.go +++ b/vendor/github.com/containers/image/storage/storage_reference.go @@ -7,6 +7,7 @@ import ( "github.com/containers/image/docker/reference" "github.com/containers/image/types" "github.com/containers/storage" + digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) @@ -18,9 +19,11 @@ type storageReference struct { reference string id string name reference.Named + tag string + digest digest.Digest } -func newReference(transport storageTransport, reference, id string, name reference.Named) *storageReference { +func newReference(transport storageTransport, reference, id string, name reference.Named, tag string, digest digest.Digest) *storageReference { // We take a copy of the transport, which contains a pointer to the // store that it used for resolving this reference, so that the // transport that we'll return from Transport() won't be affected by @@ -30,6 +33,8 @@ func newReference(transport storageTransport, reference, id string, name referen reference: reference, id: id, name: name, + tag: tag, + digest: digest, } } @@ -76,8 +81,21 @@ func (s storageReference) Transport() types.ImageTransport { } } -// Return a name with a tag, if we have a name to base them on. +// Return a name with a tag or digest, if we have either, else return it bare. func (s storageReference) DockerReference() reference.Named { + if s.name == nil { + return nil + } + if s.tag != "" { + if namedTagged, err := reference.WithTag(s.name, s.tag); err == nil { + return namedTagged + } + } + if s.digest != "" { + if canonical, err := reference.WithDigest(s.name, s.digest); err == nil { + return canonical + } + } return s.name } @@ -91,7 +109,7 @@ func (s storageReference) StringWithinTransport() string { optionsList = ":" + strings.Join(options, ",") } storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]" - if s.name == nil { + if s.reference == "" { return storeSpec + "@" + s.id } if s.id == "" { @@ -120,11 +138,8 @@ func (s storageReference) PolicyConfigurationNamespaces() []string { driverlessStoreSpec := "[" + s.transport.store.GraphRoot() + "]" namespaces := []string{} if s.name != nil { - if s.id != "" { - // The reference without the ID is also a valid namespace. 
- namespaces = append(namespaces, storeSpec+s.reference) - } - components := strings.Split(s.name.Name(), "/") + name := reference.TrimNamed(s.name) + components := strings.Split(name.String(), "/") for len(components) > 0 { namespaces = append(namespaces, storeSpec+strings.Join(components, "/")) components = components[:len(components)-1] } diff --git a/vendor/github.com/containers/image/storage/storage_transport.go b/vendor/github.com/containers/image/storage/storage_transport.go index 336dd814a4..d52ba85319 100644 --- a/vendor/github.com/containers/image/storage/storage_transport.go +++ b/vendor/github.com/containers/image/storage/storage_transport.go @@ -12,8 +12,11 @@ import ( "github.com/containers/image/types" "github.com/containers/storage" "github.com/containers/storage/pkg/idtools" - "github.com/opencontainers/go-digest" - ddigest "github.com/opencontainers/go-digest" + digest "github.com/opencontainers/go-digest" +) + +const ( + minimumTruncatedIDLength = 3 ) func init() { @@ -101,60 +104,124 @@ func (s *storageTransport) DefaultGIDMap() []idtools.IDMap { // relative to the given store, and returns it in a reference object. func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) { var name reference.Named - var sum digest.Digest - var err error if ref == "" { - return nil, ErrInvalidReference + return nil, errors.Wrap(ErrInvalidReference, "empty reference") } if ref[0] == '[' { // Ignore the store specifier. closeIndex := strings.IndexRune(ref, ']') if closeIndex < 1 { - return nil, ErrInvalidReference + return nil, errors.Wrapf(ErrInvalidReference, "store specifier in %q did not end", ref) } ref = ref[closeIndex+1:] } - refInfo := strings.SplitN(ref, "@", 2) - if len(refInfo) == 1 { - // A name. - name, err = reference.ParseNormalizedNamed(refInfo[0]) - if err != nil { - return nil, err + + // The last segment, if there's more than one, is either a digest from a reference, or an image ID. + split := strings.LastIndex(ref, "@") + idOrDigest := "" + if split != -1 { + // Peel off that last bit so that we can work on the rest. + idOrDigest = ref[split+1:] + if idOrDigest == "" { + return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like a digest or image ID", idOrDigest) } - } else if len(refInfo) == 2 { - // An ID, possibly preceded by a name. - if refInfo[0] != "" { - name, err = reference.ParseNormalizedNamed(refInfo[0]) - if err != nil { - return nil, err - } + ref = ref[:split] + } + + // The middle segment (now the last segment), if there is one, is a digest. + split = strings.LastIndex(ref, "@") + sum := digest.Digest("") + if split != -1 { + sum = digest.Digest(ref[split+1:]) + if sum == "" { + return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image digest", sum) } - sum, err = digest.Parse(refInfo[1]) - if err != nil || sum.Validate() != nil { - sum, err = digest.Parse("sha256:" + refInfo[1]) - if err != nil || sum.Validate() != nil { - return nil, err - } + ref = ref[:split] + } + + // If we found a middle segment, it can only be a digest: validate it, and then treat the final + // portion, if we have one, as an image ID.
+ id := "" + if sum != "" { + if idSum, err := digest.Parse("sha256:" + idOrDigest); err != nil || idSum.Validate() != nil { + return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image ID", idOrDigest) + } + if err := sum.Validate(); err != nil { + return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image digest", sum) + } + id = idOrDigest + if img, err := store.Image(idOrDigest); err == nil && img != nil && len(id) >= minimumTruncatedIDLength { + // The ID is a truncated version of the ID of an image that's present in local storage, + // so we might as well use the expanded value. + id = img.ID + } + } else if idOrDigest != "" { + // There was no middle portion, so the final portion could be either a digest or an ID. + if idSum, err := digest.Parse("sha256:" + idOrDigest); err == nil && idSum.Validate() == nil { + // It's an ID. + id = idOrDigest + } else if idSum, err := digest.Parse(idOrDigest); err == nil && idSum.Validate() == nil { + // It's a digest. + sum = idSum + } else if img, err := store.Image(idOrDigest); err == nil && img != nil && len(idOrDigest) >= minimumTruncatedIDLength { + // It's a truncated version of the ID of an image that's present in local storage, + // and we may need the expanded value. + id = img.ID + } else { + return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like a digest or image ID", idOrDigest) } - } else { // Coverage: len(refInfo) is always 1 or 2 - // Anything else: store specified in a form we don't - // recognize. - return nil, ErrInvalidReference } + + // If we only had one portion, then _maybe_ it's a truncated image ID. Only check on that if it's + // at least of what we guess is a reasonable minimum length, because we don't want a really short value + // like "a" matching an image by ID prefix when the input was actually meant to specify an image name. + if len(ref) >= minimumTruncatedIDLength && sum == "" && id == "" { + if img, err := store.Image(idOrDigest); err == nil && img != nil { + // It's a truncated version of the ID of an image that's present in local storage; + // we need to expand it. + id = img.ID + ref = "" + } + } + + // The initial portion is probably a name, possibly with a tag. + if ref != "" { + var err error + if name, err = reference.ParseNormalizedNamed(ref); err != nil { + return nil, errors.Wrapf(err, "error parsing named reference %q", ref) + } + } + if name == nil && sum == "" && id == "" { + return nil, errors.Errorf("error parsing reference") + } + + // Construct a copy of the store spec. optionsList := "" options := store.GraphOptions() if len(options) > 0 { optionsList = ":" + strings.Join(options, ",") } storeSpec := "[" + store.GraphDriverName() + "@" + store.GraphRoot() + "+" + store.RunRoot() + optionsList + "]" - id := "" - if sum.Validate() == nil { - id = sum.Hex() - } + + // Convert the name back into a reference string, if we got a name. 
refname := "" + tag := "" if name != nil { - name = reference.TagNameOnly(name) - refname = verboseName(name) + if sum.Validate() == nil { + canonical, err := reference.WithDigest(name, sum) + if err != nil { + return nil, errors.Wrapf(err, "error mixing name %q with digest %q", name, sum) + } + refname = verboseName(canonical) + } else { + name = reference.TagNameOnly(name) + tagged, ok := name.(reference.Tagged) + if !ok { + return nil, errors.Errorf("error parsing possibly-tagless name %q", ref) + } + refname = verboseName(name) + tag = tagged.Tag() + } } if refname == "" { logrus.Debugf("parsed reference into %q", storeSpec+"@"+id) @@ -163,7 +230,7 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) ( } else { logrus.Debugf("parsed reference into %q", storeSpec+refname+"@"+id) } - return newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, refname, id, name), nil + return newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, refname, id, name, tag, sum), nil } func (s *storageTransport) GetStore() (storage.Store, error) { @@ -182,11 +249,14 @@ func (s *storageTransport) GetStore() (storage.Store, error) { return s.store, nil } -// ParseReference takes a name and/or an ID ("_name_"/"@_id_"/"_name_@_id_"), +// ParseReference takes a name and a tag or digest and/or ID +// ("_name_"/"@_id_"/"_name_:_tag_"/"_name_:_tag_@_id_"/"_name_@_digest_"/"_name_@_digest_@_id_"), // possibly prefixed with a store specifier in the form "[_graphroot_]" or // "[_driver_@_graphroot_]" or "[_driver_@_graphroot_+_runroot_]" or // "[_driver_@_graphroot_:_options_]" or "[_driver_@_graphroot_+_runroot_:_options_]", // tries to figure out which it is, and returns it in a reference object. +// If _id_ is the ID of an image that's present in local storage, it can be truncated, and +// even be specified as if it were a _name_, value. func (s *storageTransport) ParseReference(reference string) (types.ImageReference, error) { var store storage.Store // Check if there's a store location prefix. 
If there is, then it @@ -335,7 +405,7 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error { if err != nil { return err } - _, err = ddigest.Parse("sha256:" + scopeInfo[1]) + _, err = digest.Parse("sha256:" + scopeInfo[1]) if err != nil { return err } @@ -345,11 +415,28 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error { return nil } -func verboseName(name reference.Named) string { - name = reference.TagNameOnly(name) +func verboseName(r reference.Reference) string { + if r == nil { + return "" + } + named, isNamed := r.(reference.Named) + digested, isDigested := r.(reference.Digested) + tagged, isTagged := r.(reference.Tagged) + name := "" tag := "" - if tagged, ok := name.(reference.NamedTagged); ok { - tag = ":" + tagged.Tag() + sum := "" + if isNamed { + name = (reference.TrimNamed(named)).String() + } + if isTagged { + if tagged.Tag() != "" { + tag = ":" + tagged.Tag() + } + } + if isDigested { + if digested.Digest().Validate() == nil { + sum = "@" + digested.Digest().String() + } } - return name.Name() + tag + return name + tag + sum } diff --git a/vendor/github.com/containers/image/vendor.conf b/vendor/github.com/containers/image/vendor.conf index d0fb1aad64..8110f4431e 100644 --- a/vendor/github.com/containers/image/vendor.conf +++ b/vendor/github.com/containers/image/vendor.conf @@ -1,5 +1,5 @@ github.com/Sirupsen/logrus 7f4b1adc791766938c29457bed0703fb9134421a -github.com/containers/storage 105f7c77aef0c797429e41552743bf5b03b63263 +github.com/containers/storage 5d8c2f87387fa5be9fa526ae39fbd79b8bdf27be github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76 github.com/docker/distribution df5327f76fb6468b84a87771e361762b8be23fdb github.com/docker/docker 75843d36aa5c3eaade50da005f9e0ff2602f3d5e
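The new GetSignatures() in the storage destination relies on a simple framing scheme: all signatures are stored as one concatenated blob, and SignatureSizes records each signature's byte length, so reading them back is a running-offset walk. A minimal self-contained sketch of that scheme (our own helper names, not the vendored API):

package main

import (
	"bytes"
	"fmt"
)

// joinSignatures concatenates signatures into one blob and records their
// sizes, mirroring how the destination stores them.
func joinSignatures(sigs [][]byte) (blob []byte, sizes []int) {
	for _, sig := range sigs {
		sizes = append(sizes, len(sig))
		blob = append(blob, sig...)
	}
	return blob, sizes
}

// splitSignatures slices the blob back apart with a running offset, the same
// walk GetSignatures performs over SignatureSizes.
func splitSignatures(blob []byte, sizes []int) [][]byte {
	sigs := [][]byte{}
	first := 0
	for _, length := range sizes {
		sigs = append(sigs, blob[first:first+length])
		first += length
	}
	if first == 0 {
		return nil // nothing recorded
	}
	return sigs
}

func main() {
	in := [][]byte{[]byte("sig-one"), []byte("sig-two")}
	blob, sizes := joinSignatures(in)
	out := splitSignatures(blob, sizes)
	fmt.Println(bytes.Equal(out[0], in[0]) && bytes.Equal(out[1], in[1])) // true
}

The round trip is the invariant the destination depends on: writers and readers only agree because the sizes list and the blob are updated together.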
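The rewritten ParseStoreReference peels segments off the right-hand end of the reference: the last "@" segment is either an image ID or a digest, and any "@" segment before it must be a digest. The standalone sketch below (our own classify helper; store lookups, validation errors, and truncated-ID expansion deliberately omitted) shows just that splitting order:

package main

import (
	"fmt"
	"strings"

	digest "github.com/opencontainers/go-digest"
)

// classify mimics only the segment-splitting of "name[:tag][@digest][@id]".
func classify(ref string) (name, dgst, id string) {
	// Peel off the last "@" segment; it is either an image ID or a digest.
	if split := strings.LastIndex(ref, "@"); split != -1 {
		last := ref[split+1:]
		ref = ref[:split]
		if split = strings.LastIndex(ref, "@"); split != -1 {
			// A second "@" remains: the middle segment is a digest, so the
			// final segment must be an image ID.
			dgst = ref[split+1:]
			ref = ref[:split]
			id = last
		} else if d, err := digest.Parse(last); err == nil {
			// A lone trailing segment that parses as a digest is a digest.
			dgst = d.String()
		} else {
			// Otherwise treat it as a (possibly truncated) image ID.
			id = last
		}
	}
	return ref, dgst, id
}

func main() {
	d := digest.FromString("example") // deterministic, well-formed digest
	for _, ref := range []string{
		"busybox:latest",
		"busybox@" + d.String(),
		"busybox@" + d.String() + "@0123456789ab",
	} {
		name, dgst, id := classify(ref)
		fmt.Printf("name=%q digest=%q id=%q\n", name, dgst, id)
	}
}

Splitting from the right is what makes "_name_@_digest_@_id_" unambiguous: a digest itself contains no "@", so the rightmost segment can be claimed first.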
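With the new tag and digest fields on storageReference, DockerReference() prefers the recorded tag, then falls back to the recorded digest, then to the bare name; verboseName() composes the same parts into a "name:tag@digest"-style string. A short sketch of that precedence (hypothetical image name; the vendored docker/reference and go-digest packages):

package main

import (
	"fmt"

	"github.com/containers/image/docker/reference"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	// A hypothetical image name, normalized the same way the transport does.
	name, err := reference.ParseNormalizedNamed("example.com/app")
	if err != nil {
		panic(err)
	}
	tag, sum := "v1", digest.FromString("example")

	// Mirrors storageReference.DockerReference(): a recorded tag wins,
	// then a recorded digest, then the bare name.
	if tagged, err := reference.WithTag(name, tag); err == nil {
		fmt.Println(tagged.String()) // prints "example.com/app:v1"
		return
	}
	if canonical, err := reference.WithDigest(name, sum); err == nil {
		fmt.Println(canonical.String())
		return
	}
	fmt.Println(name.String())
}

Returning the tagged form first keeps DockerReference() stable for callers that render user-facing names, while the digest fallback still yields a pullable canonical reference when no tag was recorded.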