From e204975c8d71fbf3aa44ae0920280af8d251f48c Mon Sep 17 00:00:00 2001 From: Nalin Dahyabhai Date: Wed, 12 Jul 2017 17:06:03 -0400 Subject: [PATCH] Fix handling of references with digests References can contain digests or tags, but not both, so fix our parser to handle either type of name for use as image names. Update PolicyConfigurationNamespaces() to discard tags and digests when computing the list of alternate namespaces which can match an image. Drop digests from a storageImageSource's copies of references, in case we're reading and writing images using references which contain digests, since schema1 images tend to fail verification of those. Signed-off-by: Nalin Dahyabhai --- storage/storage_image.go | 135 ++++++++++++++--------- storage/storage_reference.go | 31 ++++-- storage/storage_reference_test.go | 6 +- storage/storage_test.go | 1 + storage/storage_transport.go | 173 ++++++++++++++++++++++-------- storage/storage_transport_test.go | 8 +- 6 files changed, 245 insertions(+), 109 deletions(-) diff --git a/storage/storage_image.go b/storage/storage_image.go index 39e35a7171..8fbfc87721 100644 --- a/storage/storage_image.go +++ b/storage/storage_image.go @@ -11,6 +11,7 @@ import ( "path/filepath" "sync/atomic" + "github.com/containers/image/docker/reference" "github.com/containers/image/image" "github.com/containers/image/manifest" "github.com/containers/image/types" @@ -58,29 +59,36 @@ type storageImageDestination struct { SignatureSizes []int `json:"signature-sizes"` // List of sizes of each signature slice } +type storageImageUnnamedDestination struct { + named *storageImageDestination +} + type storageImage struct { types.Image - reader *storageImageReader - size int64 + imageRef storageReference + reader *storageUnnamedImageReader + size int64 } -type storageImageReader struct { +type storageUnnamedImageReader struct { ID string - imageRef storageReference + transport storageTransport layerPosition map[digest.Digest]int // Where we are in reading a blob's layers SignatureSizes []int `json:"signature-sizes"` // List of sizes of each signature slice } -// newImageReader sets us up to read out an image without making any changes to what we read before -// handing it back to the caller. -func newImageReader(imageRef storageReference) (*storageImageReader, error) { +// newUnnamedImageReader sets us up to read out an image without making any changes to what we read before +// handing it back to the caller, without caring about the image's name. +func newUnnamedImageReader(imageRef storageReference) (*storageUnnamedImageReader, error) { + // First, locate the image using the original reference. img, err := imageRef.resolveImage() if err != nil { return nil, err } - image := &storageImageReader{ + // Build the reader object, reading the image referred to by a possibly-updated reference. + image := &storageUnnamedImageReader{ ID: img.ID, - imageRef: imageRef, + transport: imageRef.transport, layerPosition: make(map[digest.Digest]int), SignatureSizes: []int{}, } @@ -91,23 +99,25 @@ func newImageReader(imageRef storageReference) (*storageImageReader, error) { } // Reference returns the image reference that we used to find this image. -func (s storageImageReader) Reference() types.ImageReference { - return s.imageRef +// Since our goal here is to remain unnamed, return a reference with no naming +// information. 
+func (s storageUnnamedImageReader) Reference() types.ImageReference { + return newReference(s.transport, "", s.ID, nil, "", "") } // Close cleans up any resources we tied up while reading the image. -func (s storageImageReader) Close() error { +func (s storageUnnamedImageReader) Close() error { return nil } // GetBlob reads the data blob or filesystem layer which matches the digest and size, if given. -func (s *storageImageReader) GetBlob(info types.BlobInfo) (rc io.ReadCloser, n int64, err error) { +func (s *storageUnnamedImageReader) GetBlob(info types.BlobInfo) (rc io.ReadCloser, n int64, err error) { rc, n, _, err = s.getBlobAndLayerID(info) return rc, n, err } // getBlobAndLayer reads the data blob or filesystem layer which matches the digest and size, if given. -func (s *storageImageReader) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) { +func (s *storageUnnamedImageReader) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) { var layer storage.Layer var diffOptions *storage.DiffOptions // We need a valid digest value. @@ -118,10 +128,10 @@ func (s *storageImageReader) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadC // Check if the blob corresponds to a diff that was used to initialize any layers. Our // callers should only ask about layers using their uncompressed digests, so no need to // check if they're using one of the compressed digests, which we can't reproduce anyway. - layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(info.Digest) + layers, err := s.transport.store.LayersByUncompressedDigest(info.Digest) // If it's not a layer, then it must be a data item. if len(layers) == 0 { - b, err := s.imageRef.transport.store.ImageBigData(s.ID, info.Digest.String()) + b, err := s.transport.store.ImageBigData(s.ID, info.Digest.String()) if err != nil { return nil, -1, "", err } @@ -149,7 +159,7 @@ func (s *storageImageReader) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadC n = layer.UncompressedSize } logrus.Debugf("exporting filesystem layer %q without compression for blob %q", layer.ID, info.Digest) - rc, err = s.imageRef.transport.store.Diff("", layer.ID, diffOptions) + rc, err = s.transport.store.Diff("", layer.ID, diffOptions) if err != nil { return nil, -1, "", err } @@ -157,23 +167,23 @@ func (s *storageImageReader) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadC } // GetManifest() reads the image's manifest. -func (s *storageImageReader) GetManifest() (manifestBlob []byte, MIMEType string, err error) { - manifestBlob, err = s.imageRef.transport.store.ImageBigData(s.ID, "manifest") +func (s *storageUnnamedImageReader) GetManifest() (manifestBlob []byte, MIMEType string, err error) { + manifestBlob, err = s.transport.store.ImageBigData(s.ID, "manifest") return manifestBlob, manifest.GuessMIMEType(manifestBlob), err } // GetTargetManifest() is not supported. -func (s *storageImageReader) GetTargetManifest(d digest.Digest) (manifestBlob []byte, MIMEType string, err error) { +func (s *storageUnnamedImageReader) GetTargetManifest(d digest.Digest) (manifestBlob []byte, MIMEType string, err error) { return nil, "", ErrNoManifestLists } // GetSignatures() parses the image's signatures blob into a slice of byte slices. 
-func (s *storageImageReader) GetSignatures(ctx context.Context) (signatures [][]byte, err error) { +func (s *storageUnnamedImageReader) GetSignatures(ctx context.Context) (signatures [][]byte, err error) { var offset int sigslice := [][]byte{} signature := []byte{} if len(s.SignatureSizes) > 0 { - signatureBlob, err := s.imageRef.transport.store.ImageBigData(s.ID, "signatures") + signatureBlob, err := s.transport.store.ImageBigData(s.ID, "signatures") if err != nil { return nil, errors.Wrapf(err, "error looking up signatures data for image %q", s.ID) } @@ -191,15 +201,15 @@ func (s *storageImageReader) GetSignatures(ctx context.Context) (signatures [][] // getSize() adds up the sizes of the image's data blobs (which includes the configuration blob), the // signatures, and the uncompressed sizes of all of the image's layers. -func (s *storageImageReader) getSize() (int64, error) { +func (s *storageUnnamedImageReader) getSize() (int64, error) { var sum int64 // Size up the data blobs. - dataNames, err := s.imageRef.transport.store.ListImageBigData(s.ID) + dataNames, err := s.transport.store.ListImageBigData(s.ID) if err != nil { return -1, errors.Wrapf(err, "error reading image %q", s.ID) } for _, dataName := range dataNames { - bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.ID, dataName) + bigSize, err := s.transport.store.ImageBigDataSize(s.ID, dataName) if err != nil { return -1, errors.Wrapf(err, "error reading data blob size %q for %q", dataName, s.ID) } @@ -210,14 +220,14 @@ func (s *storageImageReader) getSize() (int64, error) { sum += int64(sigSize) } // Prepare to walk the layer list. - img, err := s.imageRef.transport.store.Image(s.ID) + img, err := s.transport.store.Image(s.ID) if err != nil { return -1, errors.Wrapf(err, "error reading image info %q", s.ID) } // Walk the layer list. layerID := img.TopLayer for layerID != "" { - layer, err := s.imageRef.transport.store.Layer(layerID) + layer, err := s.transport.store.Layer(layerID) if err != nil { return -1, err } @@ -236,7 +246,7 @@ func (s *storageImageReader) getSize() (int64, error) { // newImage creates an image that knows its size and always refers to its layer blobs using // uncompressed digests and sizes func newImage(s storageReference) (*storageImage, error) { - reader, err := newImageReader(s) + reader, err := newUnnamedImageReader(s) if err != nil { return nil, err } @@ -246,7 +256,7 @@ func newImage(s storageReference) (*storageImage, error) { return nil, err } // Build the updated information that we want for the manifest. - simg, err := reader.imageRef.transport.store.Image(reader.ID) + simg, err := reader.transport.store.Image(reader.ID) if err != nil { return nil, err } @@ -254,7 +264,7 @@ func newImage(s storageReference) (*storageImage, error) { diffIDs := []digest.Digest{} layerID := simg.TopLayer for layerID != "" { - layer, err := reader.imageRef.transport.store.Layer(layerID) + layer, err := reader.transport.store.Layer(layerID) if err != nil { return nil, err } @@ -293,7 +303,14 @@ func newImage(s storageReference) (*storageImage, error) { if err != nil { return nil, err } - return &storageImage{Image: updated, reader: reader, size: size}, nil + // Discard a digest in the image reference, if there is one, since the updated image manifest + // won't match it. 
+ newName := s.name + if s.digest != "" { + newName = reference.TrimNamed(newName) + } + newRef := newReference(reader.transport, verboseName(newName), reader.ID, newName, "", "") + return &storageImage{Image: updated, imageRef: *newRef, reader: reader, size: size}, nil } // Size returns the image's previously-computed size. @@ -332,9 +349,9 @@ func (s *storageImageSource) GetSignatures(ctx context.Context) ([][]byte, error return s.image.reader.GetSignatures(ctx) } -// Reference returns the image reference that we used to find this image. +// Reference returns a trimmed copy of the image reference that we used to find this image. func (s storageImageSource) Reference() types.ImageReference { - return s.image.reader.imageRef + return s.image.imageRef } // Close cleans up any resources we tied up while reading the image. @@ -499,7 +516,7 @@ func (s *storageImageDestination) ReapplyBlob(blobinfo types.BlobInfo) (types.Bl // GetBlob() shouldn't really be called, but include an implementation in case other parts of the library // start needing it. -func (s *storageImageDestination) GetBlob(blobinfo types.BlobInfo) (rc io.ReadCloser, n int64, err error) { +func (s *storageImageUnnamedDestination) GetBlob(blobinfo types.BlobInfo) (rc io.ReadCloser, n int64, err error) { if blobinfo.Digest == "" { return nil, -1, errors.Errorf(`can't read a blob with unknown digest`) } @@ -507,7 +524,7 @@ func (s *storageImageDestination) GetBlob(blobinfo types.BlobInfo) (rc io.ReadCl return nil, -1, errors.Wrapf(err, `can't check for a blob with invalid digest`) } // Check if we've already cached the blob as a file. - if filename, ok := s.filenames[blobinfo.Digest]; ok { + if filename, ok := s.named.filenames[blobinfo.Digest]; ok { f, err := os.Open(filename) if err != nil { return nil, -1, errors.Wrapf(err, `can't read file %q`, filename) @@ -516,22 +533,22 @@ func (s *storageImageDestination) GetBlob(blobinfo types.BlobInfo) (rc io.ReadCl } // Check if we have a wasn't-compressed layer in storage that's based on that blob. If we have one, // start reading it. - layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest) + layers, err := s.named.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest) if err != nil { return nil, -1, errors.Wrapf(err, `error looking for layers with digest %q`, blobinfo.Digest) } if len(layers) > 0 { - rc, err := s.imageRef.transport.store.Diff("", layers[0].ID, nil) + rc, err := s.named.imageRef.transport.store.Diff("", layers[0].ID, nil) return rc, -1, err } // Check if we have a was-compressed layer in storage that's based on that blob. If we have one, // start reading it. - layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest) + layers, err = s.named.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest) if err != nil { return nil, -1, errors.Wrapf(err, `error looking for compressed layers with digest %q`, blobinfo.Digest) } if len(layers) > 0 { - rc, err := s.imageRef.transport.store.Diff("", layers[0].ID, nil) + rc, err := s.named.imageRef.transport.store.Diff("", layers[0].ID, nil) return rc, -1, err } // Nope, we don't have it. @@ -543,7 +560,7 @@ func (s *storageImageDestination) Commit() error { // parse the manifest to get a list of which blobs are filesystem layers, leaving any cached // files that aren't filesystem layers to be saved as data items. 
if s.image == nil { - img, err := image.FromSource(s) + img, err := image.FromSource(&storageImageUnnamedDestination{named: s}) if err != nil { return errors.Wrapf(err, "error locating manifest for layer blob list") } @@ -701,7 +718,7 @@ func (s *storageImageDestination) Commit() error { logrus.Debugf("set names of image %q to %v", img.ID, names) } // Save the manifest. - manifest, _, err := s.GetManifest() + manifest, _, err := s.image.Manifest() if err != nil { manifest = s.manifest } @@ -757,15 +774,15 @@ func (s *storageImageDestination) SupportedManifestMIMETypes() []string { // GetManifest reads the manifest that we intend to store. If we haven't been given one (yet?), // generate one. -func (s *storageImageDestination) GetManifest() ([]byte, string, error) { - if len(s.manifest) == 0 { +func (s *storageImageUnnamedDestination) GetManifest() ([]byte, string, error) { + if len(s.named.manifest) == 0 { m := imgspecv1.Manifest{ Versioned: imgspec.Versioned{ SchemaVersion: 2, }, Annotations: make(map[string]string), } - for _, blob := range s.blobOrder { + for _, blob := range s.named.blobOrder { desc := imgspecv1.Descriptor{ MediaType: imgspecv1.MediaTypeImageLayer, Digest: blob, @@ -777,20 +794,20 @@ func (s *storageImageDestination) GetManifest() ([]byte, string, error) { if err != nil { return nil, "", errors.Wrapf(err, "no manifest written yet, and got an error encoding a temporary one") } - s.manifest = encoded + s.named.manifest = encoded } - return s.manifest, manifest.GuessMIMEType(s.manifest), nil + return s.named.manifest, manifest.GuessMIMEType(s.named.manifest), nil } // GetTargetManifest reads a manifest among several that we might intend to store. -func (s *storageImageDestination) GetTargetManifest(targetDigest digest.Digest) ([]byte, string, error) { - if len(s.manifest) == 0 { +func (s *storageImageUnnamedDestination) GetTargetManifest(targetDigest digest.Digest) ([]byte, string, error) { + if len(s.named.manifest) == 0 { return nil, "", errors.Errorf("no manifest written yet") } - if digest.Canonical.FromBytes(s.manifest) != targetDigest { + if digest.Canonical.FromBytes(s.named.manifest) != targetDigest { return nil, "", errors.Errorf("no matching target manifest") } - return s.manifest, manifest.GuessMIMEType(s.manifest), nil + return s.named.manifest, manifest.GuessMIMEType(s.named.manifest), nil } // PutManifest writes the manifest to the destination. @@ -833,12 +850,24 @@ func (s *storageImageDestination) PutSignatures(signatures [][]byte) error { return nil } +// Close cleans up any resources we tied up while writing the image. +func (s *storageImageUnnamedDestination) Close() error { + return s.named.Close() +} + +// Reference returns the image reference that we used to initialize this image. Since we're officially +// unnamed, return a reference with no naming information. +func (s *storageImageUnnamedDestination) Reference() types.ImageReference { + ref := newReference(s.named.imageRef.transport, "", "", nil, "", "") + return ref +} + // GetSignatures splits up the signature blob and returns a slice of byte slices. 
-func (s *storageImageDestination) GetSignatures(ctx context.Context) ([][]byte, error) { +func (s *storageImageUnnamedDestination) GetSignatures(ctx context.Context) ([][]byte, error) { sigs := [][]byte{} first := 0 - for _, length := range s.SignatureSizes { - sigs = append(sigs, s.signatures[first:first+length]) + for _, length := range s.named.SignatureSizes { + sigs = append(sigs, s.named.signatures[first:first+length]) first += length } if first == 0 { diff --git a/storage/storage_reference.go b/storage/storage_reference.go index 674330b483..84eeeb25ea 100644 --- a/storage/storage_reference.go +++ b/storage/storage_reference.go @@ -6,6 +6,7 @@ import ( "github.com/containers/image/docker/reference" "github.com/containers/image/types" "github.com/containers/storage" + digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -18,9 +19,11 @@ type storageReference struct { reference string id string name reference.Named + tag string + digest digest.Digest } -func newReference(transport storageTransport, reference, id string, name reference.Named) *storageReference { +func newReference(transport storageTransport, reference, id string, name reference.Named, tag string, digest digest.Digest) *storageReference { // We take a copy of the transport, which contains a pointer to the // store that it used for resolving this reference, so that the // transport that we'll return from Transport() won't be affected by @@ -30,6 +33,8 @@ func newReference(transport storageTransport, reference, id string, name referen reference: reference, id: id, name: name, + tag: tag, + digest: digest, } } @@ -76,8 +81,21 @@ func (s storageReference) Transport() types.ImageTransport { } } -// Return a name with a tag, if we have a name to base them on. +// Return a name with a tag or digest, if we have either, else return it bare. func (s storageReference) DockerReference() reference.Named { + if s.name == nil { + return nil + } + if s.tag != "" { + if namedTagged, err := reference.WithTag(s.name, s.tag); err == nil { + return namedTagged + } + } + if s.digest != "" { + if canonical, err := reference.WithDigest(s.name, s.digest); err == nil { + return canonical + } + } return s.name } @@ -91,7 +109,7 @@ func (s storageReference) StringWithinTransport() string { optionsList = ":" + strings.Join(options, ",") } storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]" - if s.name == nil { + if s.reference == "" { return storeSpec + "@" + s.id } if s.id == "" { @@ -120,11 +138,8 @@ func (s storageReference) PolicyConfigurationNamespaces() []string { driverlessStoreSpec := "[" + s.transport.store.GraphRoot() + "]" namespaces := []string{} if s.name != nil { - if s.id != "" { - // The reference without the ID is also a valid namespace. 
- namespaces = append(namespaces, storeSpec+s.reference) - } - components := strings.Split(s.name.Name(), "/") + name := reference.TrimNamed(s.name) + components := strings.Split(name.String(), "/") for len(components) > 0 { namespaces = append(namespaces, storeSpec+strings.Join(components, "/")) components = components[:len(components)-1] diff --git a/storage/storage_reference_test.go b/storage/storage_reference_test.go index 37ddcf3364..bc7100bbe6 100644 --- a/storage/storage_reference_test.go +++ b/storage/storage_reference_test.go @@ -52,7 +52,11 @@ var validReferenceTestCases = []struct { }, { "busybox@" + sha256digestHex, "docker.io/library/busybox:latest@" + sha256digestHex, - []string{"docker.io/library/busybox:latest", "docker.io/library/busybox", "docker.io/library", "docker.io"}, + []string{"docker.io/library/busybox", "docker.io/library", "docker.io"}, + }, + { + "busybox@sha256:" + sha256digestHex, "docker.io/library/busybox@sha256:" + sha256digestHex, + []string{"docker.io/library/busybox", "docker.io/library", "docker.io"}, }, } diff --git a/storage/storage_test.go b/storage/storage_test.go index f3f6a3305d..552a8a2c65 100644 --- a/storage/storage_test.go +++ b/storage/storage_test.go @@ -436,6 +436,7 @@ func TestWriteRead(t *testing.T) { if src == nil { t.Fatalf("NewImageSource(%q) returned no source", ref.StringWithinTransport()) } + // Note that we would strip a digest here, but not a tag. if src.Reference().StringWithinTransport() != ref.StringWithinTransport() { // As long as it's only the addition of an ID suffix, that's okay. if !strings.HasPrefix(src.Reference().StringWithinTransport(), ref.StringWithinTransport()+"@") { diff --git a/storage/storage_transport.go b/storage/storage_transport.go index 1a0ebd040d..39c81a5811 100644 --- a/storage/storage_transport.go +++ b/storage/storage_transport.go @@ -11,11 +11,14 @@ import ( "github.com/containers/image/types" "github.com/containers/storage" "github.com/containers/storage/pkg/idtools" - "github.com/opencontainers/go-digest" - ddigest "github.com/opencontainers/go-digest" + digest "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" ) +const ( + minimumTruncatedIDLength = 3 +) + func init() { transports.Register(Transport) } @@ -101,60 +104,124 @@ func (s *storageTransport) DefaultGIDMap() []idtools.IDMap { // relative to the given store, and returns it in a reference object. func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) { var name reference.Named - var sum digest.Digest - var err error if ref == "" { - return nil, ErrInvalidReference + return nil, errors.Wrapf(ErrInvalidReference, "%q is an empty reference") } if ref[0] == '[' { // Ignore the store specifier. closeIndex := strings.IndexRune(ref, ']') if closeIndex < 1 { - return nil, ErrInvalidReference + return nil, errors.Wrapf(ErrInvalidReference, "store specifier in %q did not end", ref) } ref = ref[closeIndex+1:] } - refInfo := strings.SplitN(ref, "@", 2) - if len(refInfo) == 1 { - // A name. - name, err = reference.ParseNormalizedNamed(refInfo[0]) - if err != nil { - return nil, err + + // The last segment, if there's more than one, is either a digest from a reference, or an image ID. + split := strings.LastIndex(ref, "@") + idOrDigest := "" + if split != -1 { + // Peel off that last bit so that we can work on the rest. 
+ idOrDigest = ref[split+1:] + if idOrDigest == "" { + return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like a digest or image ID", idOrDigest) } - } else if len(refInfo) == 2 { - // An ID, possibly preceded by a name. - if refInfo[0] != "" { - name, err = reference.ParseNormalizedNamed(refInfo[0]) - if err != nil { - return nil, err - } + ref = ref[:split] + } + + // The middle segment (now the last segment), if there is one, is a digest. + split = strings.LastIndex(ref, "@") + sum := digest.Digest("") + if split != -1 { + sum = digest.Digest(ref[split+1:]) + if sum == "" { + return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image digest", sum) } - sum, err = digest.Parse(refInfo[1]) - if err != nil || sum.Validate() != nil { - sum, err = digest.Parse("sha256:" + refInfo[1]) - if err != nil || sum.Validate() != nil { - return nil, err - } + ref = ref[:split] + } + + // If we have something that unambiguously should be a digest, validate it, and then the third part, + // if we have one, as an ID. + id := "" + if sum != "" { + if idSum, err := digest.Parse("sha256:" + idOrDigest); err != nil || idSum.Validate() != nil { + return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image ID", idOrDigest) + } + if err := sum.Validate(); err != nil { + return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image digest", sum) + } + id = idOrDigest + if img, err := store.Image(idOrDigest); err == nil && img != nil && len(id) >= minimumTruncatedIDLength { + // The ID is a truncated version of the ID of an image that's present in local storage, + // so we might as well use the expanded value. + id = img.ID + } + } else if idOrDigest != "" { + // There was no middle portion, so the final portion could be either a digest or an ID. + if idSum, err := digest.Parse("sha256:" + idOrDigest); err == nil && idSum.Validate() == nil { + // It's an ID. + id = idOrDigest + } else if idSum, err := digest.Parse(idOrDigest); err == nil && idSum.Validate() == nil { + // It's a digest. + sum = idSum + } else if img, err := store.Image(idOrDigest); err == nil && img != nil && len(idOrDigest) >= minimumTruncatedIDLength { + // It's a truncated version of the ID of an image that's present in local storage, + // and we may need the expanded value. + id = img.ID + } else { + return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like a digest or image ID", idOrDigest) } - } else { // Coverage: len(refInfo) is always 1 or 2 - // Anything else: store specified in a form we don't - // recognize. - return nil, ErrInvalidReference } + + // If we only had one portion, then _maybe_ it's a truncated image ID. Only check on that if it's + // at least of what we guess is a reasonable minimum length, because we don't want a really short value + // like "a" matching an image by ID prefix when the input was actually meant to specify an image name. + if len(ref) >= minimumTruncatedIDLength && sum == "" && id == "" { + if img, err := store.Image(idOrDigest); err == nil && img != nil { + // It's a truncated version of the ID of an image that's present in local storage; + // we need to expand it. + id = img.ID + ref = "" + } + } + + // The initial portion is probably a name, possibly with a tag. 
+ if ref != "" { + var err error + if name, err = reference.ParseNormalizedNamed(ref); err != nil { + return nil, errors.Wrapf(err, "error parsing named reference %q", ref) + } + } + if name == nil && sum == "" && id == "" { + return nil, errors.Errorf("error parsing reference") + } + + // Construct a copy of the store spec. optionsList := "" options := store.GraphOptions() if len(options) > 0 { optionsList = ":" + strings.Join(options, ",") } storeSpec := "[" + store.GraphDriverName() + "@" + store.GraphRoot() + "+" + store.RunRoot() + optionsList + "]" - id := "" - if sum.Validate() == nil { - id = sum.Hex() - } + + // Convert the name back into a reference string, if we got a name. refname := "" + tag := "" if name != nil { - name = reference.TagNameOnly(name) - refname = verboseName(name) + if sum.Validate() == nil { + canonical, err := reference.WithDigest(name, sum) + if err != nil { + return nil, errors.Wrapf(err, "error mixing name %q with digest %q", name, sum) + } + refname = verboseName(canonical) + } else { + name = reference.TagNameOnly(name) + tagged, ok := name.(reference.Tagged) + if !ok { + return nil, errors.Errorf("error parsing possibly-tagless name %q", ref) + } + refname = verboseName(name) + tag = tagged.Tag() + } } if refname == "" { logrus.Debugf("parsed reference into %q", storeSpec+"@"+id) @@ -163,7 +230,7 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) ( } else { logrus.Debugf("parsed reference into %q", storeSpec+refname+"@"+id) } - return newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, refname, id, name), nil + return newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, refname, id, name, tag, sum), nil } func (s *storageTransport) GetStore() (storage.Store, error) { @@ -182,11 +249,14 @@ func (s *storageTransport) GetStore() (storage.Store, error) { return s.store, nil } -// ParseReference takes a name and/or an ID ("_name_"/"@_id_"/"_name_@_id_"), +// ParseReference takes a name and a tag or digest and/or ID +// ("_name_"/"@_id_"/"_name_:_tag_"/"_name_:_tag_@_id_"/"_name_@_digest_"/"_name_@_digest_@_id_"), // possibly prefixed with a store specifier in the form "[_graphroot_]" or // "[_driver_@_graphroot_]" or "[_driver_@_graphroot_+_runroot_]" or // "[_driver_@_graphroot_:_options_]" or "[_driver_@_graphroot_+_runroot_:_options_]", // tries to figure out which it is, and returns it in a reference object. +// If _id_ is the ID of an image that's present in local storage, it can be truncated, and +// even be specified as if it were a _name_, value. func (s *storageTransport) ParseReference(reference string) (types.ImageReference, error) { var store storage.Store // Check if there's a store location prefix. 
If there is, then it @@ -335,7 +405,7 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error { if err != nil { return err } - _, err = ddigest.Parse("sha256:" + scopeInfo[1]) + _, err = digest.Parse("sha256:" + scopeInfo[1]) if err != nil { return err } @@ -345,11 +415,28 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error { return nil } -func verboseName(name reference.Named) string { - name = reference.TagNameOnly(name) +func verboseName(r reference.Reference) string { + if r == nil { + return "" + } + named, isNamed := r.(reference.Named) + digested, isDigested := r.(reference.Digested) + tagged, isTagged := r.(reference.Tagged) + name := "" tag := "" - if tagged, ok := name.(reference.NamedTagged); ok { - tag = ":" + tagged.Tag() + sum := "" + if isNamed { + name = (reference.TrimNamed(named)).String() + } + if isTagged { + if tagged.Tag() != "" { + tag = ":" + tagged.Tag() + } + } + if isDigested { + if digested.Digest().Validate() == nil { + sum = "@" + digested.Digest().String() + } } - return name.Name() + tag + return name + tag + sum } diff --git a/storage/storage_transport_test.go b/storage/storage_transport_test.go index bcccfcf45d..c5a4a6db15 100644 --- a/storage/storage_transport_test.go +++ b/storage/storage_transport_test.go @@ -26,7 +26,7 @@ func TestTransportParseStoreReference(t *testing.T) { {"[garbage]busybox", "docker.io/library/busybox:latest", ""}, // Store specifier is overridden by the store we pass to ParseStoreReference {"UPPERCASEISINVALID", "", ""}, // Invalid single-component name - {"sha256:" + sha256digestHex, "docker.io/library/sha256:" + sha256digestHex, ""}, // Valid single-component name; the hex part is not an ID unless it has a "@" prefix + {"sha256:" + sha256digestHex, "docker.io/library/sha256:" + sha256digestHex, ""}, // Valid single-component name; the hex part is not an ID unless it has a "@" prefix, so it looks like a tag {sha256digestHex, "", ""}, // Invalid single-component ID; not an ID without a "@" prefix, so it's parsed as a name, but names aren't allowed to look like IDs {"@" + sha256digestHex, "", sha256digestHex}, // Valid single-component ID {"sha256:ab", "docker.io/library/sha256:ab", ""}, // Valid single-component name, explicit tag @@ -37,8 +37,7 @@ func TestTransportParseStoreReference(t *testing.T) { {"UPPERCASEISINVALID@" + sha256digestHex, "", ""}, // Invalid name in name@ID {"busybox@ab", "", ""}, // Invalid ID in name@ID {"busybox@", "", ""}, // Empty ID in name@ID - {"busybox@sha256:" + sha256digestHex, "docker.io/library/busybox:latest", sha256digestHex}, // Valid two-component name, with ID using "sha256:" prefix - {"@" + sha256digestHex, "", sha256digestHex}, // Valid two-component name, with ID only + {"busybox@sha256:" + sha256digestHex, "docker.io/library/busybox@sha256:" + sha256digestHex, ""}, // Valid two-component name, with a digest and no tag {"busybox@" + sha256digestHex, "docker.io/library/busybox:latest", sha256digestHex}, // Valid two-component name, implicit tag {"busybox:notlatest@" + sha256digestHex, "docker.io/library/busybox:notlatest", sha256digestHex}, // Valid two-component name, explicit tag {"docker.io/library/busybox:notlatest@" + sha256digestHex, "docker.io/library/busybox:notlatest", sha256digestHex}, // Valid two-component name, everything explicit @@ -57,7 +56,8 @@ func TestTransportParseStoreReference(t *testing.T) { dockerRef, err := reference.ParseNormalizedNamed(c.expectedRef) require.NoError(t, err) require.NotNil(t, storageRef.name, 
c.input) - assert.Equal(t, dockerRef.String(), storageRef.name.String()) + assert.Equal(t, dockerRef.String(), storageRef.reference) + assert.Equal(t, dockerRef.String(), storageRef.DockerReference().String()) } } }
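
Note for reviewers: the sketch below is a minimal, stdlib-only illustration of the name[:tag][@digest][@imageID] grammar that the reworked ParseStoreReference accepts; it is not part of the patch. The helper splitStorageRef, the regexps, and the sample inputs are all hypothetical, and the real parser additionally normalizes names with reference.ParseNormalizedNamed, expands truncated image IDs against the storage.Store, and strips an optional "[driver@graphroot+runroot:options]" store specifier.

// refgrammar.go — standalone sketch of the reference grammar (hypothetical helper, not library code).
package main

import (
	"fmt"
	"regexp"
	"strings"
)

var (
	imageIDRE = regexp.MustCompile(`^[0-9a-f]{64}$`)        // full image ID
	digestRE  = regexp.MustCompile(`^sha256:[0-9a-f]{64}$`) // canonical digest
)

// splitStorageRef peels components off the right-hand side of ref: the last
// "@" segment is either an image ID or a digest, and a segment before it
// (if any) must be a digest.
func splitStorageRef(ref string) (name, tag, dgst, id string, err error) {
	if i := strings.LastIndex(ref, "@"); i != -1 {
		last := ref[i+1:]
		ref = ref[:i]
		switch {
		case imageIDRE.MatchString(last):
			id = last
		case digestRE.MatchString(last):
			dgst = last
		default:
			return "", "", "", "", fmt.Errorf("%q is neither an image ID nor a digest", last)
		}
	}
	// If the trailing segment was an ID, an earlier "@" segment must be a digest.
	if id != "" {
		if i := strings.LastIndex(ref, "@"); i != -1 {
			if !digestRE.MatchString(ref[i+1:]) {
				return "", "", "", "", fmt.Errorf("%q does not look like an image digest", ref[i+1:])
			}
			dgst = ref[i+1:]
			ref = ref[:i]
		}
	}
	// What remains is a name, optionally with a ":tag" (a registry port such as
	// "localhost:5000/busybox" is not a tag, because a "/" follows the colon).
	if ref != "" {
		if i := strings.LastIndex(ref, ":"); i != -1 && !strings.Contains(ref[i+1:], "/") {
			name, tag = ref[:i], ref[i+1:]
		} else {
			name = ref
		}
	}
	if tag != "" && dgst != "" {
		return "", "", "", "", fmt.Errorf("a reference can carry a tag or a digest, not both")
	}
	if name == "" && dgst == "" && id == "" {
		return "", "", "", "", fmt.Errorf("empty reference")
	}
	return name, tag, dgst, id, nil
}

func main() {
	hex := strings.Repeat("0", 64)
	for _, ref := range []string{
		"busybox:notlatest@" + hex,          // name:tag@imageID
		"busybox@sha256:" + hex,             // name@digest
		"busybox@sha256:" + hex + "@" + hex, // name@digest@imageID
		"@" + hex,                           // imageID only
	} {
		name, tag, dgst, id, err := splitStorageRef(ref)
		fmt.Printf("%s -> name=%q tag=%q digest=%q id=%q err=%v\n", ref, name, tag, dgst, id, err)
	}
}

The peel-from-the-right order mirrors the patch: the trailing "@" segment may be an image ID or a digest, and only after an ID has been peeled off is a second "@" segment interpreted as a digest, which is why name@digest@id parses while name@id@digest is rejected.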