Skip to content

Commit

Permalink
Teach images to hold multiple manifests
Browse files Browse the repository at this point in the history
Change how we compute digests for BigData items with names that start
with "manifest" so that we use the image library's manifest.Digest()
function, which knows how to preprocess schema1 manifests to get the
right value, instead of just trying to finesse it.

Track the digests of multiple manifest-named items for images.

Signed-off-by: Nalin Dahyabhai <[email protected]>
  • Loading branch information
nalind committed Feb 6, 2019
1 parent 488134f commit ac2acc8
Show file tree
Hide file tree
Showing 6 changed files with 217 additions and 61 deletions.
3 changes: 3 additions & 0 deletions cmd/containers-storage/image.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,9 @@ func image(flags *mflag.FlagSet, action string, m storage.Store, args []string)
for _, layerID := range image.MappedTopLayers {
fmt.Printf("Top Layer: %s\n", layerID)
}
for _, digest := range image.Digests {
fmt.Printf("Digest: %s\n", digest.String())
}
for _, name := range image.BigDataNames {
fmt.Printf("Data: %s\n", name)
}
Expand Down
6 changes: 6 additions & 0 deletions cmd/containers-storage/images.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,9 @@ func images(flags *mflag.FlagSet, action string, m storage.Store, args []string)
for _, name := range image.Names {
fmt.Printf("\tname: %s\n", name)
}
for _, digest := range image.Digests {
fmt.Printf("\tdigest: %s\n", digest.String())
}
for _, name := range image.BigDataNames {
fmt.Printf("\tdata: %s\n", name)
}
Expand Down Expand Up @@ -67,6 +70,9 @@ func imagesByDigest(flags *mflag.FlagSet, action string, m storage.Store, args [
for _, name := range image.Names {
fmt.Printf("\tname: %s\n", name)
}
for _, digest := range image.Digests {
fmt.Printf("\tdigest: %s\n", digest.String())
}
for _, name := range image.BigDataNames {
fmt.Printf("\tdata: %s\n", name)
}
Expand Down
16 changes: 16 additions & 0 deletions docs/containers-storage-images-by-digest.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
## containers-storage-images-by-digest 1 "February 2019"

## NAME
containers-storage images-by-digest - List known images by digest

## SYNOPSIS
**containers-storage** **images-by-digest** *digest*

## DESCRIPTION
Retrieves information about all images whose digests match the specified digest value.

## EXAMPLE
**containers-storage images-by-digest sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855**

## SEE ALSO
containers-storage-image(1)
177 changes: 116 additions & 61 deletions images.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,10 @@ import (
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"

"github.com/containers/image/manifest"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/truncindex"
Expand All @@ -15,9 +17,13 @@ import (
)

const (
// ImageDigestBigDataKey is the name of the big data item whose
// contents we consider useful for computing a "digest" of the
// image, by which we can locate the image later.
// ImageDigestManifestBigDataNamePrefix is a prefix of big data item
// names which we consider to be manifests, used for computing a
// "digest" value for the image as a whole, by which we can locate the
// image later.
ImageDigestManifestBigDataNamePrefix = "manifest"
// ImageDigestBigDataKey is provided for compatibility with older
// versions of the image library. It will be removed in the future.
ImageDigestBigDataKey = "manifest"
)

Expand All @@ -27,12 +33,19 @@ type Image struct {
// value which was generated by the library.
ID string `json:"id"`

// Digest is a digest value that we can use to locate the image.
// Digest is a digest value that we can use to locate the image, if one
// was specified at creation-time.
Digest digest.Digest `json:"digest,omitempty"`

// Digests is a list of digest values of the image's manifests, and
// possibly a manually-specified value, that we can use to locate the
// image. If Digest is set, its value is also in this list.
Digests []digest.Digest `json:"-"`

// Names is an optional set of user-defined convenience values. The
// image can be referred to by its ID or any of its names. Names are
// unique among images.
// unique among images, and are often the text representation of tagged
// or canonical references.
Names []string `json:"names,omitempty"`

// TopLayer is the ID of the topmost layer of the image itself, if the
Expand Down Expand Up @@ -92,8 +105,10 @@ type ROImageStore interface {
// Images returns a slice enumerating the known images.
Images() ([]Image, error)

// Images returns a slice enumerating the images which have a big data
// item with the name ImageDigestBigDataKey and the specified digest.
// ByDigest returns a slice enumerating the images which have either an
// explicitly-set digest, or a big data item with a name that starts
// with ImageDigestManifestBigDataNamePrefix, which matches the
// specified digest.
ByDigest(d digest.Digest) ([]*Image, error)
}

Expand All @@ -111,7 +126,8 @@ type ImageStore interface {
Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (*Image, error)

// SetNames replaces the list of names associated with an image with the
// supplied values.
// supplied values. The values are expected to be valid normalized
// named image references.
SetNames(id string, names []string) error

// Delete removes the record of the image.
Expand All @@ -135,6 +151,7 @@ func copyImage(i *Image) *Image {
return &Image{
ID: i.ID,
Digest: i.Digest,
Digests: copyDigestSlice(i.Digests),
Names: copyStringSlice(i.Names),
TopLayer: i.TopLayer,
MappedTopLayers: copyStringSlice(i.MappedTopLayers),
Expand Down Expand Up @@ -167,6 +184,42 @@ func (r *imageStore) datapath(id, key string) string {
return filepath.Join(r.datadir(id), makeBigDataBaseName(key))
}

// bigDataNameIsManifest determines if a big data item with the specified name
// is considered to be representative of the image, in that its digest can be
// said to also be the image's digest. Currently, if its name is, or begins
// with, "manifest", we say that it is.
func bigDataNameIsManifest(name string) bool {
return strings.HasPrefix(name, ImageDigestManifestBigDataNamePrefix)
}

// recomputeDigests takes a fixed digest and a name-to-digest map and builds a
// list of the unique values that would identify the image: the fixed digest
// (if any) first, followed by the digests of manifest-named big data items,
// with duplicates removed. Note that the order of the manifest-derived
// entries follows map iteration order and is therefore not deterministic.
// It returns an error if any digest fails validation.
func recomputeDigests(singleDigest digest.Digest, bigDataDigests map[string]digest.Digest) (validDigests []digest.Digest, err error) {
	validDigests = make([]digest.Digest, 0, len(bigDataDigests)+1)
	// Track which values we have already emitted so the result is duplicate-free.
	seen := make(map[digest.Digest]struct{}, len(bigDataDigests)+1)
	if singleDigest != "" {
		if err := singleDigest.Validate(); err != nil {
			return nil, errors.Wrapf(err, "error validating digest %q", string(singleDigest))
		}
		seen[singleDigest] = struct{}{}
		validDigests = append(validDigests, singleDigest)
	}
	for name, d := range bigDataDigests {
		// Only items whose names mark them as manifests contribute digests.
		if !bigDataNameIsManifest(name) {
			continue
		}
		// Validate once and reuse the error, rather than calling Validate twice.
		if err := d.Validate(); err != nil {
			return nil, errors.Wrapf(err, "error validating digest %q", string(d))
		}
		// Deduplicate the digest values.
		if _, known := seen[d]; !known {
			seen[d] = struct{}{}
			validDigests = append(validDigests, d)
		}
	}
	return validDigests, nil
}

func (r *imageStore) Load() error {
shouldSave := false
rpath := r.imagespath()
Expand All @@ -189,17 +242,18 @@ func (r *imageStore) Load() error {
r.removeName(conflict, name)
shouldSave = true
}
names[name] = images[n]
}
// Implicit digest
if digest, ok := image.BigDataDigests[ImageDigestBigDataKey]; ok {
digests[digest] = append(digests[digest], images[n])
// Compute the digest list.
image.Digests, err = recomputeDigests(image.Digest, image.BigDataDigests)
if err != nil {
return errors.Wrapf(err, "error computing digests for image with ID %q (%v)", image.ID, image.Names)
}
// Explicit digest
if image.Digest == "" {
image.Digest = image.BigDataDigests[ImageDigestBigDataKey]
} else if image.Digest != image.BigDataDigests[ImageDigestBigDataKey] {
digests[image.Digest] = append(digests[image.Digest], images[n])
for _, name := range image.Names {
names[name] = image
}
for _, digest := range image.Digests {
list := digests[digest]
digests[digest] = append(list, image)
}
}
}
Expand Down Expand Up @@ -333,12 +387,16 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
}
}
if _, idInUse := r.byid[id]; idInUse {
return nil, ErrDuplicateID
return nil, errors.Wrapf(ErrDuplicateID, "an image with ID %q already exists", id)
}
digests, err := recomputeDigests(searchableDigest, nil)
if err != nil {
return nil, errors.Wrapf(err, "error validating digests (%v) for new image", names)
}
names = dedupeNames(names)
for _, name := range names {
if _, nameInUse := r.byname[name]; nameInUse {
return nil, ErrDuplicateName
if image, nameInUse := r.byname[name]; nameInUse {
return nil, errors.Wrapf(ErrDuplicateName, "image name %q is already associated with image %q", name, image.ID)
}
}
if created.IsZero() {
Expand All @@ -348,6 +406,7 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
image = &Image{
ID: id,
Digest: searchableDigest,
Digests: digests,
Names: names,
TopLayer: layer,
Metadata: metadata,
Expand All @@ -360,13 +419,13 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
r.images = append(r.images, image)
r.idindex.Add(id)
r.byid[id] = image
if searchableDigest != "" {
list := r.bydigest[searchableDigest]
r.bydigest[searchableDigest] = append(list, image)
}
for _, name := range names {
r.byname[name] = image
}
for _, digest := range digests {
list := r.bydigest[digest]
r.bydigest[digest] = append(list, image)
}
err = r.Save()
image = copyImage(image)
}
Expand Down Expand Up @@ -444,6 +503,14 @@ func (r *imageStore) Delete(id string) error {
for _, name := range image.Names {
delete(r.byname, name)
}
for _, digest := range image.Digests {
prunedList := imageSliceWithoutValue(r.bydigest[digest], image)
if len(prunedList) == 0 {
delete(r.bydigest, digest)
} else {
r.bydigest[digest] = prunedList
}
}
if toDeleteIndex != -1 {
// delete the image at toDeleteIndex
if toDeleteIndex == len(r.images)-1 {
Expand All @@ -452,28 +519,6 @@ func (r *imageStore) Delete(id string) error {
r.images = append(r.images[:toDeleteIndex], r.images[toDeleteIndex+1:]...)
}
}
if digest, ok := image.BigDataDigests[ImageDigestBigDataKey]; ok {
// remove the image from the digest-based index
if list, ok := r.bydigest[digest]; ok {
prunedList := imageSliceWithoutValue(list, image)
if len(prunedList) == 0 {
delete(r.bydigest, digest)
} else {
r.bydigest[digest] = prunedList
}
}
}
if image.Digest != "" {
// remove the image's hard-coded digest from the digest-based index
if list, ok := r.bydigest[image.Digest]; ok {
prunedList := imageSliceWithoutValue(list, image)
if len(prunedList) == 0 {
delete(r.bydigest, image.Digest)
} else {
r.bydigest[image.Digest] = prunedList
}
}
}
if err := r.Save(); err != nil {
return err
}
Expand Down Expand Up @@ -606,10 +651,19 @@ func (r *imageStore) SetBigData(id, key string, data []byte) error {
if !ok {
return ErrImageUnknown
}
if err := os.MkdirAll(r.datadir(image.ID), 0700); err != nil {
err := os.MkdirAll(r.datadir(image.ID), 0700)
if err != nil {
return err
}
err := ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0600)
var newDigest digest.Digest
if bigDataNameIsManifest(key) {
if newDigest, err = manifest.Digest(data); err != nil {
return errors.Wrapf(err, "error digesting manifest")
}
} else {
newDigest = digest.Canonical.FromBytes(data)
}
err = ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0600)
if err == nil {
save := false
if image.BigDataSizes == nil {
Expand All @@ -621,7 +675,6 @@ func (r *imageStore) SetBigData(id, key string, data []byte) error {
image.BigDataDigests = make(map[string]digest.Digest)
}
oldDigest, digestOk := image.BigDataDigests[key]
newDigest := digest.Canonical.FromBytes(data)
image.BigDataDigests[key] = newDigest
if !sizeOk || oldSize != image.BigDataSizes[key] || !digestOk || oldDigest != newDigest {
save = true
Expand All @@ -637,20 +690,22 @@ func (r *imageStore) SetBigData(id, key string, data []byte) error {
image.BigDataNames = append(image.BigDataNames, key)
save = true
}
if key == ImageDigestBigDataKey {
if oldDigest != "" && oldDigest != newDigest && oldDigest != image.Digest {
// remove the image from the list of images in the digest-based
// index which corresponds to the old digest for this item, unless
// it's also the hard-coded digest
if list, ok := r.bydigest[oldDigest]; ok {
prunedList := imageSliceWithoutValue(list, image)
if len(prunedList) == 0 {
delete(r.bydigest, oldDigest)
} else {
r.bydigest[oldDigest] = prunedList
}
for _, oldDigest := range image.Digests {
// remove the image from the list of images in the digest-based index
if list, ok := r.bydigest[oldDigest]; ok {
prunedList := imageSliceWithoutValue(list, image)
if len(prunedList) == 0 {
delete(r.bydigest, oldDigest)
} else {
r.bydigest[oldDigest] = prunedList
}
}
}
image.Digests, err = recomputeDigests(image.Digest, image.BigDataDigests)
if err != nil {
return errors.Wrapf(err, "error loading recomputing image digest information for %s", image.ID)
}
for _, newDigest := range image.Digests {
// add the image to the list of images in the digest-based index which
// corresponds to the new digest for this item, unless it's already there
list := r.bydigest[newDigest]
Expand Down
9 changes: 9 additions & 0 deletions store.go
Original file line number Diff line number Diff line change
Expand Up @@ -3157,6 +3157,15 @@ func copyStringDigestMap(m map[string]digest.Digest) map[string]digest.Digest {
return ret
}

// copyDigestSlice returns an independent copy of the supplied digest slice,
// or nil when there is nothing to copy, so that callers can mutate the
// result without affecting the original.
func copyDigestSlice(slice []digest.Digest) []digest.Digest {
	if len(slice) == 0 {
		return nil
	}
	return append(make([]digest.Digest, 0, len(slice)), slice...)
}

// copyStringInterfaceMap still forces us to assume that the interface{} is
// a non-pointer scalar value
func copyStringInterfaceMap(m map[string]interface{}) map[string]interface{} {
Expand Down
Loading

0 comments on commit ac2acc8

Please sign in to comment.