Merge pull request #283 from nalind/multiple-manifests
Teach images to hold multiple manifests
rhatdan authored Feb 7, 2019
2 parents 0c69438 + 096e5b2 commit be84676
Showing 48 changed files with 6,673 additions and 73 deletions.
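
At a high level, this change lets a single image record be located by the digest of any of its manifests, not just one. Below is a minimal sketch of the intended usage, not code from this commit: it assumes an ImageStore has already been opened and locked elsewhere, and that rawManifest and rawManifestList hold two unsigned manifests. The Create, SetBigData, and ByDigest signatures are the ones shown in the diff that follows; the "manifest-list" key, the image name, and the error handling are illustrative only.

package example

import (
	"fmt"
	"time"

	"github.com/containers/storage"
	digest "github.com/opencontainers/go-digest"
)

// addManifests sketches the intended usage; locking, error recovery, and
// real manifest bytes are elided.
func addManifests(istore storage.ImageStore, rawManifest, rawManifestList []byte) error {
	// An empty ID asks the store to generate one; no manually-specified
	// digest is passed, so the image's Digests list will be built entirely
	// from its "manifest"-prefixed big data items.
	img, err := istore.Create("", []string{"example.com/app:latest"}, "", "", time.Time{}, "")
	if err != nil {
		return err
	}
	// Every big data item whose name begins with "manifest" contributes its
	// digest; "manifest-list" is just an example of such a name.
	if err := istore.SetBigData(img.ID, storage.ImageDigestBigDataKey, rawManifest); err != nil {
		return err
	}
	if err := istore.SetBigData(img.ID, "manifest-list", rawManifestList); err != nil {
		return err
	}
	// The image can now be found by the digest of either manifest; for an
	// unsigned manifest this matches what SetBigData computed internally.
	images, err := istore.ByDigest(digest.Canonical.FromBytes(rawManifestList))
	if err != nil {
		return err
	}
	for _, image := range images {
		fmt.Println(image.ID, image.Digests)
	}
	return nil
}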
3 changes: 3 additions & 0 deletions cmd/containers-storage/image.go
@@ -33,6 +33,9 @@ func image(flags *mflag.FlagSet, action string, m storage.Store, args []string)
for _, layerID := range image.MappedTopLayers {
fmt.Printf("Top Layer: %s\n", layerID)
}
for _, digest := range image.Digests {
fmt.Printf("Digest: %s\n", digest.String())
}
for _, name := range image.BigDataNames {
fmt.Printf("Data: %s\n", name)
}
6 changes: 6 additions & 0 deletions cmd/containers-storage/images.go
@@ -31,6 +31,9 @@ func images(flags *mflag.FlagSet, action string, m storage.Store, args []string)
for _, name := range image.Names {
fmt.Printf("\tname: %s\n", name)
}
for _, digest := range image.Digests {
fmt.Printf("\tdigest: %s\n", digest.String())
}
for _, name := range image.BigDataNames {
fmt.Printf("\tdata: %s\n", name)
}
@@ -67,6 +70,9 @@ func imagesByDigest(flags *mflag.FlagSet, action string, m storage.Store, args [
for _, name := range image.Names {
fmt.Printf("\tname: %s\n", name)
}
for _, digest := range image.Digests {
fmt.Printf("\tdigest: %s\n", digest.String())
}
for _, name := range image.BigDataNames {
fmt.Printf("\tdata: %s\n", name)
}
16 changes: 16 additions & 0 deletions docs/containers-storage-images-by-digest.md
@@ -0,0 +1,16 @@
## containers-storage-images-by-digest 1 "February 2019"

## NAME
containers-storage images-by-digest - List known images by digest

## SYNOPSIS
**containers-storage** **images-by-digest** *digest*

## DESCRIPTION
Retrieves information about images which match a specified digest.

## EXAMPLE
**containers-storage images-by-digest sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855**

## SEE ALSO
containers-storage-image(1)
190 changes: 128 additions & 62 deletions images.go
@@ -5,8 +5,10 @@ import (
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"

"github.com/containers/image/manifest"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/truncindex"
@@ -15,9 +17,13 @@ import (
)

const (
// ImageDigestBigDataKey is the name of the big data item whose
// contents we consider useful for computing a "digest" of the
// image, by which we can locate the image later.
// ImageDigestManifestBigDataNamePrefix is a prefix of big data item
// names which we consider to be manifests, used for computing a
// "digest" value for the image as a whole, by which we can locate the
// image later.
ImageDigestManifestBigDataNamePrefix = "manifest"
// ImageDigestBigDataKey is provided for compatibility with older
// versions of the image library. It will be removed in the future.
ImageDigestBigDataKey = "manifest"
)

@@ -27,12 +33,19 @@ type Image struct {
// value which was generated by the library.
ID string `json:"id"`

// Digest is a digest value that we can use to locate the image.
// Digest is a digest value that we can use to locate the image, if one
// was specified at creation-time.
Digest digest.Digest `json:"digest,omitempty"`

// Digests is a list of digest values of the image's manifests, and
// possibly a manually-specified value, that we can use to locate the
// image. If Digest is set, its value is also in this list.
Digests []digest.Digest `json:"-"`

// Names is an optional set of user-defined convenience values. The
// image can be referred to by its ID or any of its names. Names are
// unique among images.
// unique among images, and are often the text representation of tagged
// or canonical references.
Names []string `json:"names,omitempty"`

// TopLayer is the ID of the topmost layer of the image itself, if the
@@ -92,8 +105,10 @@ type ROImageStore interface {
// Images returns a slice enumerating the known images.
Images() ([]Image, error)

// Images returns a slice enumerating the images which have a big data
// item with the name ImageDigestBigDataKey and the specified digest.
// ByDigest returns a slice enumerating the images which have either an
// explicitly-set digest, or a big data item with a name that starts
// with ImageDigestManifestBigDataNamePrefix, which matches the
// specified digest.
ByDigest(d digest.Digest) ([]*Image, error)
}

@@ -111,7 +126,8 @@ type ImageStore interface {
Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (*Image, error)

// SetNames replaces the list of names associated with an image with the
// supplied values.
// supplied values. The values are expected to be valid normalized
// named image references.
SetNames(id string, names []string) error

// Delete removes the record of the image.
@@ -135,6 +151,7 @@ func copyImage(i *Image) *Image {
return &Image{
ID: i.ID,
Digest: i.Digest,
Digests: copyDigestSlice(i.Digests),
Names: copyStringSlice(i.Names),
TopLayer: i.TopLayer,
MappedTopLayers: copyStringSlice(i.MappedTopLayers),
@@ -147,6 +164,17 @@ func copyImage(i *Image) *Image {
}
}

func copyImageSlice(slice []*Image) []*Image {
if len(slice) > 0 {
cp := make([]*Image, len(slice))
for i := range slice {
cp[i] = copyImage(slice[i])
}
return cp
}
return nil
}

func (r *imageStore) Images() ([]Image, error) {
images := make([]Image, len(r.images))
for i := range r.images {
@@ -167,6 +195,43 @@ func (r *imageStore) datapath(id, key string) string {
return filepath.Join(r.datadir(id), makeBigDataBaseName(key))
}

// bigDataNameIsManifest determines if a big data item with the specified name
// is considered to be representative of the image, in that its digest can be
// said to also be the image's digest. Currently, if its name is, or begins
// with, "manifest", we say that it is.
func bigDataNameIsManifest(name string) bool {
return strings.HasPrefix(name, ImageDigestManifestBigDataNamePrefix)
}

// recomputeDigests takes a fixed digest and a name-to-digest map and builds a
// list of the unique values that would identify the image.
func (image *Image) recomputeDigests() error {
validDigests := make([]digest.Digest, 0, len(image.BigDataDigests)+1)
digests := make(map[digest.Digest]struct{})
if image.Digest != "" {
if err := image.Digest.Validate(); err != nil {
return errors.Wrapf(err, "error validating image digest %q", string(image.Digest))
}
digests[image.Digest] = struct{}{}
validDigests = append(validDigests, image.Digest)
}
for name, digest := range image.BigDataDigests {
if !bigDataNameIsManifest(name) {
continue
}
if digest.Validate() != nil {
return errors.Wrapf(digest.Validate(), "error validating digest %q for big data item %q", string(digest), name)
}
// Deduplicate the digest values.
if _, known := digests[digest]; !known {
digests[digest] = struct{}{}
validDigests = append(validDigests, digest)
}
}
image.Digests = validDigests
return nil
}
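
The deduplication above is easiest to see with concrete values. Here is a sketch, written as a test that would live inside this package (recomputeDigests is unexported); the ID, the digest value, and the "signatures" key are made up for illustration.

package storage

import (
	"testing"

	digest "github.com/opencontainers/go-digest"
)

func TestRecomputeDigestsSketch(t *testing.T) {
	manifestDigest := digest.Digest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
	image := &Image{
		ID:     "0123456789abcdef",
		Digest: manifestDigest, // manually-specified value
		BigDataDigests: map[string]digest.Digest{
			"manifest":   manifestDigest,                                // same value as the explicit Digest
			"signatures": digest.Canonical.FromString("not a manifest"), // skipped: no "manifest" prefix
		},
	}
	if err := image.recomputeDigests(); err != nil {
		t.Fatal(err)
	}
	// The explicit Digest and the "manifest" item's digest are the same
	// value, so Digests holds exactly one entry; had they differed, both
	// would be listed.
	if len(image.Digests) != 1 || image.Digests[0] != manifestDigest {
		t.Fatalf("unexpected digest list: %v", image.Digests)
	}
}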

func (r *imageStore) Load() error {
shouldSave := false
rpath := r.imagespath()
@@ -189,17 +254,18 @@ func (r *imageStore) Load() error {
r.removeName(conflict, name)
shouldSave = true
}
names[name] = images[n]
}
// Implicit digest
if digest, ok := image.BigDataDigests[ImageDigestBigDataKey]; ok {
digests[digest] = append(digests[digest], images[n])
// Compute the digest list.
err = image.recomputeDigests()
if err != nil {
return errors.Wrapf(err, "error computing digests for image with ID %q (%v)", image.ID, image.Names)
}
// Explicit digest
if image.Digest == "" {
image.Digest = image.BigDataDigests[ImageDigestBigDataKey]
} else if image.Digest != image.BigDataDigests[ImageDigestBigDataKey] {
digests[image.Digest] = append(digests[image.Digest], images[n])
for _, name := range image.Names {
names[name] = image
}
for _, digest := range image.Digests {
list := digests[digest]
digests[digest] = append(list, image)
}
}
}
@@ -333,12 +399,12 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
}
}
if _, idInUse := r.byid[id]; idInUse {
return nil, ErrDuplicateID
return nil, errors.Wrapf(ErrDuplicateID, "an image with ID %q already exists", id)
}
names = dedupeNames(names)
for _, name := range names {
if _, nameInUse := r.byname[name]; nameInUse {
return nil, ErrDuplicateName
if image, nameInUse := r.byname[name]; nameInUse {
return nil, errors.Wrapf(ErrDuplicateName, "image name %q is already associated with image %q", name, image.ID)
}
}
if created.IsZero() {
@@ -348,6 +414,7 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
image = &Image{
ID: id,
Digest: searchableDigest,
Digests: nil,
Names: names,
TopLayer: layer,
Metadata: metadata,
Expand All @@ -357,16 +424,20 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
Created: created,
Flags: make(map[string]interface{}),
}
err := image.recomputeDigests()
if err != nil {
return nil, errors.Wrapf(err, "error validating digests for new image")
}
r.images = append(r.images, image)
r.idindex.Add(id)
r.byid[id] = image
if searchableDigest != "" {
list := r.bydigest[searchableDigest]
r.bydigest[searchableDigest] = append(list, image)
}
for _, name := range names {
r.byname[name] = image
}
for _, digest := range image.Digests {
list := r.bydigest[digest]
r.bydigest[digest] = append(list, image)
}
err = r.Save()
image = copyImage(image)
}
@@ -444,6 +515,14 @@ func (r *imageStore) Delete(id string) error {
for _, name := range image.Names {
delete(r.byname, name)
}
for _, digest := range image.Digests {
prunedList := imageSliceWithoutValue(r.bydigest[digest], image)
if len(prunedList) == 0 {
delete(r.bydigest, digest)
} else {
r.bydigest[digest] = prunedList
}
}
if toDeleteIndex != -1 {
// delete the image at toDeleteIndex
if toDeleteIndex == len(r.images)-1 {
@@ -452,28 +531,6 @@ func (r *imageStore) Delete(id string) error {
r.images = append(r.images[:toDeleteIndex], r.images[toDeleteIndex+1:]...)
}
}
if digest, ok := image.BigDataDigests[ImageDigestBigDataKey]; ok {
// remove the image from the digest-based index
if list, ok := r.bydigest[digest]; ok {
prunedList := imageSliceWithoutValue(list, image)
if len(prunedList) == 0 {
delete(r.bydigest, digest)
} else {
r.bydigest[digest] = prunedList
}
}
}
if image.Digest != "" {
// remove the image's hard-coded digest from the digest-based index
if list, ok := r.bydigest[image.Digest]; ok {
prunedList := imageSliceWithoutValue(list, image)
if len(prunedList) == 0 {
delete(r.bydigest, image.Digest)
} else {
r.bydigest[image.Digest] = prunedList
}
}
}
if err := r.Save(); err != nil {
return err
}
Expand Down Expand Up @@ -504,7 +561,7 @@ func (r *imageStore) Exists(id string) bool {

func (r *imageStore) ByDigest(d digest.Digest) ([]*Image, error) {
if images, ok := r.bydigest[d]; ok {
return images, nil
return copyImageSlice(images), nil
}
return nil, ErrImageUnknown
}
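
ByDigest now hands back copies made with copyImageSlice rather than the index's own slice. A small illustration of why that matters, with the store and digest assumed to come from elsewhere and the extra tag purely illustrative:

package example

import (
	"github.com/containers/storage"
	digest "github.com/opencontainers/go-digest"
)

// renameFirstMatch shows the effect of the defensive copy: edits to the
// returned Images stay local to the caller and cannot corrupt the store's
// in-memory records or its by-digest index.
func renameFirstMatch(istore storage.ROImageStore, d digest.Digest) error {
	images, err := istore.ByDigest(d)
	if err != nil {
		return err // ErrImageUnknown when nothing matches
	}
	images[0].Names = append(images[0].Names, "localhost/scratch:wip")
	return nil
}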
@@ -606,10 +663,19 @@ func (r *imageStore) SetBigData(id, key string, data []byte) error {
if !ok {
return ErrImageUnknown
}
if err := os.MkdirAll(r.datadir(image.ID), 0700); err != nil {
err := os.MkdirAll(r.datadir(image.ID), 0700)
if err != nil {
return err
}
err := ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0600)
var newDigest digest.Digest
if bigDataNameIsManifest(key) {
if newDigest, err = manifest.Digest(data); err != nil {
return errors.Wrapf(err, "error digesting manifest")
}
} else {
newDigest = digest.Canonical.FromBytes(data)
}
err = ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0600)
if err == nil {
save := false
if image.BigDataSizes == nil {
Expand All @@ -621,7 +687,6 @@ func (r *imageStore) SetBigData(id, key string, data []byte) error {
image.BigDataDigests = make(map[string]digest.Digest)
}
oldDigest, digestOk := image.BigDataDigests[key]
newDigest := digest.Canonical.FromBytes(data)
image.BigDataDigests[key] = newDigest
if !sizeOk || oldSize != image.BigDataSizes[key] || !digestOk || oldDigest != newDigest {
save = true
@@ -637,20 +702,21 @@ func (r *imageStore) SetBigData(id, key string, data []byte) error {
image.BigDataNames = append(image.BigDataNames, key)
save = true
}
if key == ImageDigestBigDataKey {
if oldDigest != "" && oldDigest != newDigest && oldDigest != image.Digest {
// remove the image from the list of images in the digest-based
// index which corresponds to the old digest for this item, unless
// it's also the hard-coded digest
if list, ok := r.bydigest[oldDigest]; ok {
prunedList := imageSliceWithoutValue(list, image)
if len(prunedList) == 0 {
delete(r.bydigest, oldDigest)
} else {
r.bydigest[oldDigest] = prunedList
}
for _, oldDigest := range image.Digests {
// remove the image from the list of images in the digest-based index
if list, ok := r.bydigest[oldDigest]; ok {
prunedList := imageSliceWithoutValue(list, image)
if len(prunedList) == 0 {
delete(r.bydigest, oldDigest)
} else {
r.bydigest[oldDigest] = prunedList
}
}
}
if err = image.recomputeDigests(); err != nil {
return errors.Wrapf(err, "error loading recomputing image digest information for %s", image.ID)
}
for _, newDigest := range image.Digests {
// add the image to the list of images in the digest-based index which
// corresponds to the new digest for this item, unless it's already there
list := r.bydigest[newDigest]
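
One detail worth calling out from the SetBigData hunk above: items whose names mark them as manifests are digested with manifest.Digest, while every other big data item keeps using digest.Canonical.FromBytes. A short sketch of the difference, assuming rawManifest holds a single manifest; the helper name is made up:

package example

import (
	"fmt"

	"github.com/containers/image/manifest"
	digest "github.com/opencontainers/go-digest"
)

// compareDigests contrasts the two digesters used by SetBigData.
func compareDigests(rawManifest []byte) error {
	formatAware, err := manifest.Digest(rawManifest) // used for "manifest"-prefixed items
	if err != nil {
		return err
	}
	plain := digest.Canonical.FromBytes(rawManifest) // used for all other items
	// The two agree for unsigned schema2/OCI manifests; for signed schema1
	// manifests, manifest.Digest strips the signatures before hashing.
	fmt.Println(formatAware, plain, formatAware == plain)
	return nil
}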
2 changes: 1 addition & 1 deletion images_ffjson.go

Some generated files are not rendered by default.
