Skip to content

Commit

Permalink
Enable specifying the compression type of all layers of the finally expor…
Browse files Browse the repository at this point in the history
…ted image

Signed-off-by: ktock <[email protected]>
  • Loading branch information
ktock committed Apr 1, 2021
1 parent 3b49f99 commit a7b0faa
Show file tree
Hide file tree
Showing 11 changed files with 968 additions and 37 deletions.
4 changes: 2 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -230,8 +230,8 @@ Keys supported by image output:
* `unpack=true`: unpack image after creation (for use with containerd)
* `dangling-name-prefix=[value]`: name image with `prefix@<digest>` , used for anonymous images
* `name-canonical=true`: add additional canonical name `name@<digest>`
* `compression=[uncompressed,gzip]`: choose compression type for layer, gzip is default value

* `compression=[uncompressed,gzip]`: choose compression type for layers newly created and cached, gzip is default value
* `compression-all=[uncompressed,gzip]`: choose the compression type for all layers (including already existing layers) when exporting. The compression type specified by the `compression` flag is used by default.

If credentials are required, `buildctl` will attempt to read Docker configuration file `$DOCKER_CONFIG/config.json`.
`$DOCKER_CONFIG` defaults to `~/.docker`.
Expand Down
63 changes: 63 additions & 0 deletions client/client_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1857,6 +1857,21 @@ func testBuildExportWithUncompressed(t *testing.T, sb integration.Sandbox) {
}, nil)
require.NoError(t, err)

allCompressedTarget := registry + "/buildkit/build/exporter:withallcompressed"
_, err = c.Solve(context.TODO(), def, SolveOpt{
Exports: []ExportEntry{
{
Type: ExporterImage,
Attrs: map[string]string{
"name": allCompressedTarget,
"push": "true",
"compression-all": "gzip",
},
},
},
}, nil)
require.NoError(t, err)

if cdAddress == "" {
t.Skip("rest of test requires containerd worker")
}
Expand All @@ -1865,9 +1880,12 @@ func testBuildExportWithUncompressed(t *testing.T, sb integration.Sandbox) {
require.NoError(t, err)
err = client.ImageService().Delete(ctx, compressedTarget, images.SynchronousDelete())
require.NoError(t, err)
err = client.ImageService().Delete(ctx, allCompressedTarget, images.SynchronousDelete())
require.NoError(t, err)

checkAllReleasable(t, c, sb, true)

// check if the new layer is compressed with compression option
img, err := client.Pull(ctx, compressedTarget)
require.NoError(t, err)

Expand Down Expand Up @@ -1906,6 +1924,51 @@ func testBuildExportWithUncompressed(t *testing.T, sb integration.Sandbox) {
require.True(t, ok)
require.Equal(t, int32(item.Header.Typeflag), tar.TypeReg)
require.Equal(t, []byte("gzip"), item.Data)

err = client.ImageService().Delete(ctx, compressedTarget, images.SynchronousDelete())
require.NoError(t, err)

checkAllReleasable(t, c, sb, true)

// check if all layers are compressed with compression-all option
img, err = client.Pull(ctx, allCompressedTarget)
require.NoError(t, err)

dt, err = content.ReadBlob(ctx, img.ContentStore(), img.Target())
require.NoError(t, err)

mfst = struct {
MediaType string `json:"mediaType,omitempty"`
ocispec.Manifest
}{}

err = json.Unmarshal(dt, &mfst)
require.NoError(t, err)
require.Equal(t, 2, len(mfst.Layers))
require.Equal(t, images.MediaTypeDockerSchema2LayerGzip, mfst.Layers[0].MediaType)
require.Equal(t, images.MediaTypeDockerSchema2LayerGzip, mfst.Layers[1].MediaType)

dt, err = content.ReadBlob(ctx, img.ContentStore(), ocispec.Descriptor{Digest: mfst.Layers[0].Digest})
require.NoError(t, err)

m, err = testutil.ReadTarToMap(dt, true)
require.NoError(t, err)

item, ok = m["data"]
require.True(t, ok)
require.Equal(t, int32(item.Header.Typeflag), tar.TypeReg)
require.Equal(t, []byte("uncompressed"), item.Data)

dt, err = content.ReadBlob(ctx, img.ContentStore(), ocispec.Descriptor{Digest: mfst.Layers[1].Digest})
require.NoError(t, err)

m, err = testutil.ReadTarToMap(dt, true)
require.NoError(t, err)

item, ok = m["data"]
require.True(t, ok)
require.Equal(t, int32(item.Header.Typeflag), tar.TypeReg)
require.Equal(t, []byte("gzip"), item.Data)
}

func testBuildPushAndValidate(t *testing.T, sb integration.Sandbox) {
Expand Down
94 changes: 94 additions & 0 deletions exporter/containerimage/converter.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,94 @@
package containerimage

import (
"compress/gzip"
"context"
"fmt"
"io"

"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/images/converter/uncompress"
"github.com/containerd/containerd/labels"
"github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// gzipLayerConvertFunc is a containerd converter.ConvertFunc that re-encodes a
// single layer blob as gzip inside the same content store. Non-layer
// descriptors and layers that are already gzip-compressed are passed through
// untouched (nil, nil means "no conversion needed" per the converter API).
//
// NOTE(review): the diffID label written below is the digest of the *input*
// bytes, which is only the true diffID when the input layer is uncompressed —
// confirm callers never pass other compressed types (e.g. zstd) here.
func gzipLayerConvertFunc(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) {
	if !images.IsLayerType(desc.MediaType) || isGzipCompressedType(desc.MediaType) {
		// No conversion. No need to return an error here.
		return nil, nil
	}

	// prepare the source and destination
	info, err := cs.Info(ctx, desc.Digest)
	if err != nil {
		return nil, err
	}
	// Carry the source blob's labels over to the converted blob.
	labelz := info.Labels
	if labelz == nil {
		labelz = make(map[string]string)
	}
	ra, err := cs.ReaderAt(ctx, desc)
	if err != nil {
		return nil, err
	}
	defer ra.Close()
	// One ingest ref per source digest; a leftover ingest with the same ref
	// may exist from an interrupted run, hence the Truncate below.
	ref := fmt.Sprintf("convert-gzip-from-%s", desc.Digest)
	w, err := cs.Writer(ctx, content.WithRef(ref))
	if err != nil {
		return nil, err
	}
	defer w.Close()
	if err := w.Truncate(0); err != nil { // Old written data possibly remains
		return nil, err
	}
	zw := gzip.NewWriter(w)
	defer zw.Close()

	// convert this layer: stream the source through gzip into the new blob
	// while the TeeReader simultaneously feeds the digester computing the
	// uncompressed digest (diffID).
	diffID := digest.Canonical.Digester()
	if _, err := io.Copy(zw, io.TeeReader(io.NewSectionReader(ra, 0, ra.Size()), diffID.Hash())); err != nil {
		return nil, err
	}
	if err := zw.Close(); err != nil { // Flush the writer
		return nil, err
	}
	labelz[labels.LabelUncompressed] = diffID.Digest().String() // update diffID label
	// Size/digest 0/"" lets the store compute them; AlreadyExists just means
	// an identical blob was committed concurrently, which is fine.
	if err = w.Commit(ctx, 0, "", content.WithLabels(labelz)); err != nil && !errdefs.IsAlreadyExists(err) {
		return nil, err
	}
	if err := w.Close(); err != nil {
		return nil, err
	}
	// Re-read info to pick up the actual digest/size of the committed blob.
	info, err = cs.Info(ctx, w.Digest())
	if err != nil {
		return nil, err
	}

	// Build the new descriptor: same media type family, gzip variant.
	// Docker schema2 uses the ".gzip" suffix, OCI uses "+gzip".
	newDesc := desc
	if uncompress.IsUncompressedType(newDesc.MediaType) {
		if images.IsDockerType(newDesc.MediaType) {
			newDesc.MediaType += ".gzip"
		} else {
			newDesc.MediaType += "+gzip"
		}
	}
	newDesc.Digest = info.Digest
	newDesc.Size = info.Size
	return &newDesc, nil
}

// isGzipCompressedType reports whether mt is one of the known gzip-compressed
// layer media types (Docker schema2 or OCI, distributable or foreign/non-distributable).
func isGzipCompressedType(mt string) bool {
	return mt == images.MediaTypeDockerSchema2LayerGzip ||
		mt == images.MediaTypeDockerSchema2LayerForeignGzip ||
		mt == ocispec.MediaTypeImageLayerGzip ||
		mt == ocispec.MediaTypeImageLayerNonDistributableGzip
}
56 changes: 34 additions & 22 deletions exporter/containerimage/export.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,15 +29,16 @@ import (
)

// Attribute keys accepted by the image exporter (passed via the solve
// request's exporter attrs, e.g. buildctl's --output flags).
const (
	keyImageName           = "name"                 // image name(s) to tag the result with
	keyPush                = "push"                 // push the image to the registry after export
	keyPushByDigest        = "push-by-digest"       // push without a tag, addressed by digest
	keyInsecure            = "registry.insecure"    // allow pushing over plain HTTP / untrusted TLS
	keyUnpack              = "unpack"               // unpack the image after creation (containerd worker)
	keyDanglingPrefix      = "dangling-name-prefix" // name anonymous images as "prefix@<digest>"
	keyNameCanonical       = "name-canonical"       // additionally tag the canonical "name@<digest>"
	keyLayerCompression    = "compression"          // compression for newly created/cached layers (gzip default)
	keyLayerCompressionAll = "compression-all"      // compression forced onto all layers, including existing ones
	ociTypes               = "oci-mediatypes"       // use OCI media types instead of Docker schema2
)

type Opt struct {
Expand All @@ -63,8 +64,9 @@ func New(opt Opt) (exporter.Exporter, error) {

func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) {
i := &imageExporterInstance{
imageExporter: e,
layerCompression: compression.Default,
imageExporter: e,
layerCompression: compression.Default,
layerCompressionAll: compression.Any,
}

for k, v := range opt {
Expand Down Expand Up @@ -142,6 +144,15 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp
default:
return nil, errors.Errorf("unsupported layer compression type: %v", v)
}
case keyLayerCompressionAll:
switch v {
case "gzip":
i.layerCompressionAll = compression.Gzip
case "uncompressed":
i.layerCompressionAll = compression.Uncompressed
default:
return nil, errors.Errorf("unsupported layer compression type: %v", v)
}
default:
if i.meta == nil {
i.meta = make(map[string][]byte)
Expand All @@ -154,16 +165,17 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp

// imageExporterInstance holds the per-request configuration of the image
// exporter, parsed from the solve request's attrs in Resolve.
type imageExporterInstance struct {
	*imageExporter
	targetName          string           // image name(s) from "name"
	push                bool             // push after export ("push")
	pushByDigest        bool             // push untagged, by digest ("push-by-digest")
	unpack              bool             // unpack after creation ("unpack")
	insecure            bool             // allow insecure registry ("registry.insecure")
	ociTypes            bool             // emit OCI media types ("oci-mediatypes")
	nameCanonical       bool             // also tag canonical name@<digest> ("name-canonical")
	danglingPrefix      string           // prefix for anonymous image names ("dangling-name-prefix")
	layerCompression    compression.Type // compression for new layers; compression.Default if unset
	layerCompressionAll compression.Type // forced compression for all layers; compression.Any means "leave as-is"
	meta                map[string][]byte // unrecognized attrs, passed through as metadata
}

func (e *imageExporterInstance) Name() string {
Expand All @@ -184,7 +196,7 @@ func (e *imageExporterInstance) Export(ctx context.Context, src exporter.Source,
}
defer done(context.TODO())

desc, err := e.opt.ImageWriter.Commit(ctx, src, e.ociTypes, e.layerCompression, sessionID)
desc, err := e.opt.ImageWriter.Commit(ctx, src, e.ociTypes, e.layerCompression, e.layerCompressionAll, sessionID)
if err != nil {
return nil, err
}
Expand Down
55 changes: 54 additions & 1 deletion exporter/containerimage/writer.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,8 @@ import (
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/diff"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/images/converter"
"github.com/containerd/containerd/images/converter/uncompress"
"github.com/containerd/containerd/platforms"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/exporter"
Expand Down Expand Up @@ -44,7 +46,7 @@ type ImageWriter struct {
opt WriterOpt
}

func (ic *ImageWriter) Commit(ctx context.Context, inp exporter.Source, oci bool, compressionType compression.Type, sessionID string) (*ocispec.Descriptor, error) {
func (ic *ImageWriter) Commit(ctx context.Context, inp exporter.Source, oci bool, compressionType compression.Type, forceCompressionType compression.Type, sessionID string) (*ocispec.Descriptor, error) {
platformsBytes, ok := inp.Metadata[exptypes.ExporterPlatformsKey]

if len(inp.Refs) > 0 && !ok {
Expand All @@ -64,6 +66,16 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp exporter.Source, oci bool
mfstDesc.Annotations = make(map[string]string)
}
mfstDesc.Annotations["config.digest"] = configDesc.Digest.String()

if forceCompressionType != compression.Any {
cvtDone := oneOffProgress(ctx, "converting manifest "+(*mfstDesc).Digest.String())
mfstDesc, err = ic.ensureCompressionType(ctx, *mfstDesc, remotes, forceCompressionType)
if err != nil {
return nil, cvtDone(errors.Wrapf(err, "error converting manifest compression %s", (*mfstDesc).Digest))
}
cvtDone(nil)
}

return mfstDesc, nil
}

Expand Down Expand Up @@ -145,6 +157,16 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp exporter.Source, oci bool
}
idxDone(nil)

if forceCompressionType != compression.Any {
cvtDone := oneOffProgress(ctx, "converting manifest list "+idxDigest.String())
newDesc, err := ic.ensureCompressionType(ctx, idxDesc, remotes, forceCompressionType)
if err != nil {
return nil, cvtDone(errors.Wrapf(err, "error converting manifest list compression %s", idxDigest))
}
idxDesc = *newDesc
cvtDone(nil)
}

return &idxDesc, nil
}

Expand Down Expand Up @@ -177,6 +199,37 @@ func (ic *ImageWriter) exportLayers(ctx context.Context, compressionType compres
return out, nil
}

// ensureCompressionType converts every layer referenced by idxDesc (a manifest
// or manifest-list descriptor already committed to the content store) to the
// requested compression type and returns the descriptor of the converted
// image. compression.Any is a no-op: the input descriptor is returned
// unchanged and lazy layers stay lazy. remotes supplies the providers whose
// layers must be un-lazied so the converter can read their contents from the
// content store.
func (ic *ImageWriter) ensureCompressionType(ctx context.Context, idxDesc ocispec.Descriptor, remotes []solver.Remote, compressionType compression.Type) (*ocispec.Descriptor, error) {
	var layerConvertFunc converter.ConvertFunc
	switch compressionType {
	case compression.Any:
		// If no compression type is specified, we don't need to convert it.
		// lazy layers remain lazy.
		return &idxDesc, nil
	case compression.Uncompressed:
		layerConvertFunc = uncompress.LayerConvertFunc
	case compression.Gzip:
		layerConvertFunc = gzipLayerConvertFunc
	default:
		return nil, fmt.Errorf("unknown compression type during conversion: %q", compressionType)
	}

	// unlazy layers as converter uses layer contents in the content store
	// TODO(ktock): un-lazy only layers whose type is different from the target, selectively.
	// this will require patching the containerd converter API.
	for _, r := range remotes {
		if unlazier, ok := r.Provider.(cache.Unlazier); ok {
			if err := unlazier.Unlazy(ctx); err != nil {
				return nil, err
			}
		}
	}

	// convert the index. respect the platform and spec(OCI or Docker) of the original index.
	return converter.DefaultIndexConvertFunc(layerConvertFunc, false, platforms.All)(
		ctx, ic.opt.ContentStore, idxDesc)
}

func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, ref cache.ImmutableRef, config []byte, remote *solver.Remote, oci bool, inlineCache []byte) (*ocispec.Descriptor, *ocispec.Descriptor, error) {
if len(config) == 0 {
var err error
Expand Down
Loading

0 comments on commit a7b0faa

Please sign in to comment.