-
Notifications
You must be signed in to change notification settings - Fork 384
/
Copy pathstorage_dest.go
1510 lines (1393 loc) · 67.1 KB
/
storage_dest.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
//go:build !containers_image_storage_stub
// +build !containers_image_storage_stub
package storage
import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"slices"
	"strconv"
	"sync"
	"sync/atomic"

	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/internal/image"
	"github.com/containers/image/v5/internal/imagedestination/impl"
	"github.com/containers/image/v5/internal/imagedestination/stubs"
	srcImpl "github.com/containers/image/v5/internal/imagesource/impl"
	srcStubs "github.com/containers/image/v5/internal/imagesource/stubs"
	"github.com/containers/image/v5/internal/private"
	"github.com/containers/image/v5/internal/putblobdigest"
	"github.com/containers/image/v5/internal/signature"
	"github.com/containers/image/v5/internal/tmpdir"
	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/pkg/blobinfocache/none"
	"github.com/containers/image/v5/types"
	"github.com/containers/storage"
	graphdriver "github.com/containers/storage/drivers"
	"github.com/containers/storage/pkg/archive"
	"github.com/containers/storage/pkg/chunked"
	"github.com/containers/storage/pkg/ioutils"
	digest "github.com/opencontainers/go-digest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/sirupsen/logrus"
)
var (
	// ErrBlobDigestMismatch could potentially be returned when PutBlob() is given a blob
	// with a digest-based name that doesn't match its contents.
	//
	// Deprecated: PutBlob() doesn't do this any more (it just accepts the caller’s value),
	// and there is no known user of this error.
	ErrBlobDigestMismatch = errors.New("blob digest mismatch")
	// ErrBlobSizeMismatch is returned when PutBlob() is given a blob
	// with an expected size that doesn't match the reader.
	ErrBlobSizeMismatch = errors.New("blob size mismatch")
)
// storageImageDestination is an ImageDestination which stages blobs (in a
// temporary directory or as staged c/storage layers) and commits the result
// into the c/storage store referenced by imageRef.
type storageImageDestination struct {
	impl.Compat
	impl.PropertyMethodsInitialize
	stubs.ImplementsPutBlobPartial
	stubs.AlwaysSupportsSignatures

	imageRef              storageReference
	directory             string          // Temporary directory where we store blobs until Commit() time
	nextTempFileID        atomic.Int32    // A counter that we use for computing filenames to assign to blobs
	manifest              []byte          // (Per-instance) manifest contents, or nil if not yet known.
	manifestMIMEType      string          // Valid if manifest != nil
	manifestDigest        digest.Digest   // Valid if manifest != nil
	untrustedDiffIDValues []digest.Digest // From config’s RootFS.DiffIDs (not even validated to be valid digest.Digest!); or nil if not read yet
	signatures            []byte          // Signature contents, temporary
	signatureses          map[digest.Digest][]byte // Instance signature contents, temporary
	metadata              storageImageMetadata     // Metadata contents being built

	// Mapping from layer (by index) to the associated ID in the storage.
	// It's protected *implicitly* since `commitLayer()`, at any given
	// time, can only be executed by *one* goroutine. Please refer to
	// `queueOrCommit()` for further details on how the single-caller
	// guarantee is implemented.
	indexToStorageID map[int]string

	// A storage destination may be used concurrently, due to HasThreadSafePutBlob.
	lock          sync.Mutex // Protects lockProtected
	lockProtected storageImageDestinationLockProtected
}
// storageImageDestinationLockProtected contains storageImageDestination data which might be
// accessed concurrently, due to HasThreadSafePutBlob.
// _During the concurrent TryReusingBlob/PutBlob/* calls_ (but not necessarily during the final Commit)
// users must hold storageImageDestination.lock.
type storageImageDestinationLockProtected struct {
	currentIndex          int                    // The index of the layer to be committed (i.e., lower indices have already been committed)
	indexToAddedLayerInfo map[int]addedLayerInfo // Mapping from layer (by index) to blob to add to the image

	// Externally, a layer is identified either by (compressed) digest, or by TOC digest
	// (and we assume the TOC digest also uniquely identifies the contents, i.e. there aren’t two
	// different formats/ways to parse a single TOC); internally, we use uncompressed digest (“DiffID”) or a TOC digest.
	// We may or may not know the relationships between these three values.
	//
	// When creating a layer, the c/storage layer metadata and image IDs must _only_ be based on trusted values
	// we have computed ourselves. (Layer reuse can then look up against such trusted values, but it might not
	// recompute those values for incoming layers — the point of the reuse is that we don’t need to consume the incoming layer.)
	//
	// Layer identification: For a layer, at least one of (indexToDiffID, indexToTOCDigest, blobDiffIDs) must be available
	// before commitLayer is called.
	// The layer is identified by the first of the three fields which exists, in that order (and the value must be trusted).
	//
	// WARNING: All values in indexToDiffID, indexToTOCDigest, and blobDiffIDs are _individually_ trusted, but blobDiffIDs is more subtle.
	// The values in indexTo* are all consistent, because the code writing them processed them all at once, and consistently.
	// But it is possible for a layer’s indexToDiffID and indexToTOCDigest to be based on a TOC, without setting blobDiffIDs
	// for the compressed digest of that index, and for blobDiffIDs[compressedDigest] to be set _separately_ while processing some
	// other layer entry. In particular it is possible for indexToDiffID[index] and blobDiffIDs[compressedDigestAtIndex] to refer
	// to mismatching contents.
	// Users of these fields should use trustedLayerIdentityDataLocked, which centralizes the validity logic,
	// instead of interpreting these fields, especially blobDiffIDs, directly.
	//
	// Ideally we wouldn’t have blobDiffIDs, and we would just keep records by index, but the public API does not require the caller
	// to provide layer indices; and configs don’t have layer indices. blobDiffIDs needs to exist for those cases.
	indexToDiffID    map[int]digest.Digest // Mapping from layer index to DiffID
	indexToTOCDigest map[int]digest.Digest // Mapping from layer index to a TOC Digest
	blobDiffIDs      map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs. CAREFUL: See the WARNING above.

	// Layer data: Before commitLayer is called, either at least one of (diffOutputs, indexToAdditionalLayer, filenames)
	// should be available; or indexToDiffID/indexToTOCDigest/blobDiffIDs should be enough to locate an existing c/storage layer.
	// They are looked up in the order they are mentioned above.
	diffOutputs            map[int]*graphdriver.DriverWithDifferOutput // Mapping from layer index to a partially-pulled layer intermediate data
	indexToAdditionalLayer map[int]storage.AdditionalLayer             // Mapping from layer index to their corresponding additional layer
	// Mapping from layer blobsums to names of files we used to hold them. If set, fileSizes and blobDiffIDs must also be set.
	filenames map[digest.Digest]string
	// Mapping from layer blobsums to their sizes. If set, filenames and blobDiffIDs must also be set.
	fileSizes map[digest.Digest]int64

	// Config
	configDigest digest.Digest // "" if N/A or not known yet.
}
// addedLayerInfo records data about a layer to use in this image.
// It is queued per layer index by queueOrCommit and consumed by commitLayer.
type addedLayerInfo struct {
	digest     digest.Digest // Mandatory, the digest of the layer.
	emptyLayer bool          // The layer is an “empty”/“throwaway” one, and may or may not be physically represented in various transport / storage systems. false if the manifest type does not have the concept.
}
// newImageDestination sets us up to write a new image, caching blobs in a temporary
// directory until it's time to Commit() the image.
func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*storageImageDestination, error) {
	directory, err := tmpdir.MkDirBigFileTemp(sys, "storage")
	if err != nil {
		return nil, fmt.Errorf("creating a temporary directory: %w", err)
	}

	// Manifest formats we can store directly; anything else must be converted upstream.
	supportedManifestTypes := []string{
		imgspecv1.MediaTypeImageManifest,
		manifest.DockerV2Schema2MediaType,
		manifest.DockerV2Schema1SignedMediaType,
		manifest.DockerV2Schema1MediaType,
	}
	properties := impl.Properties{
		SupportedManifestMIMETypes: supportedManifestTypes,
		// We ultimately have to decompress layers to populate trees on disk
		// and need to explicitly ask for it here, so that the layers' MIME
		// types can be set accordingly.
		DesiredLayerCompression:        types.PreserveOriginal,
		AcceptsForeignLayerURLs:        false,
		MustMatchRuntimeOS:             true,
		IgnoresEmbeddedDockerReference: true, // Yes, we want the unmodified manifest
		HasThreadSafePutBlob:           true,
	}

	dest := &storageImageDestination{
		PropertyMethodsInitialize: impl.PropertyMethods(properties),
		imageRef:                  imageRef,
		directory:                 directory,
		signatureses:              make(map[digest.Digest][]byte),
		metadata: storageImageMetadata{
			SignatureSizes:  []int{},
			SignaturesSizes: make(map[digest.Digest][]int),
		},
		indexToStorageID: make(map[int]string),
		lockProtected: storageImageDestinationLockProtected{
			indexToAddedLayerInfo:  make(map[int]addedLayerInfo),
			indexToDiffID:          make(map[int]digest.Digest),
			indexToTOCDigest:       make(map[int]digest.Digest),
			blobDiffIDs:            make(map[digest.Digest]digest.Digest),
			diffOutputs:            make(map[int]*graphdriver.DriverWithDifferOutput),
			indexToAdditionalLayer: make(map[int]storage.AdditionalLayer),
			filenames:              make(map[digest.Digest]string),
			fileSizes:              make(map[digest.Digest]int64),
		},
	}
	dest.Compat = impl.AddCompat(dest)
	return dest, nil
}
// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
func (s *storageImageDestination) Reference() types.ImageReference {
	return s.imageRef
}
// Close cleans up the temporary directory and additional layer store handlers.
func (s *storageImageDestination) Close() error {
	// By the time Close is called, no concurrent PutBlob/TryReusingBlob calls
	// are running, so this is outside the scope of HasThreadSafePutBlob and we
	// don’t need to hold s.lock.
	for _, layer := range s.lockProtected.indexToAdditionalLayer {
		layer.Release()
	}
	for _, staged := range s.lockProtected.diffOutputs {
		// Best-effort cleanup; we still want to remove the temporary directory below.
		_ = s.imageRef.transport.store.CleanupStagedLayer(staged)
	}
	return os.RemoveAll(s.directory)
}
// computeNextBlobCacheFile returns a fresh, unique path inside s.directory for
// storing the next incoming blob; each call reserves a new numeric file name
// (safe for concurrent use via the atomic counter).
func (s *storageImageDestination) computeNextBlobCacheFile() string {
	// strconv.Itoa is the idiomatic (and cheaper) way to format a plain integer,
	// avoiding fmt.Sprintf’s reflection-based formatting.
	return filepath.Join(s.directory, strconv.Itoa(int(s.nextTempFileID.Add(1))))
}
// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format,
// or an error obtaining that value (e.g. if the image is an artifact and not a container image).
// The destination can use it in its TryReusingBlob/PutBlob implementations
// (otherwise it only obtains the final config after all layers are written).
func (s *storageImageDestination) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error {
	if configErr == nil {
		// Record the (untrusted) DiffID values so layer operations can consult them.
		s.setUntrustedDiffIDValuesFromOCIConfig(ociConfig)
		return nil
	}
	return fmt.Errorf("writing to c/storage without a valid image config: %w", configErr)
}
// PutBlobWithOptions writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
// inputInfo.MediaType describes the blob format, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
	uploaded, err := s.putBlobToPendingFile(stream, blobinfo, &options)
	if err != nil {
		return uploaded, err
	}

	if options.IsConfig {
		// Record the config digest; an image has exactly one config, so refuse a second one.
		s.lock.Lock()
		defer s.lock.Unlock()
		if previous := s.lockProtected.configDigest; previous != "" {
			return private.UploadedBlob{}, fmt.Errorf("after config %q, refusing to record another config %q",
				previous.String(), uploaded.Digest.String())
		}
		s.lockProtected.configDigest = uploaded.Digest
		return uploaded, nil
	}

	if options.LayerIndex == nil {
		// No index information, so we can’t schedule a layer commit for this blob.
		return uploaded, nil
	}
	// Queue (or immediately perform) the commit of this layer in index order.
	err = s.queueOrCommit(*options.LayerIndex, addedLayerInfo{
		digest:     uploaded.Digest,
		emptyLayer: options.EmptyLayer,
	})
	return uploaded, err
}
// putBlobToPendingFile implements ImageDestination.PutBlobWithOptions, storing stream into an on-disk file.
// The caller must arrange the blob to be eventually committed using s.commitLayer().
// On success it returns the blob digest and size, and records the file name, size,
// and DiffID in s.lockProtected for later reuse/commit.
func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinfo types.BlobInfo, options *private.PutBlobOptions) (private.UploadedBlob, error) {
	// Stores a layer or data blob in our temporary directory, checking that any information
	// in the blobinfo matches the incoming data.
	if blobinfo.Digest != "" {
		if err := blobinfo.Digest.Validate(); err != nil {
			return private.UploadedBlob{}, fmt.Errorf("invalid digest %#v: %w", blobinfo.Digest.String(), err)
		}
	}

	// Set up to digest the blob if necessary, and count its size while saving it to a file.
	filename := s.computeNextBlobCacheFile()
	// O_EXCL: the counter guarantees uniqueness, so an existing file would indicate a bug.
	file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
	if err != nil {
		return private.UploadedBlob{}, fmt.Errorf("creating temporary file %q: %w", filename, err)
	}
	defer file.Close()
	// Pipeline: stream -> (tee into file, counting bytes) -> (digest if not provided) -> decompress -> DiffID digest.
	counter := ioutils.NewWriteCounter(file)
	stream = io.TeeReader(stream, counter)
	digester, stream := putblobdigest.DigestIfUnknown(stream, blobinfo)
	decompressed, err := archive.DecompressStream(stream)
	if err != nil {
		return private.UploadedBlob{}, fmt.Errorf("setting up to decompress blob: %w", err)
	}

	diffID := digest.Canonical.Digester()
	// Copy the data to the file.
	// TODO: This can take quite some time, and should ideally be cancellable using context.Context.
	_, err = io.Copy(diffID.Hash(), decompressed)
	// Close before checking err: the decompressor must be released either way.
	decompressed.Close()
	if err != nil {
		return private.UploadedBlob{}, fmt.Errorf("storing blob to file %q: %w", filename, err)
	}

	// Determine blob properties, and fail if information that we were given about the blob
	// is known to be incorrect.
	blobDigest := digester.Digest()
	blobSize := blobinfo.Size
	if blobSize < 0 {
		blobSize = counter.Count
	} else if blobinfo.Size != counter.Count {
		return private.UploadedBlob{}, ErrBlobSizeMismatch
	}

	// Record information about the blob.
	s.lock.Lock()
	s.lockProtected.blobDiffIDs[blobDigest] = diffID.Digest()
	s.lockProtected.fileSizes[blobDigest] = counter.Count
	s.lockProtected.filenames[blobDigest] = filename
	s.lock.Unlock()
	// This is safe because we have just computed diffID, and blobDigest was either computed
	// by us, or validated by the caller (usually copy.digestingReader).
	options.Cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest())
	return private.UploadedBlob{
		Digest: blobDigest,
		Size:   blobSize,
	}, nil
}
// zstdFetcher adapts a private.BlobChunkAccessor (plus a context and blob info)
// to the chunk-fetching interface expected by the c/storage chunked differ.
type zstdFetcher struct {
	chunkAccessor private.BlobChunkAccessor
	ctx           context.Context // NOTE(review): stored in a struct only because the chunked.GetBlobAt interface has no ctx parameter
	blobInfo      types.BlobInfo
}
// GetBlobAt converts from chunked.GetBlobAt to BlobChunkAccessor.GetBlobAt.
func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
	// Translate the chunk descriptors into the accessor’s representation.
	converted := make([]private.ImageSourceChunk, len(chunks))
	for i, c := range chunks {
		converted[i] = private.ImageSourceChunk{
			Offset: c.Offset,
			Length: c.Length,
		}
	}
	readers, errChan, err := f.chunkAccessor.GetBlobAt(f.ctx, f.blobInfo, converted)
	if _, isBadRequest := err.(private.BadPartialRequestError); isBadRequest {
		// Map the error onto the type the chunked package recognizes.
		err = chunked.ErrBadRequest{}
	}
	return readers, errChan, err
}
// PutBlobPartial attempts to create a blob using the data that is already present
// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
// It is available only if SupportsPutBlobPartial().
// Even if SupportsPutBlobPartial() returns true, the call can fail.
// If the call fails with ErrFallbackToOrdinaryLayerDownload, the caller can fall back to PutBlobWithOptions.
// The fallback _must not_ be done otherwise.
func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (_ private.UploadedBlob, retErr error) {
	fetcher := zstdFetcher{
		chunkAccessor: chunkAccessor,
		ctx:           ctx,
		blobInfo:      srcInfo,
	}
	// Translate the chunked package’s fallback error into the private API’s
	// equivalent, wherever in this function it might originate.
	defer func() {
		var perr chunked.ErrFallbackToOrdinaryLayerDownload
		if errors.As(retErr, &perr) {
			retErr = private.NewErrFallbackToOrdinaryLayerDownload(retErr)
		}
	}()

	differ, err := chunked.GetDiffer(ctx, s.imageRef.transport.store, srcInfo.Digest, srcInfo.Size, srcInfo.Annotations, &fetcher)
	if err != nil {
		return private.UploadedBlob{}, err
	}

	out, err := s.imageRef.transport.store.PrepareStagedLayer(nil, differ)
	if err != nil {
		return private.UploadedBlob{}, fmt.Errorf("staging a partially-pulled layer: %w", err)
	}
	// Ensure the staged layer is cleaned up on every failure path below.
	succeeded := false
	defer func() {
		if !succeeded {
			_ = s.imageRef.transport.store.CleanupStagedLayer(out)
		}
	}()

	if out.TOCDigest == "" && out.UncompressedDigest == "" {
		return private.UploadedBlob{}, errors.New("internal error: PrepareStagedLayer succeeded with neither TOCDigest nor UncompressedDigest set")
	}

	blobDigest := srcInfo.Digest

	s.lock.Lock()
	if err := func() error { // A scope for defer
		defer s.lock.Unlock()
		if out.UncompressedDigest != "" {
			s.lockProtected.indexToDiffID[options.LayerIndex] = out.UncompressedDigest
			if out.TOCDigest != "" {
				options.Cache.RecordTOCUncompressedPair(out.TOCDigest, out.UncompressedDigest)
			}
			// Don’t set indexToTOCDigest on this path:
			// - Using UncompressedDigest allows image reuse with non-partially-pulled layers, so we want to set indexToDiffID.
			// - If UncompressedDigest has been computed, that means the layer was read completely, and the TOC has been created from scratch.
			//   That TOC is quite unlikely to match any other TOC value.

			// The computation of UncompressedDigest means the whole layer has been consumed; while doing that, chunked.GetDiffer is
			// responsible for ensuring blobDigest has been validated.
			if out.CompressedDigest != blobDigest {
				return fmt.Errorf("internal error: PrepareStagedLayer returned CompressedDigest %q not matching expected %q",
					out.CompressedDigest, blobDigest)
			}
			// So, record also information about blobDigest, that might benefit reuse.
			// We trust PrepareStagedLayer to validate or create both values correctly.
			s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest
			options.Cache.RecordDigestUncompressedPair(out.CompressedDigest, out.UncompressedDigest)
		} else {
			// Use diffID for layer identity if it is known.
			if uncompressedDigest := options.Cache.UncompressedDigestForTOC(out.TOCDigest); uncompressedDigest != "" {
				s.lockProtected.indexToDiffID[options.LayerIndex] = uncompressedDigest
			}
			s.lockProtected.indexToTOCDigest[options.LayerIndex] = out.TOCDigest
		}
		s.lockProtected.diffOutputs[options.LayerIndex] = out
		return nil
	}(); err != nil {
		return private.UploadedBlob{}, err
	}

	succeeded = true
	return private.UploadedBlob{
		Digest: blobDigest,
		Size:   srcInfo.Size,
	}, nil
}
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If the blob has been successfully reused, returns (true, info, nil).
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
	if !impl.OriginalCandidateMatchesTryReusingBlobOptions(options) {
		return false, private.ReusedBlob{}, nil
	}
	found, blob, err := s.tryReusingBlobAsPending(blobinfo.Digest, blobinfo.Size, &options)
	if err != nil || !found || options.LayerIndex == nil {
		// Either the lookup failed, nothing was found, or there is no layer index
		// to queue a commit for — pass the result straight through.
		return found, blob, err
	}
	// Queue (or immediately perform) the commit of the reused layer in index order.
	err = s.queueOrCommit(*options.LayerIndex, addedLayerInfo{
		digest:     blob.Digest,
		emptyLayer: options.EmptyLayer,
	})
	return found, blob, err
}
// tryReusingBlobAsPending implements TryReusingBlobWithOptions for (blobDigest, size or -1), filling s.blobDiffIDs and other metadata.
// The caller must arrange the blob to be eventually committed using s.commitLayer().
// The lookup sources are tried in order: additional layer store, already-cached
// pending files, layers by uncompressed digest, layers by compressed digest,
// known-uncompressed substitutes from the cache, and finally TOC-digest matches.
func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Digest, size int64, options *private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
	if blobDigest == "" {
		return false, private.ReusedBlob{}, errors.New(`Can not check for a blob with unknown digest`)
	}
	if err := blobDigest.Validate(); err != nil {
		return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
	}
	if options.TOCDigest != "" {
		if err := options.TOCDigest.Validate(); err != nil {
			return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
		}
	}

	// lock the entire method as it executes fairly quickly
	s.lock.Lock()
	defer s.lock.Unlock()

	if options.SrcRef != nil && options.TOCDigest != "" && options.LayerIndex != nil {
		// Check if we have the layer in the underlying additional layer store.
		aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(options.TOCDigest, options.SrcRef.String())
		if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
			return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, blobDigest, err)
		} else if err == nil {
			alsTOCDigest := aLayer.TOCDigest()
			if alsTOCDigest != options.TOCDigest {
				// FIXME: If alsTOCDigest is "", the Additional Layer Store FUSE server is probably just too old, and we could
				// probably go on reading the layer from other sources.
				//
				// Currently it should not be possible for alsTOCDigest to be set and not the expected value, but there’s
				// not that much benefit to checking for equality — we trust the FUSE server to validate the digest either way.
				return false, private.ReusedBlob{}, fmt.Errorf("additional layer for TOCDigest %q reports unexpected TOCDigest %q",
					options.TOCDigest, alsTOCDigest)
			}
			s.lockProtected.indexToTOCDigest[*options.LayerIndex] = options.TOCDigest
			s.lockProtected.indexToAdditionalLayer[*options.LayerIndex] = aLayer
			return true, private.ReusedBlob{
				Digest: blobDigest,
				Size:   aLayer.CompressedSize(),
			}, nil
		}
	}

	// Check if we have a wasn't-compressed layer in storage that's based on that blob.

	// Check if we've already cached it in a file.
	if size, ok := s.lockProtected.fileSizes[blobDigest]; ok {
		// s.lockProtected.blobDiffIDs is set either by putBlobToPendingFile or in createNewLayer when creating the
		// filenames/fileSizes entry.
		return true, private.ReusedBlob{
			Digest: blobDigest,
			Size:   size,
		}, nil
	}

	layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobDigest)
	if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
		return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, blobDigest, err)
	}
	if len(layers) > 0 {
		// The blob is itself an uncompressed layer contents: DiffID == blobDigest.
		s.lockProtected.blobDiffIDs[blobDigest] = blobDigest
		return true, private.ReusedBlob{
			Digest: blobDigest,
			Size:   layers[0].UncompressedSize,
		}, nil
	}

	// Check if we have a was-compressed layer in storage that's based on that blob.
	layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobDigest)
	if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
		return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q: %w`, blobDigest, err)
	}
	if len(layers) > 0 {
		// LayersByCompressedDigest only finds layers which were created from a full layer blob, and extracting that
		// always sets UncompressedDigest.
		diffID := layers[0].UncompressedDigest
		if diffID == "" {
			return false, private.ReusedBlob{}, fmt.Errorf("internal error: compressed layer %q (for compressed digest %q) does not have an uncompressed digest", layers[0].ID, blobDigest.String())
		}
		s.lockProtected.blobDiffIDs[blobDigest] = diffID
		return true, private.ReusedBlob{
			Digest: blobDigest,
			Size:   layers[0].CompressedSize,
		}, nil
	}

	// Does the blob correspond to a known DiffID which we already have available?
	// Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the
	// uncompressed layer, and that can happen only if options.CanSubstitute, or if the incoming manifest already specifies the size.
	if options.CanSubstitute || size != -1 {
		if uncompressedDigest := options.Cache.UncompressedDigest(blobDigest); uncompressedDigest != "" && uncompressedDigest != blobDigest {
			layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest)
			if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
				return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err)
			}
			if found, reused := reusedBlobFromLayerLookup(layers, blobDigest, size, options); found {
				s.lockProtected.blobDiffIDs[blobDigest] = uncompressedDigest
				return true, reused, nil
			}
		}
	}

	if options.TOCDigest != "" && options.LayerIndex != nil {
		// Check if we know which UncompressedDigest the TOC digest resolves to, and we have a match for that.
		// Prefer this over LayersByTOCDigest because we can identify the layer using UncompressedDigest, maximizing reuse.
		uncompressedDigest := options.Cache.UncompressedDigestForTOC(options.TOCDigest)
		if uncompressedDigest != "" {
			layers, err = s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest)
			if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
				return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err)
			}
			if found, reused := reusedBlobFromLayerLookup(layers, blobDigest, size, options); found {
				s.lockProtected.indexToDiffID[*options.LayerIndex] = uncompressedDigest
				reused.MatchedByTOCDigest = true
				return true, reused, nil
			}
		}
		// Check if we have a chunked layer in storage with the same TOC digest.
		layers, err := s.imageRef.transport.store.LayersByTOCDigest(options.TOCDigest)
		if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
			return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with TOC digest %q: %w`, options.TOCDigest, err)
		}
		if found, reused := reusedBlobFromLayerLookup(layers, blobDigest, size, options); found {
			// uncompressedDigest is still the value computed above; record it as well if known.
			if uncompressedDigest != "" {
				s.lockProtected.indexToDiffID[*options.LayerIndex] = uncompressedDigest
			}
			s.lockProtected.indexToTOCDigest[*options.LayerIndex] = options.TOCDigest
			reused.MatchedByTOCDigest = true
			return true, reused, nil
		}
	}

	// Nope, we don't have it.
	return false, private.ReusedBlob{}, nil
}
// reusedBlobFromLayerLookup returns (true, ReusedBlob) if layers contain a usable match; or (false, ...) if not.
// The caller is still responsible for setting the layer identification fields, to allow the layer to be found again.
func reusedBlobFromLayerLookup(layers []storage.Layer, blobDigest digest.Digest, blobSize int64, options *private.TryReusingBlobOptions) (bool, private.ReusedBlob) {
	if len(layers) == 0 {
		return false, private.ReusedBlob{}
	}
	if blobSize != -1 {
		// The manifest told us the size, so we can report the blob as-is.
		return true, private.ReusedBlob{
			Digest: blobDigest,
			Size:   blobSize,
		}
	}
	if options.CanSubstitute && layers[0].UncompressedDigest != "" {
		// Size unknown: substitute the uncompressed variant, whose size we do know.
		return true, private.ReusedBlob{
			Digest:               layers[0].UncompressedDigest,
			Size:                 layers[0].UncompressedSize,
			CompressionOperation: types.Decompress,
			CompressionAlgorithm: nil,
		}
	}
	return false, private.ReusedBlob{}
}
// trustedLayerIdentityData is a _consistent_ set of information known about a single layer.
type trustedLayerIdentityData struct {
	layerIdentifiedByTOC bool // true if we decided the layer should be identified by tocDigest, false if by diffID

	diffID     digest.Digest // A digest of the uncompressed full contents of the layer, or "" if unknown; must be set if !layerIdentifiedByTOC
	tocDigest  digest.Digest // The TOC digest of the layer, or "" if unknown; must be set if layerIdentifiedByTOC
	blobDigest digest.Digest // A digest of the (possibly-compressed) layer as presented, or "" if unknown/untrusted.
}
// trustedLayerIdentityDataLocked returns a _consistent_ set of information for a layer with (layerIndex, blobDigest).
// blobDigest is the (possibly-compressed) layer digest referenced in the manifest.
// It returns (trusted, true) if the layer was found, or (_, false) if insufficient data is available.
//
// The caller must hold s.lock.
func (s *storageImageDestination) trustedLayerIdentityDataLocked(layerIndex int, blobDigest digest.Digest) (trustedLayerIdentityData, bool) {
	// The decision about layerIdentifiedByTOC must be _stable_ once the data for layerIndex is set,
	// even if s.lockProtected.blobDiffIDs changes later and we can subsequently find an entry that wasn’t originally available.
	//
	// If we previously didn't have a blobDigest match and decided to use the TOC, but _later_ we happen to find
	// a blobDigest match, we might in principle want to reconsider, set layerIdentifiedByTOC to false, and use the file:
	// but the layer in question, and possibly child layers, might already have been committed to storage.
	// A late-arriving addition to s.lockProtected.blobDiffIDs would mean that we would want to set
	// new layer IDs for potentially the whole parent chain = throw away the just-created layers and create them all again.
	//
	// Such a within-image layer reuse is expected to be pretty rare; instead, ignore the unexpected file match
	// and proceed to the originally-planned TOC match.
	res := trustedLayerIdentityData{}
	// Per-index data (indexToDiffID / indexToTOCDigest) is consulted first: it is specific to this
	// layer position in the image, unlike blobDiffIDs which is keyed only by blob digest.
	diffID, layerIdentifiedByDiffID := s.lockProtected.indexToDiffID[layerIndex]
	if layerIdentifiedByDiffID {
		res.layerIdentifiedByTOC = false
		res.diffID = diffID
	}
	if tocDigest, ok := s.lockProtected.indexToTOCDigest[layerIndex]; ok {
		res.tocDigest = tocDigest
		// A known diffID takes precedence: only identify by TOC if we have no per-index diffID.
		if !layerIdentifiedByDiffID {
			res.layerIdentifiedByTOC = true
		}
	}
	if otherDiffID, ok := s.lockProtected.blobDiffIDs[blobDigest]; ok {
		if !layerIdentifiedByDiffID && !res.layerIdentifiedByTOC {
			// This is the only data we have, so it is clearly self-consistent.
			res.layerIdentifiedByTOC = false
			res.diffID = otherDiffID
			res.blobDigest = blobDigest
			layerIdentifiedByDiffID = true
		} else {
			// We have set up the layer identity without referring to blobDigest:
			// an attacker might have used a manifest with non-matching tocDigest and blobDigest.
			// But, if we know a trusted diffID value from other sources, and it matches the one for blobDigest,
			// we know blobDigest is fine as well.
			if res.diffID != "" && otherDiffID == res.diffID {
				res.blobDigest = blobDigest
			}
		}
	}
	if !layerIdentifiedByDiffID && !res.layerIdentifiedByTOC {
		return trustedLayerIdentityData{}, false // We found nothing at all
	}
	return res, true
}
// computeID computes a recommended image ID based on information we have so far. If
// the manifest is not of a type that we recognize, we return an empty value, indicating
// that since we don't have a recommendation, a random ID should be used if one needs
// to be allocated.
func (s *storageImageDestination) computeID(m manifest.Manifest) (string, error) {
	// This is outside of the scope of HasThreadSafePutBlob, so we don’t need to hold s.lock.

	layerInfos := m.LayerInfos()

	// Build the diffID list. We need the decompressed sums that we've been calculating to
	// fill in the DiffIDs. It's expected (but not enforced by us) that the number of
	// diffIDs corresponds to the number of non-EmptyLayer entries in the history.
	var diffIDs []digest.Digest
	switch m.(type) {
	case *manifest.Schema1:
		// Build a list of the diffIDs we've generated for the non-throwaway FS layers
		for i, li := range layerInfos {
			if li.EmptyLayer {
				continue
			}
			trusted, ok := s.trustedLayerIdentityDataLocked(i, li.Digest)
			if !ok { // We have already committed all layers if we get to this point, so the data must have been available.
				return "", fmt.Errorf("internal inconsistency: layer (%d, %q) not found", i, li.Digest)
			}
			if trusted.diffID == "" {
				if trusted.layerIdentifiedByTOC {
					// A TOC-identified layer may legitimately have no diffID; fall back to a random ID.
					logrus.Infof("v2s1 image uses a layer identified by TOC with unknown diffID; choosing a random image ID")
					return "", nil
				}
				return "", fmt.Errorf("internal inconsistency: layer (%d, %q) is not identified by TOC and has no diffID", i, li.Digest)
			}
			diffIDs = append(diffIDs, trusted.diffID)
		}
	case *manifest.Schema2, *manifest.OCI1:
		// We know the ID calculation doesn't actually use the diffIDs, so we don't need to populate
		// the diffID list.
	default:
		// Unrecognized manifest type: no recommendation, caller should use a random ID.
		return "", nil
	}

	// We want to use the same ID for “the same” images, but without risking unwanted sharing / malicious image corruption.
	//
	// Traditionally that means the same ~config digest, as computed by m.ImageID;
	// but if we identify a layer by TOC, we verify the layer against neither the (compressed) blob digest in the manifest,
	// nor against the config’s RootFS.DiffIDs. We don’t really want to do either, to allow partial layer pulls where we never see
	// most of the data.
	//
	// So, if a layer is identified by TOC (and we do validate against the TOC), the fact that we used the TOC, and the value of the TOC,
	// must enter into the image ID computation.
	// But for images where no TOC was used, continue to use IDs computed the traditional way, to maximize image reuse on upgrades,
	// and to introduce the changed behavior only when partial pulls are used.
	//
	// Note that it’s not 100% guaranteed that an image pulled by TOC uses an OCI manifest; consider
	// (skopeo copy --format v2s2 docker://…/zstd-chunked-image containers-storage:… ). So this is not happening only in the OCI case above.
	ordinaryImageID, err := m.ImageID(diffIDs)
	if err != nil {
		return "", err
	}
	// Accumulate one entry per layer (empty for non-TOC layers) so that the TOC-based ID
	// depends on both the position and the TOC value of every TOC-identified layer.
	tocIDInput := ""
	hasLayerPulledByTOC := false
	for i, li := range layerInfos {
		trusted, ok := s.trustedLayerIdentityDataLocked(i, li.Digest)
		if !ok { // We have already committed all layers if we get to this point, so the data must have been available.
			return "", fmt.Errorf("internal inconsistency: layer (%d, %q) not found", i, li.Digest)
		}
		layerValue := "" // An empty string is not a valid digest, so this is unambiguous with the TOC case.
		if trusted.layerIdentifiedByTOC {
			hasLayerPulledByTOC = true
			layerValue = trusted.tocDigest.String()
		}
		tocIDInput += layerValue + "|" // "|" can not be present in a TOC digest, so this is an unambiguous separator.
	}

	if !hasLayerPulledByTOC {
		return ordinaryImageID, nil
	}
	// ordinaryImageID is a digest of a config, which is a JSON value.
	// To avoid the risk of collisions, start the input with @ so that the input is not a valid JSON.
	tocImageID := digest.FromString("@With TOC:" + tocIDInput).Encoded()
	logrus.Debugf("Ordinary storage image ID %s; a layer was looked up by TOC, so using image ID %s", ordinaryImageID, tocImageID)
	return tocImageID, nil
}
// getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig
// information out of it for Inspect().
func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, error) {
	if info.Digest == "" {
		return nil, errors.New(`no digest supplied when reading blob`)
	}
	if err := info.Digest.Validate(); err != nil {
		return nil, fmt.Errorf("invalid digest supplied when reading blob: %w", err)
	}
	// Assume it's a file, since we're only calling this from a place that expects to read files.
	filename, ok := s.lockProtected.filenames[info.Digest]
	if !ok {
		// If it's not a file, it's a bug, because we're not expecting to be asked for a layer.
		return nil, errors.New("blob not found")
	}
	contents, err := os.ReadFile(filename)
	if err != nil {
		return nil, fmt.Errorf(`reading blob from file %q: %w`, filename, err)
	}
	return contents, nil
}
// queueOrCommit queues the specified layer to be committed to the storage.
// If no other goroutine is already committing layers, the layer and all
// subsequent layers (if already queued) will be committed to the storage.
func (s *storageImageDestination) queueOrCommit(index int, info addedLayerInfo) error {
	// NOTE: whenever the code below is touched, make sure that all code
	// paths unlock the lock and to unlock it exactly once.
	//
	// Conceptually, the code is divided in two stages:
	//
	// 1) Queue in work by marking the layer as ready to be committed.
	//    If at least one previous/parent layer with a lower index has
	//    not yet been committed, return early.
	//
	// 2) Process the queued-in work by committing the "ready" layers
	//    in sequence. Make sure that more items can be queued-in
	//    during the comparatively I/O expensive task of committing a
	//    layer.
	//
	// The conceptual benefit of this design is that caller can continue
	// pulling layers after an early return. At any given time, only one
	// caller is the "worker" routine committing layers. All other routines
	// can continue pulling and queuing in layers.
	s.lock.Lock()
	s.lockProtected.indexToAddedLayerInfo[index] = info

	// We're still waiting for at least one previous/parent layer to be
	// committed, so there's nothing to do.
	if index != s.lockProtected.currentIndex {
		s.lock.Unlock()
		return nil
	}

	// Stage 2: this goroutine is now the worker; commit this layer and any
	// consecutively-queued successors.
	for {
		info, ok := s.lockProtected.indexToAddedLayerInfo[index]
		if !ok {
			// The next layer has not been queued yet; stop being the worker.
			break
		}
		// Drop the lock while committing, so other goroutines can keep queuing layers.
		s.lock.Unlock()
		// Note: commitLayer locks on-demand.
		if stopQueue, err := s.commitLayer(index, info, -1); stopQueue || err != nil {
			return err
		}
		s.lock.Lock()
		index++
	}

	// Set the index at the very end to make sure that only one routine
	// enters stage 2).
	s.lockProtected.currentIndex = index
	s.lock.Unlock()
	return nil
}
// singleLayerIDComponent returns the input contributed by a single layer to the computation
// of a layer (chain) ID, and an indication whether that input already has the shape of a layer ID.
// It returns ("", false) if the layer is not found at all (which should never happen).
func (s *storageImageDestination) singleLayerIDComponent(layerIndex int, blobDigest digest.Digest) (string, bool) {
	s.lock.Lock()
	defer s.lock.Unlock()

	identity, known := s.trustedLayerIdentityDataLocked(layerIndex, blobDigest)
	switch {
	case !known:
		return "", false
	case identity.layerIdentifiedByTOC:
		return "@TOC=" + identity.tocDigest.Encoded(), false // "@" is not a valid start of a digest.Digest, so this is unambiguous.
	default:
		return identity.diffID.Encoded(), true // This looks like chain IDs, and it uses the traditional value.
	}
}
// commitLayer commits the specified layer with the given index to the storage.
// size can usually be -1; it can be provided if the layer is not known to be already present in blobDiffIDs.
//
// If the layer cannot be committed yet, the function returns (true, nil).
//
// Note that the previous layer is expected to already be committed.
//
// Caution: this function must be called without holding `s.lock`. Callers
// must guarantee that, at any given time, at most one goroutine may execute
// `commitLayer()`.
func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, size int64) (bool, error) {
	// Already committed? Return early.
	if _, alreadyCommitted := s.indexToStorageID[index]; alreadyCommitted {
		return false, nil
	}

	// Start with an empty string or the previous layer ID.  Note that
	// `s.indexToStorageID` can only be accessed by *one* goroutine at any
	// given time. Hence, we don't need to lock accesses.
	var parentLayer string
	if index != 0 {
		prev, ok := s.indexToStorageID[index-1]
		if !ok {
			return false, fmt.Errorf("Internal error: commitLayer called with previous layer %d not committed yet", index-1)
		}
		parentLayer = prev
	}

	// Carry over the previous ID for empty non-base layers.
	if info.emptyLayer {
		s.indexToStorageID[index] = parentLayer
		return false, nil
	}

	// Check if there's already a layer with the ID that we'd give to the result of applying
	// this layer blob to its parent, if it has one, or the blob's hex value otherwise.
	// The layerID refers either to the DiffID or the digest of the TOC.
	layerIDComponent, layerIDComponentStandalone := s.singleLayerIDComponent(index, info.digest)
	if layerIDComponent == "" {
		// Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob() / TryReusingBlob() / …
		//
		// Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache: a caller
		// that relies on using a blob digest that has never been seen by the store had better call
		// TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only
		// so far we are going to accommodate that (if we should be doing that at all).
		//
		// We are also ignoring lookups by TOC, and other non-trivial situations.
		// Those can only happen using the c/image/internal/private API,
		// so those internal callers should be fixed to follow the API instead of expanding this fallback.
		logrus.Debugf("looking for diffID for blob=%+v", info.digest)

		// Use tryReusingBlobAsPending, not the top-level TryReusingBlobWithOptions, to prevent recursion via queueOrCommit.
		has, _, err := s.tryReusingBlobAsPending(info.digest, size, &private.TryReusingBlobOptions{
			Cache:         none.NoCache,
			CanSubstitute: false,
		})
		if err != nil {
			return false, fmt.Errorf("checking for a layer based on blob %q: %w", info.digest.String(), err)
		}
		if !has {
			return false, fmt.Errorf("error determining uncompressed digest for blob %q", info.digest.String())
		}

		// Retry the lookup now that tryReusingBlobAsPending may have recorded new identity data.
		layerIDComponent, layerIDComponentStandalone = s.singleLayerIDComponent(index, info.digest)
		if layerIDComponent == "" {
			return false, fmt.Errorf("we have blob %q, but don't know its layer ID", info.digest.String())
		}
	}

	// Compute the layer ID: a standalone base-layer component can be used as-is;
	// otherwise chain it with the parent layer ID, like traditional chain IDs.
	id := layerIDComponent
	if !layerIDComponentStandalone || parentLayer != "" {
		id = digest.Canonical.FromString(parentLayer + "+" + layerIDComponent).Encoded()
	}
	if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil {
		// There's already a layer that should have the right contents, just reuse it.
		s.indexToStorageID[index] = layer.ID
		return false, nil
	}

	layer, err := s.createNewLayer(index, info.digest, parentLayer, id)
	if err != nil {
		return false, err
	}
	if layer == nil {
		// The layer cannot be committed yet (e.g. waiting on data that only becomes
		// available later); tell the caller to stop the commit queue.
		return true, nil
	}
	s.indexToStorageID[index] = layer.ID
	return false, nil
}
// createNewLayer creates a new layer newLayerID for (index, layerDigest) on top of parentLayer (which may be "").
// If the layer cannot be committed yet, the function returns (nil, nil).
func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.Digest, parentLayer, newLayerID string) (*storage.Layer, error) {
s.lock.Lock()
diffOutput, ok := s.lockProtected.diffOutputs[index]
s.lock.Unlock()
if ok {
// If we know a trusted DiffID value (e.g. from a BlobInfoCache), set it in diffOutput.
// That way it will be persisted in storage even if the cache is deleted; also
// we can use the value below to avoid the untrustedUncompressedDigest logic (and notably
// the costly commit delay until a manifest is available).
s.lock.Lock()
if d, ok := s.lockProtected.indexToDiffID[index]; ok {
diffOutput.UncompressedDigest = d
}
s.lock.Unlock()
var untrustedUncompressedDigest digest.Digest
if diffOutput.UncompressedDigest == "" {
d, err := s.untrustedLayerDiffID(index)
if err != nil {
return nil, err
}
if d == "" {
logrus.Debugf("Skipping commit for layer %q, manifest not yet available", newLayerID)
return nil, nil
}
untrustedUncompressedDigest = d
// While the contents of the digest are untrusted, make sure at least the _format_ is valid,
// because we are going to write it to durable storage in expectedLayerDiffIDFlag .
if err := untrustedUncompressedDigest.Validate(); err != nil {
return nil, err
}
}
flags := make(map[string]interface{})
if untrustedUncompressedDigest != "" {
flags[expectedLayerDiffIDFlag] = untrustedUncompressedDigest.String()
logrus.Debugf("Setting uncompressed digest to %q for layer %q", untrustedUncompressedDigest, newLayerID)
}
args := storage.ApplyStagedLayerOptions{
ID: newLayerID,
ParentLayer: parentLayer,
DiffOutput: diffOutput,
DiffOptions: &graphdriver.ApplyDiffWithDifferOpts{
Flags: flags,
},
}
layer, err := s.imageRef.transport.store.ApplyStagedLayer(args)
if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
return nil, fmt.Errorf("failed to put layer using a partial pull: %w", err)
}
return layer, nil
}
s.lock.Lock()
al, ok := s.lockProtected.indexToAdditionalLayer[index]
s.lock.Unlock()
if ok {
layer, err := al.PutAs(newLayerID, parentLayer, nil)
if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
return nil, fmt.Errorf("failed to put layer from digest and labels: %w", err)
}
return layer, nil
}
// Check if we previously cached a file with that blob's contents. If we didn't,
// then we need to read the desired contents from a layer.
var filename string