store.go
package storage
import (
"encoding/base64"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"strings"
"sync"
"time"
// register all of the built-in drivers
_ "github.com/containers/storage/drivers/register"
drivers "github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/parsers"
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/stringutils"
"github.com/containers/storage/types"
"github.com/hashicorp/go-multierror"
digest "github.com/opencontainers/go-digest"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
)
var (
stores []*store
storesLock sync.Mutex
)
// ROFileBasedStore wraps up the methods of the various types of file-based
// data stores that we implement which are needed for both read-only and
// read-write files.
type ROFileBasedStore interface {
Locker
// Load reloads the contents of the store from disk. It should be called
// with the lock held.
Load() error
// ReloadIfChanged reloads the contents of the store from disk if it has changed.
ReloadIfChanged() error
}
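// Example (illustrative sketch, not part of the original file): callers of a
// ROFileBasedStore are expected to take the lock and reload before reading,
// mirroring the pattern used elsewhere in this file. "fbs" is a hypothetical
// variable holding a ROFileBasedStore:
//
//	fbs.Lock()
//	defer fbs.Unlock()
//	if err := fbs.ReloadIfChanged(); err != nil {
//		return err
//	}
//	// ... read from the store while the lock is held ...
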
// RWFileBasedStore wraps up the methods of various types of file-based data
// stores that we implement using read-write files.
type RWFileBasedStore interface {
// Save saves the contents of the store to disk. It should be called with
// the lock held, and Touch() should be called afterward before releasing the
// lock.
Save() error
}
// FileBasedStore wraps up the common methods of various types of file-based
// data stores that we implement.
type FileBasedStore interface {
ROFileBasedStore
RWFileBasedStore
}
// ROMetadataStore wraps a method for reading metadata associated with an ID.
type ROMetadataStore interface {
// Metadata reads metadata associated with an item with the specified ID.
Metadata(id string) (string, error)
}
// RWMetadataStore wraps a method for setting metadata associated with an ID.
type RWMetadataStore interface {
// SetMetadata updates the metadata associated with the item with the specified ID.
SetMetadata(id, metadata string) error
}
// MetadataStore wraps up methods for getting and setting metadata associated with IDs.
type MetadataStore interface {
ROMetadataStore
RWMetadataStore
}
// An ROBigDataStore wraps up the read-only big-data related methods of the
// various types of file-based lookaside stores that we implement.
type ROBigDataStore interface {
// BigData retrieves a (potentially large) piece of data associated with
// this ID, if it has previously been set.
BigData(id, key string) ([]byte, error)
// BigDataSize retrieves the size of a (potentially large) piece of
// data associated with this ID, if it has previously been set.
BigDataSize(id, key string) (int64, error)
// BigDataDigest retrieves the digest of a (potentially large) piece of
// data associated with this ID, if it has previously been set.
BigDataDigest(id, key string) (digest.Digest, error)
// BigDataNames() returns a list of the names of previously-stored pieces of
// data.
BigDataNames(id string) ([]string, error)
}
// An RWImageBigDataStore wraps up how we store big-data associated with images.
type RWImageBigDataStore interface {
// SetBigData stores a (potentially large) piece of data associated
// with this ID.
// Pass github.com/containers/image/manifest.Digest as digestManifest
// to allow ByDigest to find images by their correct digests.
SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error
}
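// Example (illustrative sketch, not part of the original file): storing an
// image manifest so that digest-based lookups can work, as the comment above
// describes. "imageStore", "imageID", and "manifestBytes" are assumed
// placeholders; manifest.Digest comes from github.com/containers/image/manifest
// as noted above, and the manifest is assumed to be stored under
// ImageDigestBigDataKey (the key that ImagesByDigest consults):
//
//	if err := imageStore.SetBigData(imageID, ImageDigestBigDataKey, manifestBytes, manifest.Digest); err != nil {
//		return err
//	}
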
// A ContainerBigDataStore wraps up how we store big-data associated with containers.
type ContainerBigDataStore interface {
ROBigDataStore
// SetBigData stores a (potentially large) piece of data associated
// with this ID.
SetBigData(id, key string, data []byte) error
}
// An ROLayerBigDataStore wraps up how we store RO big-data associated with layers.
type ROLayerBigDataStore interface {
// BigData retrieves a (potentially large) piece of data associated
// with this ID, if it has previously been set.
BigData(id, key string) (io.ReadCloser, error)
// BigDataNames() returns a list of the names of previously-stored pieces of
// data.
BigDataNames(id string) ([]string, error)
}
// An RWLayerBigDataStore wraps up how we store big-data associated with layers.
type RWLayerBigDataStore interface {
// SetBigData stores a (potentially large) piece of data associated
// with this ID.
SetBigData(id, key string, data io.Reader) error
}
// A LayerBigDataStore wraps up how we store big-data associated with layers.
type LayerBigDataStore interface {
ROLayerBigDataStore
RWLayerBigDataStore
}
// A FlaggableStore can have flags set and cleared on items which it manages.
type FlaggableStore interface {
// ClearFlag removes a named flag from an item in the store.
ClearFlag(id string, flag string) error
// SetFlag sets a named flag and its value on an item in the store.
SetFlag(id string, flag string, value interface{}) error
}
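// Example (illustrative sketch, not part of the original file): setting and
// clearing a flag on an item. "fs", "id", and the flag name are assumed
// placeholders:
//
//	if err := fs.SetFlag(id, "hidden", true); err != nil {
//		return err
//	}
//	if err := fs.ClearFlag(id, "hidden"); err != nil {
//		return err
//	}
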
type StoreOptions = types.StoreOptions
// Store wraps up the various types of file-based stores that we use into a
// singleton object that initializes and manages them all together.
type Store interface {
// RunRoot, GraphRoot, GraphDriverName, and GraphOptions retrieve
// settings that were passed to GetStore() when the object was created.
RunRoot() string
GraphRoot() string
GraphDriverName() string
GraphOptions() []string
UIDMap() []idtools.IDMap
GIDMap() []idtools.IDMap
// GraphDriver obtains and returns a handle to the graph Driver object used
// by the Store.
GraphDriver() (drivers.Driver, error)
// CreateLayer creates a new layer in the underlying storage driver,
// optionally having the specified ID (one will be assigned if none is
// specified), with the specified layer (or no layer) as its parent,
// and with optional names. (The writeable flag is ignored.)
CreateLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions) (*Layer, error)
// PutLayer combines the functions of CreateLayer and ApplyDiff,
// marking the layer for automatic removal if applying the diff fails
// for any reason.
//
// Note that we do some of this work in a child process. The calling
// process's main() function needs to import our pkg/reexec package and
// should begin with something like this in order to allow us to
// properly start that child process:
// if reexec.Init() {
// return
// }
PutLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions, diff io.Reader) (*Layer, int64, error)
// CreateImage creates a new image, optionally with the specified ID
// (one will be assigned if none is specified), with optional names,
// referring to a specified layer, and with optional metadata. An
// image is a record which associates the ID of a layer with
// additional bookkeeping information which the library stores for the
// convenience of its caller.
CreateImage(id string, names []string, layer, metadata string, options *ImageOptions) (*Image, error)
// CreateContainer creates a new container, optionally with the
// specified ID (one will be assigned if none is specified), with
// optional names, using the specified image's top layer as the basis
// for the container's layer, and assigning the specified ID to that
// layer (one will be created if none is specified). A container is a
// layer which is associated with additional bookkeeping information
// which the library stores for the convenience of its caller.
CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error)
// Metadata retrieves the metadata which is associated with a layer,
// image, or container (whichever the passed-in ID refers to).
Metadata(id string) (string, error)
// SetMetadata updates the metadata which is associated with a layer,
// image, or container (whichever the passed-in ID refers to) to match
// the specified value. The metadata value can be retrieved at any
// time using Metadata, or using Layer, Image, or Container and reading
// the object directly.
SetMetadata(id, metadata string) error
// Exists checks if there is a layer, image, or container which has the
// passed-in ID or name.
Exists(id string) bool
// Status asks for a status report, in the form of key-value pairs,
// from the underlying storage driver. The contents vary from driver
// to driver.
Status() ([][2]string, error)
// Delete removes the layer, image, or container which has the
// passed-in ID or name. Note that no safety checks are performed, so
// this can leave images with references to layers which do not exist,
// and layers with references to parents which no longer exist.
Delete(id string) error
// DeleteLayer attempts to remove the specified layer. If the layer is the
// parent of any other layer, or is referred to by any images, it will return
// an error.
DeleteLayer(id string) error
// DeleteImage removes the specified image if it is not referred to by
// any containers. If its top layer is then no longer referred to by
// any other images and is not the parent of any other layers, its top
// layer will be removed. If that layer's parent is no longer referred
// to by any other images and is not the parent of any other layers,
// then it, too, will be removed. This procedure will be repeated
// until a layer which should not be removed, or the base layer, is
// reached, at which point the list of removed layers is returned. If
// the commit argument is false, the image and layers are not removed,
// but the list of layers which would be removed is still returned.
DeleteImage(id string, commit bool) (layers []string, err error)
// DeleteContainer removes the specified container and its layer. If
// there is no matching container, or if the container exists but its
// layer does not, an error will be returned.
DeleteContainer(id string) error
// Wipe removes all known layers, images, and containers.
Wipe() error
// MountImage mounts an image to a temporary directory and returns the
// mount point. Images are always mounted read-only.
MountImage(id string, mountOptions []string, mountLabel string) (string, error)
// UnmountImage attempts to unmount an image, given an ID.
// Returns whether or not the layer is still mounted.
UnmountImage(id string, force bool) (bool, error)
// Mount attempts to mount a layer, image, or container for access, and
// returns the pathname if it succeeds.
// Note if the mountLabel == "", the default label for the container
// will be used.
//
// Note that we do some of this work in a child process. The calling
// process's main() function needs to import our pkg/reexec package and
// should begin with something like this in order to allow us to
// properly start that child process:
// if reexec.Init() {
// return
// }
Mount(id, mountLabel string) (string, error)
// Unmount attempts to unmount a layer, image, or container, given an ID, a
// name, or a mount path. Returns whether or not the layer is still mounted.
Unmount(id string, force bool) (bool, error)
// Mounted returns the number of times the layer has been mounted.
Mounted(id string) (int, error)
// Changes returns a summary of the changes which would need to be made
// to one layer to make its contents the same as a second layer. If
// the first layer is not specified, the second layer's parent is
// assumed. Each Change structure contains a Path relative to the
// layer's root directory, and a Kind which is either ChangeAdd,
// ChangeModify, or ChangeDelete.
Changes(from, to string) ([]archive.Change, error)
// DiffSize returns a count of the size of the tarstream which would
// specify the changes returned by Changes.
DiffSize(from, to string) (int64, error)
// Diff returns the tarstream which would specify the changes returned
// by Changes. If options are passed in, they can override default
// behaviors.
Diff(from, to string, options *DiffOptions) (io.ReadCloser, error)
// ApplyDiff applies a tarstream to a layer. Information about the
// tarstream is cached with the layer. Typically, a layer which is
// populated using a tarstream will be expected to not be modified in
// any other way, either before or after the diff is applied.
//
// Note that we do some of this work in a child process. The calling
// process's main() function needs to import our pkg/reexec package and
// should begin with something like this in order to allow us to
// properly start that child process:
// if reexec.Init() {
// return
// }
ApplyDiff(to string, diff io.Reader) (int64, error)
// ApplyDiffWithDiffer applies a diff to a layer, using the given differ.
// It is the caller's responsibility to clean up the staging directory if the
// diff is not successfully applied with ApplyDiffFromStagingDirectory.
ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
// ApplyDiffFromStagingDirectory uses stagingDirectory to create the diff.
ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error
// CleanupStagingDirectory removes the staging directory. It can be used to
// clean up the staging directory after errors.
CleanupStagingDirectory(stagingDirectory string) error
// DifferTarget gets the path to the differ target.
DifferTarget(id string) (string, error)
// LayersByCompressedDigest returns a slice of the layers with the
// specified compressed digest value recorded for them.
LayersByCompressedDigest(d digest.Digest) ([]Layer, error)
// LayersByUncompressedDigest returns a slice of the layers with the
// specified uncompressed digest value recorded for them.
LayersByUncompressedDigest(d digest.Digest) ([]Layer, error)
// LayerSize returns a cached approximation of the layer's size, or -1
// if we don't have a value on hand.
LayerSize(id string) (int64, error)
// LayerParentOwners returns the UIDs and GIDs of owners of parents of
// the layer's mountpoint for which the layer's UID and GID maps (if
// any are defined) don't contain corresponding IDs.
LayerParentOwners(id string) ([]int, []int, error)
// Layers returns a list of the currently known layers.
Layers() ([]Layer, error)
// Images returns a list of the currently known images.
Images() ([]Image, error)
// Containers returns a list of the currently known containers.
Containers() ([]Container, error)
// Names returns the list of names for a layer, image, or container.
Names(id string) ([]string, error)
// Free removes the store from the list of stores
Free()
// SetNames changes the list of names for a layer, image, or container.
// Duplicate names are removed from the list automatically.
SetNames(id string, names []string) error
// ListImageBigData retrieves a list of the (possibly large) chunks of
// named data associated with an image.
ListImageBigData(id string) ([]string, error)
// ImageBigData retrieves a (possibly large) chunk of named data
// associated with an image.
ImageBigData(id, key string) ([]byte, error)
// ImageBigDataSize retrieves the size of a (possibly large) chunk
// of named data associated with an image.
ImageBigDataSize(id, key string) (int64, error)
// ImageBigDataDigest retrieves the digest of a (possibly large) chunk
// of named data associated with an image.
ImageBigDataDigest(id, key string) (digest.Digest, error)
// SetImageBigData stores a (possibly large) chunk of named data
// associated with an image. Pass
// github.com/containers/image/manifest.Digest as digestManifest to
// allow ImagesByDigest to find images by their correct digests.
SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error
// ListLayerBigData retrieves a list of the (possibly large) chunks of
// named data associated with a layer.
ListLayerBigData(id string) ([]string, error)
// LayerBigData retrieves a (possibly large) chunk of named data
// associated with a layer.
LayerBigData(id, key string) (io.ReadCloser, error)
// SetLayerBigData stores a (possibly large) chunk of named data
// associated with a layer.
SetLayerBigData(id, key string, data io.Reader) error
// ImageSize computes the size of the image's layers and ancillary data.
ImageSize(id string) (int64, error)
// ListContainerBigData retrieves a list of the (possibly large) chunks of
// named data associated with a container.
ListContainerBigData(id string) ([]string, error)
// ContainerBigData retrieves a (possibly large) chunk of named data
// associated with a container.
ContainerBigData(id, key string) ([]byte, error)
// ContainerBigDataSize retrieves the size of a (possibly large)
// chunk of named data associated with a container.
ContainerBigDataSize(id, key string) (int64, error)
// ContainerBigDataDigest retrieves the digest of a (possibly large)
// chunk of named data associated with a container.
ContainerBigDataDigest(id, key string) (digest.Digest, error)
// SetContainerBigData stores a (possibly large) chunk of named data
// associated with a container.
SetContainerBigData(id, key string, data []byte) error
// ContainerSize computes the size of the container's layer and ancillary
// data. Warning: this is a potentially expensive operation.
ContainerSize(id string) (int64, error)
// Layer returns a specific layer.
Layer(id string) (*Layer, error)
// Image returns a specific image.
Image(id string) (*Image, error)
// ImagesByTopLayer returns a list of images which reference the specified
// layer as their top layer. They will have different IDs and names
// and may have different metadata, big data items, and flags.
ImagesByTopLayer(id string) ([]*Image, error)
// ImagesByDigest returns a list of images which contain a big data item
// named ImageDigestBigDataKey whose contents have the specified digest.
ImagesByDigest(d digest.Digest) ([]*Image, error)
// Container returns a specific container.
Container(id string) (*Container, error)
// ContainerByLayer returns a specific container based on its layer ID or
// name.
ContainerByLayer(id string) (*Container, error)
// ContainerDirectory returns a path of a directory which the caller
// can use to store data, specific to the container, which the library
// does not directly manage. The directory will be deleted when the
// container is deleted.
ContainerDirectory(id string) (string, error)
// SetContainerDirectoryFile is a convenience function which stores
// a piece of data in the specified file relative to the container's
// directory.
SetContainerDirectoryFile(id, file string, data []byte) error
// FromContainerDirectory is a convenience function which reads
// the contents of the specified file relative to the container's
// directory.
FromContainerDirectory(id, file string) ([]byte, error)
// ContainerRunDirectory returns a path of a directory which the
// caller can use to store data, specific to the container, which the
// library does not directly manage. The directory will be deleted
// when the host system is restarted.
ContainerRunDirectory(id string) (string, error)
// SetContainerRunDirectoryFile is a convenience function which stores
// a piece of data in the specified file relative to the container's
// run directory.
SetContainerRunDirectoryFile(id, file string, data []byte) error
// FromContainerRunDirectory is a convenience function which reads
// the contents of the specified file relative to the container's run
// directory.
FromContainerRunDirectory(id, file string) ([]byte, error)
// ContainerParentOwners returns the UIDs and GIDs of owners of parents
// of the container's layer's mountpoint for which the layer's UID and
// GID maps (if any are defined) don't contain corresponding IDs.
ContainerParentOwners(id string) ([]int, []int, error)
// Lookup returns the ID of a layer, image, or container with the specified
// name or ID.
Lookup(name string) (string, error)
// Shutdown attempts to free any kernel resources which are being used
// by the underlying driver. If "force" is true, any mounted (i.e., in
// use) layers are unmounted beforehand. If "force" is not true, then
// layers being in use is considered to be an error condition. A list
// of still-mounted layers is returned along with possible errors.
Shutdown(force bool) (layers []string, err error)
// Version returns version information, in the form of key-value pairs, from
// the storage package.
Version() ([][2]string, error)
// GetDigestLock returns a digest-specific Locker.
GetDigestLock(digest.Digest) (Locker, error)
// LookupAdditionalLayer searches the additional layer store for a layer with
// the specified digest and returns an object for handling it. Note that the
// layer hasn't been stored in this store yet, so that needs to be done through
// the PutAs method. Releasing the AdditionalLayer handler is the caller's
// responsibility.
// This API is experimental and can be changed without bumping the major version number.
LookupAdditionalLayer(d digest.Digest, imageref string) (AdditionalLayer, error)
}
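// Example (illustrative sketch, not part of the original file): a typical
// sequence that uses a Store to create and mount a container from an image
// that is already present. "s", the image name, and the container name are
// assumed placeholders, and error handling is elided:
//
//	img, err := s.Image("my-image-id-or-name")
//	...
//	ctr, err := s.CreateContainer("", []string{"my-container"}, img.ID, "", "", nil)
//	...
//	mountPoint, err := s.Mount(ctr.ID, "")
//	...
//	// work with the filesystem under mountPoint, then:
//	_, err = s.Unmount(ctr.ID, false)
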
// AdditionalLayer represents a layer that is contained in the additional layer store.
// This API is experimental and can be changed without bumping the major version number.
type AdditionalLayer interface {
// PutAs creates layer based on this handler, using diff contents from the additional
// layer store.
PutAs(id, parent string, names []string) (*Layer, error)
// UncompressedDigest returns the uncompressed digest of this layer
UncompressedDigest() digest.Digest
// CompressedSize returns the compressed size of this layer
CompressedSize() int64
// Release tells the additional layer store that we no longer use this handler.
Release()
}
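// Example (illustrative sketch, not part of the original file): importing a
// layer from the additional layer store into this store. "s", "d" (a
// digest.Digest), "imageRef", and "parentID" are assumed placeholders:
//
//	al, err := s.LookupAdditionalLayer(d, imageRef)
//	if err != nil {
//		return err
//	}
//	defer al.Release()
//	layer, err := al.PutAs("", parentID, nil)
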
type AutoUserNsOptions = types.AutoUserNsOptions
type IDMappingOptions = types.IDMappingOptions
// LayerOptions is used for passing options to a Store's CreateLayer() and PutLayer() methods.
type LayerOptions struct {
// IDMappingOptions specifies the type of ID mapping which should be
// used for this layer. If nothing is specified, the layer will
// inherit settings from its parent layer or, if it has no parent
// layer, the Store object.
types.IDMappingOptions
// TemplateLayer is the ID of a layer whose contents will be used to
// initialize this layer. If set, it should be a child of the layer
// which we want to use as the parent of the new layer.
TemplateLayer string
// OriginalDigest specifies a digest of the tarstream (diff), if one is
// provided along with these LayerOptions, and reliably known by the caller.
// Use the default "" if this field is not applicable or the value is not known.
OriginalDigest digest.Digest
// UncompressedDigest specifies a digest of the uncompressed version (“DiffID”)
// of the tarstream (diff), if one is provided along with these LayerOptions,
// and reliably known by the caller.
// Use the default "" if this field is not applicable or the value is not known.
UncompressedDigest digest.Digest
}
// ImageOptions is used for passing options to a Store's CreateImage() method.
type ImageOptions struct {
// CreationDate, if not zero, will override the default behavior of marking the image as having been
// created when CreateImage() was called, recording CreationDate instead.
CreationDate time.Time
// Digest is a hard-coded digest value that we can use to look up the image. It is optional.
Digest digest.Digest
}
// ContainerOptions is used for passing options to a Store's CreateContainer() method.
type ContainerOptions struct {
// IDMappingOptions specifies the type of ID mapping which should be
// used for this container's layer. If nothing is specified, the
// container's layer will inherit settings from the image's top layer
// or, if it is not being created based on an image, the Store object.
types.IDMappingOptions
LabelOpts []string
Flags map[string]interface{}
MountOpts []string
Volatile bool
}
type store struct {
lastLoaded time.Time
runRoot string
graphLock Locker
usernsLock Locker
graphRoot string
graphDriverName string
graphOptions []string
uidMap []idtools.IDMap
gidMap []idtools.IDMap
autoUsernsUser string
additionalUIDs *idSet // Set by getAvailableIDs()
additionalGIDs *idSet // Set by getAvailableIDs()
autoNsMinSize uint32
autoNsMaxSize uint32
graphDriver drivers.Driver
layerStore LayerStore
roLayerStores []ROLayerStore
imageStore ImageStore
roImageStores []ROImageStore
containerStore ContainerStore
digestLockRoot string
disableVolatile bool
}
// GetStore attempts to find an already-created Store object matching the
// specified location and graph driver, and if it can't, it creates and
// initializes a new Store object, and the underlying storage that it controls.
//
// If StoreOptions `options` haven't been fully populated, then DefaultStoreOptions are used.
//
// These defaults observe environment variables:
// * `STORAGE_DRIVER` for the name of the storage driver to attempt to use
// * `STORAGE_OPTS` for the string of options to pass to the driver
//
// Note that we do some of this work in a child process. The calling process's
// main() function needs to import our pkg/reexec package and should begin with
// something like this in order to allow us to properly start that child
// process:
// if reexec.Init() {
// return
// }
func GetStore(options types.StoreOptions) (Store, error) {
if options.RunRoot == "" && options.GraphRoot == "" && options.GraphDriverName == "" && len(options.GraphDriverOptions) == 0 {
options = types.Options()
}
if options.GraphRoot != "" {
dir, err := filepath.Abs(options.GraphRoot)
if err != nil {
return nil, err
}
options.GraphRoot = dir
}
if options.RunRoot != "" {
dir, err := filepath.Abs(options.RunRoot)
if err != nil {
return nil, err
}
options.RunRoot = dir
}
storesLock.Lock()
defer storesLock.Unlock()
for _, s := range stores {
if s.graphRoot == options.GraphRoot && (options.GraphDriverName == "" || s.graphDriverName == options.GraphDriverName) {
return s, nil
}
}
if options.GraphRoot == "" {
return nil, errors.Wrap(ErrIncompleteOptions, "no storage root specified")
}
if options.RunRoot == "" {
return nil, errors.Wrap(ErrIncompleteOptions, "no storage runroot specified")
}
if err := os.MkdirAll(options.RunRoot, 0700); err != nil {
return nil, err
}
if err := os.MkdirAll(options.GraphRoot, 0700); err != nil {
return nil, err
}
for _, subdir := range []string{"mounts", "tmp", options.GraphDriverName} {
if err := os.MkdirAll(filepath.Join(options.GraphRoot, subdir), 0700); err != nil {
return nil, err
}
}
graphLock, err := GetLockfile(filepath.Join(options.GraphRoot, "storage.lock"))
if err != nil {
return nil, err
}
usernsLock, err := GetLockfile(filepath.Join(options.GraphRoot, "userns.lock"))
if err != nil {
return nil, err
}
autoNsMinSize := options.AutoNsMinSize
autoNsMaxSize := options.AutoNsMaxSize
if autoNsMinSize == 0 {
autoNsMinSize = AutoUserNsMinSize
}
if autoNsMaxSize == 0 {
autoNsMaxSize = AutoUserNsMaxSize
}
s := &store{
runRoot: options.RunRoot,
graphLock: graphLock,
graphRoot: options.GraphRoot,
graphDriverName: options.GraphDriverName,
graphOptions: options.GraphDriverOptions,
uidMap: copyIDMap(options.UIDMap),
gidMap: copyIDMap(options.GIDMap),
autoUsernsUser: options.RootAutoNsUser,
autoNsMinSize: autoNsMinSize,
autoNsMaxSize: autoNsMaxSize,
additionalUIDs: nil,
additionalGIDs: nil,
usernsLock: usernsLock,
disableVolatile: options.DisableVolatile,
}
if err := s.load(); err != nil {
return nil, err
}
stores = append(stores, s)
return s, nil
}
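// Example (illustrative sketch, not part of the original file): a caller's
// main() wired up as described in the comment above GetStore. Passing an
// unpopulated StoreOptions makes GetStore fall back to the defaults; the
// panic and the deferred Shutdown are placeholders for real error handling:
//
//	import (
//		"github.com/containers/storage"
//		"github.com/containers/storage/pkg/reexec"
//		"github.com/containers/storage/types"
//	)
//
//	func main() {
//		if reexec.Init() {
//			return
//		}
//		store, err := storage.GetStore(types.StoreOptions{})
//		if err != nil {
//			panic(err)
//		}
//		defer store.Shutdown(false)
//		// ... use store ...
//	}
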
func copyUint32Slice(slice []uint32) []uint32 {
m := []uint32{}
if slice != nil {
m = make([]uint32, len(slice))
copy(m, slice)
}
if len(m) > 0 {
return m[:]
}
return nil
}
func copyIDMap(idmap []idtools.IDMap) []idtools.IDMap {
m := []idtools.IDMap{}
if idmap != nil {
m = make([]idtools.IDMap, len(idmap))
copy(m, idmap)
}
if len(m) > 0 {
return m[:]
}
return nil
}
func (s *store) RunRoot() string {
return s.runRoot
}
func (s *store) GraphDriverName() string {
return s.graphDriverName
}
func (s *store) GraphRoot() string {
return s.graphRoot
}
func (s *store) GraphOptions() []string {
return s.graphOptions
}
func (s *store) UIDMap() []idtools.IDMap {
return copyIDMap(s.uidMap)
}
func (s *store) GIDMap() []idtools.IDMap {
return copyIDMap(s.gidMap)
}
func (s *store) load() error {
driver, err := s.GraphDriver()
if err != nil {
return err
}
s.graphDriver = driver
s.graphDriverName = driver.String()
driverPrefix := s.graphDriverName + "-"
gipath := filepath.Join(s.graphRoot, driverPrefix+"images")
if err := os.MkdirAll(gipath, 0700); err != nil {
return err
}
ris, err := newImageStore(gipath)
if err != nil {
return err
}
s.imageStore = ris
if _, err := s.ROImageStores(); err != nil {
return err
}
gcpath := filepath.Join(s.graphRoot, driverPrefix+"containers")
if err := os.MkdirAll(gcpath, 0700); err != nil {
return err
}
rcs, err := newContainerStore(gcpath)
if err != nil {
return err
}
rcpath := filepath.Join(s.runRoot, driverPrefix+"containers")
if err := os.MkdirAll(rcpath, 0700); err != nil {
return err
}
s.containerStore = rcs
for _, store := range driver.AdditionalImageStores() {
gipath := filepath.Join(store, driverPrefix+"images")
ris, err := newROImageStore(gipath)
if err != nil {
return err
}
s.roImageStores = append(s.roImageStores, ris)
}
s.digestLockRoot = filepath.Join(s.runRoot, driverPrefix+"locks")
if err := os.MkdirAll(s.digestLockRoot, 0700); err != nil {
return err
}
return nil
}
// GetDigestLock returns a digest-specific Locker.
func (s *store) GetDigestLock(d digest.Digest) (Locker, error) {
return GetLockfile(filepath.Join(s.digestLockRoot, d.String()))
}
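// Example (illustrative sketch, not part of the original file): serializing
// work keyed on a particular digest. "d" is an assumed digest.Digest value:
//
//	lock, err := s.GetDigestLock(d)
//	if err != nil {
//		return err
//	}
//	lock.Lock()
//	defer lock.Unlock()
//	// ... perform the digest-specific work ...
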
func (s *store) getGraphDriver() (drivers.Driver, error) {
if s.graphDriver != nil {
return s.graphDriver, nil
}
config := drivers.Options{
Root: s.graphRoot,
RunRoot: s.runRoot,
DriverOptions: s.graphOptions,
UIDMaps: s.uidMap,
GIDMaps: s.gidMap,
}
driver, err := drivers.New(s.graphDriverName, config)
if err != nil {
return nil, err
}
s.graphDriver = driver
s.graphDriverName = driver.String()
return driver, nil
}
func (s *store) GraphDriver() (drivers.Driver, error) {
s.graphLock.Lock()
defer s.graphLock.Unlock()
if s.graphLock.TouchedSince(s.lastLoaded) {
s.graphDriver = nil
s.layerStore = nil
s.lastLoaded = time.Now()
}
return s.getGraphDriver()
}
// LayerStore obtains and returns a handle to the writeable layer store object
// used by the Store. Accessing this store directly will bypass locking and
// synchronization, so it is not a part of the exported Store interface.
func (s *store) LayerStore() (LayerStore, error) {
s.graphLock.Lock()
defer s.graphLock.Unlock()
if s.graphLock.TouchedSince(s.lastLoaded) {
s.graphDriver = nil
s.layerStore = nil
s.lastLoaded = time.Now()
}
if s.layerStore != nil {
return s.layerStore, nil
}
driver, err := s.getGraphDriver()
if err != nil {
return nil, err
}
driverPrefix := s.graphDriverName + "-"
rlpath := filepath.Join(s.runRoot, driverPrefix+"layers")
if err := os.MkdirAll(rlpath, 0700); err != nil {
return nil, err
}
glpath := filepath.Join(s.graphRoot, driverPrefix+"layers")
if err := os.MkdirAll(glpath, 0700); err != nil {
return nil, err
}
rls, err := s.newLayerStore(rlpath, glpath, driver)
if err != nil {
return nil, err
}
s.layerStore = rls
return s.layerStore, nil
}
// ROLayerStores obtains additional read/only layer store objects used by the
// Store. Accessing these stores directly will bypass locking and
// synchronization, so it is not part of the exported Store interface.
func (s *store) ROLayerStores() ([]ROLayerStore, error) {
s.graphLock.Lock()
defer s.graphLock.Unlock()
if s.roLayerStores != nil {
return s.roLayerStores, nil
}
driver, err := s.getGraphDriver()
if err != nil {
return nil, err
}
driverPrefix := s.graphDriverName + "-"
rlpath := filepath.Join(s.runRoot, driverPrefix+"layers")
if err := os.MkdirAll(rlpath, 0700); err != nil {
return nil, err
}
for _, store := range driver.AdditionalImageStores() {
glpath := filepath.Join(store, driverPrefix+"layers")
rls, err := newROLayerStore(rlpath, glpath, driver)
if err != nil {
return nil, err
}
s.roLayerStores = append(s.roLayerStores, rls)
}
return s.roLayerStores, nil
}
// ImageStore obtains and returns a handle to the writable image store object
// used by the Store. Accessing this store directly will bypass locking and
// synchronization, so it is not a part of the exported Store interface.
func (s *store) ImageStore() (ImageStore, error) {
if s.imageStore != nil {
return s.imageStore, nil
}
return nil, ErrLoadError
}
// ROImageStores obtains additional read/only image store objects used by the
// Store. Accessing these stores directly will bypass locking and
// synchronization, so it is not a part of the exported Store interface.
func (s *store) ROImageStores() ([]ROImageStore, error) {
if s.imageStore == nil {
return nil, ErrLoadError
}
return s.roImageStores, nil
}
// ContainerStore obtains and returns a handle to the container store object
// used by the Store. Accessing this store directly will bypass locking and
// synchronization, so it is not a part of the exported Store interface.
func (s *store) ContainerStore() (ContainerStore, error) {
if s.containerStore != nil {
return s.containerStore, nil
}
return nil, ErrLoadError
}
func (s *store) canUseShifting(uidmap, gidmap []idtools.IDMap) bool {
if s.graphDriver == nil || !s.graphDriver.SupportsShifting() {
return false
}
if uidmap != nil && !idtools.IsContiguous(uidmap) {
return false
}
if gidmap != nil && !idtools.IsContiguous(gidmap) {
return false
}
return true
}
func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions, diff io.Reader) (*Layer, int64, error) {
var parentLayer *Layer
rlstore, err := s.LayerStore()
if err != nil {
return nil, -1, err
}
rlstores, err := s.ROLayerStores()
if err != nil {
return nil, -1, err
}
rcstore, err := s.ContainerStore()
if err != nil {
return nil, -1, err
}
rlstore.Lock()
defer rlstore.Unlock()
if err := rlstore.ReloadIfChanged(); err != nil {
return nil, -1, err
}
rcstore.Lock()
defer rcstore.Unlock()
if err := rcstore.ReloadIfChanged(); err != nil {
return nil, -1, err
}
if id == "" {
id = stringid.GenerateRandomID()
}
if options == nil {
options = &LayerOptions{}
}
if options.HostUIDMapping {
options.UIDMap = nil
}
if options.HostGIDMapping {
options.GIDMap = nil
}
uidMap := options.UIDMap
gidMap := options.GIDMap
if parent != "" {
var ilayer *Layer