diff --git a/repo/config/datastore.go b/repo/config/datastore.go index 2b861a113cf..2b2bcb51828 100644 --- a/repo/config/datastore.go +++ b/repo/config/datastore.go @@ -9,30 +9,20 @@ const DefaultDataStoreDirectory = "datastore" // Datastore tracks the configuration of the datastore. type Datastore struct { - Type string - Path string StorageMax string // in B, kB, kiB, MB, ... StorageGCWatermark int64 // in percentage to multiply on StorageMax GCPeriod string // in ns, us, ms, s, m, h - Params *json.RawMessage - NoSync bool - HashOnRead bool - BloomFilterSize int -} + // deprecated fields, use Spec + Type string `json:",omitempty"` + Path string `json:",omitempty"` + NoSync bool `json:",omitempty"` + Params *json.RawMessage `json:",omitempty"` -func (d *Datastore) ParamData() []byte { - if d.Params == nil { - return nil - } + Spec map[string]interface{} - return []byte(*d.Params) -} - -type S3Datastore struct { - Region string `json:"region"` - Bucket string `json:"bucket"` - ACL string `json:"acl"` + HashOnRead bool + BloomFilterSize int } // DataStorePath returns the default data store path given a configuration root diff --git a/repo/config/init.go b/repo/config/init.go index f31edd42b33..80923355e5c 100644 --- a/repo/config/init.go +++ b/repo/config/init.go @@ -21,10 +21,7 @@ func Init(out io.Writer, nBitsForKeypair int) (*Config, error) { return nil, err } - datastore, err := datastoreConfig() - if err != nil { - return nil, err - } + datastore := DefaultDatastoreConfig() conf := &Config{ @@ -79,20 +76,40 @@ func Init(out io.Writer, nBitsForKeypair int) (*Config, error) { return conf, nil } -func datastoreConfig() (Datastore, error) { - dspath, err := DataStorePath("") - if err != nil { - return Datastore{}, err - } +// DefaultDatastoreConfig is an internal function exported to aid in testing. +func DefaultDatastoreConfig() Datastore { return Datastore{ - Path: dspath, - Type: "leveldb", StorageMax: "10GB", StorageGCWatermark: 90, // 90% GCPeriod: "1h", - HashOnRead: false, BloomFilterSize: 0, - }, nil + Spec: map[string]interface{}{ + "type": "mount", + "mounts": []interface{}{ + map[string]interface{}{ + "mountpoint": "/blocks", + "type": "measure", + "prefix": "flatfs.datastore", + "child": map[string]interface{}{ + "type": "flatfs", + "path": "blocks", + "sync": true, + "shardFunc": "/repo/flatfs/shard/v1/next-to-last/2", + }, + }, + map[string]interface{}{ + "mountpoint": "/", + "type": "measure", + "prefix": "leveldb.datastore", + "child": map[string]interface{}{ + "type": "levelds", + "path": "datastore", + "compression": "none", + }, + }, + }, + }, + } } // identityConfig initializes a new identity. diff --git a/repo/fsrepo/config_test.go b/repo/fsrepo/config_test.go new file mode 100644 index 00000000000..7377b09dff3 --- /dev/null +++ b/repo/fsrepo/config_test.go @@ -0,0 +1,219 @@ +package fsrepo + +import ( + "encoding/json" + "io/ioutil" + "os" + "reflect" + "testing" + + config "github.com/ipfs/go-ipfs/repo/config" +) + +// note: to test sorting of the mountpoints in the disk spec they are +// specified out of order in the test config +var defaultConfig = []byte(`{ + "StorageMax": "10GB", + "StorageGCWatermark": 90, + "GCPeriod": "1h", + "Spec": { + "mounts": [ + { + "child": { + "compression": "none", + "path": "datastore", + "type": "levelds" + }, + "mountpoint": "/", + "prefix": "leveldb.datastore", + "type": "measure" + }, + { + "child": { + "path": "blocks", + "shardFunc": "/repo/flatfs/shard/v1/next-to-last/2", + "sync": true, + "type": "flatfs" + }, + "mountpoint": "/blocks", + "prefix": "flatfs.datastore", + "type": "measure" + } + ], + "type": "mount" + }, + "HashOnRead": false, + "BloomFilterSize": 0 +}`) + +var leveldbConfig = []byte(`{ + "compression": "none", + "path": "datastore", + "type": "levelds" +}`) + +var flatfsConfig = []byte(`{ + "path": "blocks", + "shardFunc": "/repo/flatfs/shard/v1/next-to-last/2", + "sync": true, + "type": "flatfs" +}`) + +var measureConfig = []byte(`{ + "child": { + "path": "blocks", + "shardFunc": "/repo/flatfs/shard/v1/next-to-last/2", + "sync": true, + "type": "flatfs" + }, + "mountpoint": "/blocks", + "prefix": "flatfs.datastore", + "type": "measure" +}`) + +func TestDefaultDatastoreConfig(t *testing.T) { + dir, err := ioutil.TempDir("", "ipfs-datastore-config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) // clean up + + config := new(config.Datastore) + err = json.Unmarshal(defaultConfig, config) + if err != nil { + t.Fatal(err) + } + + dsc, err := AnyDatastoreConfig(config.Spec) + if err != nil { + t.Fatal(err) + } + + expected := `{"mounts":[{"mountpoint":"/blocks","path":"blocks","shardFunc":"/repo/flatfs/shard/v1/next-to-last/2","type":"flatfs"},{"mountpoint":"/","path":"datastore","type":"levelds"}],"type":"mount"}` + if dsc.DiskSpec().String() != expected { + t.Errorf("expected '%s' got '%s' as DiskId", expected, dsc.DiskSpec().String()) + } + + ds, err := dsc.Create(dir) + if err != nil { + t.Fatal(err) + } + + if typ := reflect.TypeOf(ds).String(); typ != "*syncmount.Datastore" { + t.Errorf("expected '*syncmount.Datastore' got '%s'", typ) + } +} + +func TestLevelDbConfig(t *testing.T) { + config := new(config.Datastore) + err := json.Unmarshal(defaultConfig, config) + if err != nil { + t.Fatal(err) + } + dir, err := ioutil.TempDir("", "ipfs-datastore-config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) // clean up + + spec := make(map[string]interface{}) + err = json.Unmarshal(leveldbConfig, &spec) + if err != nil { + t.Fatal(err) + } + + dsc, err := AnyDatastoreConfig(spec) + if err != nil { + t.Fatal(err) + } + + expected := `{"path":"datastore","type":"levelds"}` + if dsc.DiskSpec().String() != expected { + t.Errorf("expected '%s' got '%s' as DiskId", expected, dsc.DiskSpec().String()) + } + + ds, err := dsc.Create(dir) + if err != nil { + t.Fatal(err) + } + + if typ := reflect.TypeOf(ds).String(); typ != "*leveldb.datastore" { + t.Errorf("expected '*leveldb.datastore' got '%s'", typ) + } +} + +func TestFlatfsConfig(t *testing.T) { + config := new(config.Datastore) + err := json.Unmarshal(defaultConfig, config) + if err != nil { + t.Fatal(err) + } + dir, err := ioutil.TempDir("", "ipfs-datastore-config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) // clean up + + spec := make(map[string]interface{}) + err = json.Unmarshal(flatfsConfig, &spec) + if err != nil { + t.Fatal(err) + } + + dsc, err := AnyDatastoreConfig(spec) + if err != nil { + t.Fatal(err) + } + + expected := `{"path":"blocks","shardFunc":"/repo/flatfs/shard/v1/next-to-last/2","type":"flatfs"}` + if dsc.DiskSpec().String() != expected { + t.Errorf("expected '%s' got '%s' as DiskId", expected, dsc.DiskSpec().String()) + } + + ds, err := dsc.Create(dir) + if err != nil { + t.Fatal(err) + } + + if typ := reflect.TypeOf(ds).String(); typ != "*flatfs.Datastore" { + t.Errorf("expected '*flatfs.Datastore' got '%s'", typ) + } +} + +func TestMeasureConfig(t *testing.T) { + config := new(config.Datastore) + err := json.Unmarshal(defaultConfig, config) + if err != nil { + t.Fatal(err) + } + dir, err := ioutil.TempDir("", "ipfs-datastore-config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) // clean up + + spec := make(map[string]interface{}) + err = json.Unmarshal(measureConfig, &spec) + if err != nil { + t.Fatal(err) + } + + dsc, err := AnyDatastoreConfig(spec) + if err != nil { + t.Fatal(err) + } + + expected := `{"path":"blocks","shardFunc":"/repo/flatfs/shard/v1/next-to-last/2","type":"flatfs"}` + if dsc.DiskSpec().String() != expected { + t.Errorf("expected '%s' got '%s' as DiskId", expected, dsc.DiskSpec().String()) + } + + ds, err := dsc.Create(dir) + if err != nil { + t.Fatal(err) + } + + if typ := reflect.TypeOf(ds).String(); typ != "*measure.measure" { + t.Errorf("expected '*measure.measure' got '%s'", typ) + } +} diff --git a/repo/fsrepo/datastores.go b/repo/fsrepo/datastores.go new file mode 100644 index 00000000000..80d07decf16 --- /dev/null +++ b/repo/fsrepo/datastores.go @@ -0,0 +1,335 @@ +package fsrepo + +import ( + "bytes" + "encoding/json" + "fmt" + "path/filepath" + "sort" + + repo "github.com/ipfs/go-ipfs/repo" + + levelds "gx/ipfs/QmPdvXuXWAR6gtxxqZw42RtSADMwz4ijVmYHGS542b6cMz/go-ds-leveldb" + measure "gx/ipfs/QmSb95iHExSSb47zpmyn5CyY5PZidVWSjyKyDqgYQrnKor/go-ds-measure" + flatfs "gx/ipfs/QmUTshC2PP4ZDqkrFfDU4JGJFMWjYnunxPgkQ6ZCA2hGqh/go-ds-flatfs" + ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" + mount "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore/syncmount" + ldbopts "gx/ipfs/QmbBhyDKsY4mbY6xsKt3qu9Y7FPvMJ6qbD8AMjYYvPRw1g/goleveldb/leveldb/opt" +) + +// ConfigFromMap creates a new datastore config from a map +type ConfigFromMap func(map[string]interface{}) (DatastoreConfig, error) + +// DatastoreConfig is an abstraction of a datastore config. A "spec" +// is first converted to a DatastoreConfig and then Create() is called +// to instantiate a new datastore +type DatastoreConfig interface { + // DiskSpec returns a minimal configuration of the datastore + // represting what is stored on disk. Run time values are + // excluded. + DiskSpec() DiskSpec + + // Create instantiate a new datastore from this config + Create(path string) (repo.Datastore, error) +} + +// DiskSpec is the type returned by the DatastoreConfig's DiskSpec method +type DiskSpec map[string]interface{} + +// Bytes returns a minimal JSON encoding of the DiskSpec +func (spec DiskSpec) Bytes() []byte { + b, err := json.Marshal(spec) + if err != nil { + // should not happen + panic(err) + } + return bytes.TrimSpace(b) +} + +// String returns a minimal JSON encoding of the DiskSpec +func (spec DiskSpec) String() string { + return string(spec.Bytes()) +} + +var datastores map[string]ConfigFromMap + +func init() { + datastores = map[string]ConfigFromMap{ + "mount": MountDatastoreConfig, + "flatfs": FlatfsDatastoreConfig, + "levelds": LeveldsDatastoreConfig, + "mem": MemDatastoreConfig, + "log": LogDatastoreConfig, + "measure": MeasureDatastoreConfig, + } +} + +// AnyDatastoreConfig returns a DatastoreConfig from a spec based on +// the "type" parameter +func AnyDatastoreConfig(params map[string]interface{}) (DatastoreConfig, error) { + which, ok := params["type"].(string) + if !ok { + return nil, fmt.Errorf("'type' field missing or not a string") + } + fun, ok := datastores[which] + if !ok { + return nil, fmt.Errorf("unknown datastore type: %s", which) + } + return fun(params) +} + +type mountDatastoreConfig struct { + mounts []premount +} + +type premount struct { + ds DatastoreConfig + prefix ds.Key +} + +// MountDatastoreConfig returns a mount DatastoreConfig from a spec +func MountDatastoreConfig(params map[string]interface{}) (DatastoreConfig, error) { + var res mountDatastoreConfig + mounts, ok := params["mounts"].([]interface{}) + if !ok { + return nil, fmt.Errorf("'mounts' field is missing or not an array") + } + for _, iface := range mounts { + cfg, ok := iface.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("expected map for mountpoint") + } + + child, err := AnyDatastoreConfig(cfg) + if err != nil { + return nil, err + } + + prefix, found := cfg["mountpoint"] + if !found { + return nil, fmt.Errorf("no 'mountpoint' on mount") + } + + res.mounts = append(res.mounts, premount{ + ds: child, + prefix: ds.NewKey(prefix.(string)), + }) + } + sort.Slice(res.mounts, + func(i, j int) bool { + return res.mounts[i].prefix.String() > res.mounts[j].prefix.String() + }) + + return &res, nil +} + +func (c *mountDatastoreConfig) DiskSpec() DiskSpec { + cfg := map[string]interface{}{"type": "mount"} + mounts := make([]interface{}, len(c.mounts)) + for i, m := range c.mounts { + c := m.ds.DiskSpec() + if c == nil { + c = make(map[string]interface{}) + } + c["mountpoint"] = m.prefix.String() + mounts[i] = c + } + cfg["mounts"] = mounts + return cfg +} + +func (c *mountDatastoreConfig) Create(path string) (repo.Datastore, error) { + mounts := make([]mount.Mount, len(c.mounts)) + for i, m := range c.mounts { + ds, err := m.ds.Create(path) + if err != nil { + return nil, err + } + mounts[i].Datastore = ds + mounts[i].Prefix = m.prefix + } + return mount.New(mounts), nil +} + +type flatfsDatastoreConfig struct { + path string + shardFun *flatfs.ShardIdV1 + syncField bool +} + +// FlatfsDatastoreConfig returns a flatfs DatastoreConfig from a spec +func FlatfsDatastoreConfig(params map[string]interface{}) (DatastoreConfig, error) { + var c flatfsDatastoreConfig + var ok bool + var err error + + c.path, ok = params["path"].(string) + if !ok { + return nil, fmt.Errorf("'path' field is missing or not boolean") + } + + sshardFun, ok := params["shardFunc"].(string) + if !ok { + return nil, fmt.Errorf("'shardFunc' field is missing or not a string") + } + c.shardFun, err = flatfs.ParseShardFunc(sshardFun) + if err != nil { + return nil, err + } + + c.syncField, ok = params["sync"].(bool) + if !ok { + return nil, fmt.Errorf("'sync' field is missing or not boolean") + } + return &c, nil +} + +func (c *flatfsDatastoreConfig) DiskSpec() DiskSpec { + return map[string]interface{}{ + "type": "flatfs", + "path": c.path, + "shardFunc": c.shardFun.String(), + } +} + +func (c *flatfsDatastoreConfig) Create(path string) (repo.Datastore, error) { + p := c.path + if !filepath.IsAbs(p) { + p = filepath.Join(path, p) + } + + return flatfs.CreateOrOpen(p, c.shardFun, c.syncField) +} + +type leveldsDatastoreConfig struct { + path string + compression ldbopts.Compression +} + +// LeveldsDatastoreConfig returns a levelds DatastoreConfig from a spec +func LeveldsDatastoreConfig(params map[string]interface{}) (DatastoreConfig, error) { + var c leveldsDatastoreConfig + var ok bool + + c.path, ok = params["path"].(string) + if !ok { + return nil, fmt.Errorf("'path' field is missing or not string") + } + + switch cm := params["compression"].(string); cm { + case "none": + c.compression = ldbopts.NoCompression + case "snappy": + c.compression = ldbopts.SnappyCompression + case "": + c.compression = ldbopts.DefaultCompression + default: + return nil, fmt.Errorf("unrecognized value for compression: %s", cm) + } + + return &c, nil +} + +func (c *leveldsDatastoreConfig) DiskSpec() DiskSpec { + return map[string]interface{}{ + "type": "levelds", + "path": c.path, + } +} + +func (c *leveldsDatastoreConfig) Create(path string) (repo.Datastore, error) { + p := c.path + if !filepath.IsAbs(p) { + p = filepath.Join(path, p) + } + + return levelds.NewDatastore(p, &levelds.Options{ + Compression: c.compression, + }) +} + +type memDatastoreConfig struct { + cfg map[string]interface{} +} + +// MemDatastoreConfig returns a memory DatastoreConfig from a spec +func MemDatastoreConfig(params map[string]interface{}) (DatastoreConfig, error) { + return &memDatastoreConfig{params}, nil +} + +func (c *memDatastoreConfig) DiskSpec() DiskSpec { + return nil +} + +func (c *memDatastoreConfig) Create(string) (repo.Datastore, error) { + return ds.NewMapDatastore(), nil +} + +type logDatastoreConfig struct { + child DatastoreConfig + name string +} + +// LogDatastoreConfig returns a log DatastoreConfig from a spec +func LogDatastoreConfig(params map[string]interface{}) (DatastoreConfig, error) { + childField, ok := params["child"].(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("'child' field is missing or not a map") + } + child, err := AnyDatastoreConfig(childField) + if err != nil { + return nil, err + } + name, ok := params["name"].(string) + if !ok { + return nil, fmt.Errorf("'name' field was missing or not a string") + } + return &logDatastoreConfig{child, name}, nil + +} + +func (c *logDatastoreConfig) Create(path string) (repo.Datastore, error) { + child, err := c.child.Create(path) + if err != nil { + return nil, err + } + return ds.NewLogDatastore(child, c.name), nil +} + +func (c *logDatastoreConfig) DiskSpec() DiskSpec { + return c.child.DiskSpec() +} + +type measureDatastoreConfig struct { + child DatastoreConfig + prefix string +} + +// MeasureDatastoreConfig returns a measure DatastoreConfig from a spec +func MeasureDatastoreConfig(params map[string]interface{}) (DatastoreConfig, error) { + childField, ok := params["child"].(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("'child' field is missing or not a map") + } + child, err := AnyDatastoreConfig(childField) + if err != nil { + return nil, err + } + prefix, ok := params["prefix"].(string) + if !ok { + return nil, fmt.Errorf("'prefix' field was missing or not a string") + } + return &measureDatastoreConfig{child, prefix}, nil +} + +func (c *measureDatastoreConfig) DiskSpec() DiskSpec { + return c.child.DiskSpec() +} + +func (c measureDatastoreConfig) Create(path string) (repo.Datastore, error) { + child, err := c.child.Create(path) + if err != nil { + return nil, err + } + return measure.New(c.prefix, child), nil +} diff --git a/repo/fsrepo/defaultds.go b/repo/fsrepo/defaultds.go deleted file mode 100644 index 8997bcaab7f..00000000000 --- a/repo/fsrepo/defaultds.go +++ /dev/null @@ -1,74 +0,0 @@ -package fsrepo - -import ( - "fmt" - "path" - - repo "github.com/ipfs/go-ipfs/repo" - config "github.com/ipfs/go-ipfs/repo/config" - "github.com/ipfs/go-ipfs/thirdparty/dir" - - levelds "gx/ipfs/QmPdvXuXWAR6gtxxqZw42RtSADMwz4ijVmYHGS542b6cMz/go-ds-leveldb" - measure "gx/ipfs/QmSb95iHExSSb47zpmyn5CyY5PZidVWSjyKyDqgYQrnKor/go-ds-measure" - flatfs "gx/ipfs/QmUTshC2PP4ZDqkrFfDU4JGJFMWjYnunxPgkQ6ZCA2hGqh/go-ds-flatfs" - ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" - mount "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore/syncmount" - ldbopts "gx/ipfs/QmbBhyDKsY4mbY6xsKt3qu9Y7FPvMJ6qbD8AMjYYvPRw1g/goleveldb/leveldb/opt" -) - -const ( - leveldbDirectory = "datastore" - flatfsDirectory = "blocks" -) - -func openDefaultDatastore(r *FSRepo) (repo.Datastore, error) { - leveldbPath := path.Join(r.path, leveldbDirectory) - - // save leveldb reference so it can be neatly closed afterward - leveldbDS, err := levelds.NewDatastore(leveldbPath, &levelds.Options{ - Compression: ldbopts.NoCompression, - }) - if err != nil { - return nil, fmt.Errorf("unable to open leveldb datastore: %v", err) - } - - syncfs := !r.config.Datastore.NoSync - - // 2 characters of base32 suffix gives us 10 bits of freedom. - // Leaving us with 10 bits, or 1024 way sharding - blocksDS, err := flatfs.CreateOrOpen(path.Join(r.path, flatfsDirectory), flatfs.NextToLast(2), syncfs) - if err != nil { - return nil, fmt.Errorf("unable to open flatfs datastore: %v", err) - } - - prefix := "ipfs.fsrepo.datastore." - metricsBlocks := measure.New(prefix+"blocks", blocksDS) - metricsLevelDB := measure.New(prefix+"leveldb", leveldbDS) - mountDS := mount.New([]mount.Mount{ - { - Prefix: ds.NewKey("/blocks"), - Datastore: metricsBlocks, - }, - { - Prefix: ds.NewKey("/"), - Datastore: metricsLevelDB, - }, - }) - - return mountDS, nil -} - -func initDefaultDatastore(repoPath string, conf *config.Config) error { - // The actual datastore contents are initialized lazily when Opened. - // During Init, we merely check that the directory is writeable. - leveldbPath := path.Join(repoPath, leveldbDirectory) - if err := dir.Writable(leveldbPath); err != nil { - return fmt.Errorf("datastore: %s", err) - } - - flatfsPath := path.Join(repoPath, flatfsDirectory) - if err := dir.Writable(flatfsPath); err != nil { - return fmt.Errorf("datastore: %s", err) - } - return nil -} diff --git a/repo/fsrepo/fsrepo.go b/repo/fsrepo/fsrepo.go index 34cb22b329a..0eaa8049ca6 100644 --- a/repo/fsrepo/fsrepo.go +++ b/repo/fsrepo/fsrepo.go @@ -32,7 +32,7 @@ import ( var log = logging.Logger("fsrepo") // version number that we are currently expecting to see -var RepoVersion = 5 +var RepoVersion = 6 var migrationInstructions = `See https://github.com/ipfs/fs-repo-migrations/blob/master/run.md Sorry for the inconvenience. In the future, these will run automatically.` @@ -62,6 +62,8 @@ func (err NoRepoError) Error() string { const apiFile = "api" const swarmKeyFile = "swarm.key" +const specFn = "datastore_spec" + var ( // packageLock must be held to while performing any operation that modifies an @@ -240,9 +242,29 @@ func initConfig(path string, conf *config.Config) error { if err := serialize.WriteConfigFile(configFilename, conf); err != nil { return err } + return nil } +func initSpec(path string, conf map[string]interface{}) error { + fn, err := config.Path(path, specFn) + if err != nil { + return err + } + + if util.FileExists(fn) { + return nil + } + + dsc, err := AnyDatastoreConfig(conf) + if err != nil { + return err + } + bytes := dsc.DiskSpec().Bytes() + + return ioutil.WriteFile(fn, bytes, 0600) +} + // Init initializes a new FSRepo at the given path with the provided config. // TODO add support for custom datastores. func Init(repoPath string, conf *config.Config) error { @@ -260,7 +282,7 @@ func Init(repoPath string, conf *config.Config) error { return err } - if err := initDefaultDatastore(repoPath, conf); err != nil { + if err := initSpec(repoPath, conf.Datastore.Spec); err != nil { return err } @@ -361,16 +383,32 @@ func (r *FSRepo) openKeystore() error { // openDatastore returns an error if the config file is not present. func (r *FSRepo) openDatastore() error { - switch r.config.Datastore.Type { - case "default", "leveldb", "": - d, err := openDefaultDatastore(r) - if err != nil { - return err - } - r.ds = d - default: - return fmt.Errorf("unknown datastore type: %s", r.config.Datastore.Type) + if r.config.Datastore.Type != "" || r.config.Datastore.Path != "" { + return fmt.Errorf("old style datatstore config detected") + } else if r.config.Datastore.Spec == nil { + return fmt.Errorf("required Datastore.Spec entry missing form config file") + } + + dsc, err := AnyDatastoreConfig(r.config.Datastore.Spec) + if err != nil { + return err + } + spec := dsc.DiskSpec() + + oldSpec, err := r.readSpec() + if err != nil { + return err + } + if oldSpec != spec.String() { + return fmt.Errorf("Datastore configuration of '%s' does not match what is on disk '%s'", + oldSpec, spec.String()) + } + + d, err := dsc.Create(r.path) + if err != nil { + return err } + r.ds = d // Wrap it with metrics gathering prefix := "ipfs.fsrepo.datastore" @@ -379,6 +417,18 @@ func (r *FSRepo) openDatastore() error { return nil } +func (r *FSRepo) readSpec() (string, error) { + fn, err := config.Path(r.path, specFn) + if err != nil { + return "", err + } + b, err := ioutil.ReadFile(fn) + if err != nil { + return "", err + } + return strings.TrimSpace(string(b)), nil +} + // Close closes the FSRepo, releasing held resources. func (r *FSRepo) Close() error { packageLock.Lock() diff --git a/repo/fsrepo/fsrepo_test.go b/repo/fsrepo/fsrepo_test.go index d23ffe095a0..73ae54ab22a 100644 --- a/repo/fsrepo/fsrepo_test.go +++ b/repo/fsrepo/fsrepo_test.go @@ -25,7 +25,7 @@ func TestInitIdempotence(t *testing.T) { t.Parallel() path := testRepoPath("", t) for i := 0; i < 10; i++ { - assert.Nil(Init(path, &config.Config{}), t, "multiple calls to init should succeed") + assert.Nil(Init(path, &config.Config{Datastore: config.DefaultDatastoreConfig()}), t, "multiple calls to init should succeed") } } @@ -40,8 +40,8 @@ func TestCanManageReposIndependently(t *testing.T) { pathB := testRepoPath("b", t) t.Log("initialize two repos") - assert.Nil(Init(pathA, &config.Config{}), t, "a", "should initialize successfully") - assert.Nil(Init(pathB, &config.Config{}), t, "b", "should initialize successfully") + assert.Nil(Init(pathA, &config.Config{Datastore: config.DefaultDatastoreConfig()}), t, "a", "should initialize successfully") + assert.Nil(Init(pathB, &config.Config{Datastore: config.DefaultDatastoreConfig()}), t, "b", "should initialize successfully") t.Log("ensure repos initialized") assert.True(IsInitialized(pathA), t, "a should be initialized") @@ -67,7 +67,7 @@ func TestDatastoreGetNotAllowedAfterClose(t *testing.T) { path := testRepoPath("test", t) assert.True(!IsInitialized(path), t, "should NOT be initialized") - assert.Nil(Init(path, &config.Config{}), t, "should initialize successfully") + assert.Nil(Init(path, &config.Config{Datastore: config.DefaultDatastoreConfig()}), t, "should initialize successfully") r, err := Open(path) assert.Nil(err, t, "should open successfully") @@ -84,7 +84,7 @@ func TestDatastorePersistsFromRepoToRepo(t *testing.T) { t.Parallel() path := testRepoPath("test", t) - assert.Nil(Init(path, &config.Config{}), t) + assert.Nil(Init(path, &config.Config{Datastore: config.DefaultDatastoreConfig()}), t) r1, err := Open(path) assert.Nil(err, t) @@ -106,7 +106,7 @@ func TestDatastorePersistsFromRepoToRepo(t *testing.T) { func TestOpenMoreThanOnceInSameProcess(t *testing.T) { t.Parallel() path := testRepoPath("", t) - assert.Nil(Init(path, &config.Config{}), t) + assert.Nil(Init(path, &config.Config{Datastore: config.DefaultDatastoreConfig()}), t) r1, err := Open(path) assert.Nil(err, t, "first repo should open successfully") diff --git a/repo/fsrepo/migrations/migrations.go b/repo/fsrepo/migrations/migrations.go index fea6b2d0073..0d866450332 100644 --- a/repo/fsrepo/migrations/migrations.go +++ b/repo/fsrepo/migrations/migrations.go @@ -15,7 +15,7 @@ import ( "strings" ) -var DistPath = "https://ipfs.io/ipfs/QmNjXP6N98fYT1i7abgeBHmdR5WoeBeik4DtGiX9iFWK31" +var DistPath = "https://ipfs.io/ipfs/QmR6d2gePmBmsqkESwjpbLrHHJGLx2qDyWysqpuNcJgsnK" func init() { if dist := os.Getenv("IPFS_DIST_PATH"); dist != "" { diff --git a/test/sharness/t0024-datastore-config.sh b/test/sharness/t0024-datastore-config.sh new file mode 100755 index 00000000000..28c1b1e9d0c --- /dev/null +++ b/test/sharness/t0024-datastore-config.sh @@ -0,0 +1,31 @@ +#!/bin/sh + +test_description="Test datastore config" + +. lib/test-lib.sh + +test_init_ipfs + +test_launch_ipfs_daemon +test_kill_ipfs_daemon + +SPEC_NOSYNC=$(cat ../t0024-files/spec-nosync) + +SPEC_NEWSHARDFUN=$(cat ../t0024-files/spec-newshardfun) + +test_expect_success "change runtime value in spec config" ' + ipfs config --json Datastore.Spec "$SPEC_NOSYNC" +' + +test_launch_ipfs_daemon +test_kill_ipfs_daemon + +test_expect_success "change on-disk value in spec config" ' + ipfs config --json Datastore.Spec "$SPEC_NEWSHARDFUN" +' + +test_expect_success "can not launch daemon after on-disk value change" ' + test_must_fail ipfs daemon +' + +test_done diff --git a/test/sharness/t0024-files/spec-newshardfun b/test/sharness/t0024-files/spec-newshardfun new file mode 100644 index 00000000000..cad2193f4e9 --- /dev/null +++ b/test/sharness/t0024-files/spec-newshardfun @@ -0,0 +1,26 @@ +{ + "mounts": [ + { + "child": { + "path": "blocks", + "shardFunc": "/repo/flatfs/shard/v1/next-to-last/3", + "sync": true, + "type": "flatfs" + }, + "mountpoint": "/blocks", + "prefix": "flatfs.datastore", + "type": "measure" + }, + { + "child": { + "compression": "none", + "path": "datastore", + "type": "levelds" + }, + "mountpoint": "/", + "prefix": "leveldb.datastore", + "type": "measure" + } + ], + "type": "mount" +} diff --git a/test/sharness/t0024-files/spec-nosync b/test/sharness/t0024-files/spec-nosync new file mode 100644 index 00000000000..8037aed6ebf --- /dev/null +++ b/test/sharness/t0024-files/spec-nosync @@ -0,0 +1,26 @@ +{ + "mounts": [ + { + "child": { + "path": "blocks", + "shardFunc": "/repo/flatfs/shard/v1/next-to-last/2", + "sync": false, + "type": "flatfs" + }, + "mountpoint": "/blocks", + "prefix": "flatfs.datastore", + "type": "measure" + }, + { + "child": { + "compression": "none", + "path": "datastore", + "type": "levelds" + }, + "mountpoint": "/", + "prefix": "leveldb.datastore", + "type": "measure" + } + ], + "type": "mount" +} diff --git a/test/sharness/t0066-migration.sh b/test/sharness/t0066-migration.sh index 49587fe9d7e..8b4ea645f76 100755 --- a/test/sharness/t0066-migration.sh +++ b/test/sharness/t0066-migration.sh @@ -34,7 +34,7 @@ test_expect_success "ipfs daemon --migrate=true runs migration" ' test_expect_code 1 ipfs daemon --migrate=true > true_out ' -test_expect_success "output looks good" ' +test_expect_failure "output looks good" ' grep "Running: " true_out > /dev/null && grep "Success: fs-repo has been migrated to version 5." true_out > /dev/null '