diff --git a/Godeps/_workspace/src/github.com/cheggaaa/pb/pb.go b/Godeps/_workspace/src/github.com/cheggaaa/pb/pb.go
index 104bd4a60e2..d58fb8e943a 100644
--- a/Godeps/_workspace/src/github.com/cheggaaa/pb/pb.go
+++ b/Godeps/_workspace/src/github.com/cheggaaa/pb/pb.go
@@ -100,7 +100,7 @@ func (pb *ProgressBar) Start() *ProgressBar {
 		pb.ShowBar = false
 		pb.ShowTimeLeft = false
 		pb.ShowPercent = false
-	} 
+	}
 	if !pb.ManualUpdate {
 		go pb.writer()
 	}
@@ -233,7 +233,7 @@ func (pb *ProgressBar) write(current int64) {
 		percent := float64(current) / (float64(pb.Total) / float64(100))
 		percentBox = fmt.Sprintf(" %#.02f %% ", percent)
 	}
-	
+
 	// counters
 	if pb.ShowCounters {
 		if pb.Total > 0 {
@@ -271,7 +271,7 @@ func (pb *ProgressBar) write(current int64) {
 	// bar
 	if pb.ShowBar {
 		size := width - len(countersBox+pb.BarStart+pb.BarEnd+percentBox+timeLeftBox+speedBox+pb.prefix+pb.postfix)
-		if size > 0 {
+		if size > 0 && pb.Total > 0 {
 			curCount := int(math.Ceil((float64(current) / float64(pb.Total)) * float64(size)))
 			emptCount := size - curCount
 			barBox = pb.BarStart
diff --git a/blocks/blockstore/blockstore.go b/blocks/blockstore/blockstore.go
index bc000df932a..59f0f2c72ce 100644
--- a/blocks/blockstore/blockstore.go
+++ b/blocks/blockstore/blockstore.go
@@ -5,6 +5,7 @@ package blockstore
 import (
 	"errors"
 	"sync"
+	"sync/atomic"
 
 	ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
 	dsns "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/namespace"
@@ -49,6 +50,10 @@ type GCBlockstore interface {
 	// at the same time, but no GC should happen simultaneously.
 	// Reading during Pinning is safe, and requires no lock.
 	PinLock() func()
+
+	// GCRequested returns true if GCLock has been called and is waiting to
+	// take the lock.
+	GCRequested() bool
 }
 
 func NewBlockstore(d ds.Batching) *blockstore {
@@ -63,7 +68,9 @@ func NewBlockstore(d ds.Batching) *blockstore {
 
 type blockstore struct {
 	datastore ds.Batching
 
-	lk sync.RWMutex
+	lk      sync.RWMutex
+	gcreq   int32
+	gcreqlk sync.Mutex
 }
 
 func (bs *blockstore) Get(k key.Key) (*blocks.Block, error) {
@@ -192,7 +199,9 @@ func (bs *blockstore) AllKeysChan(ctx context.Context) (<-chan key.Key, error) {
 }
 
 func (bs *blockstore) GCLock() func() {
+	atomic.AddInt32(&bs.gcreq, 1)
 	bs.lk.Lock()
+	atomic.AddInt32(&bs.gcreq, -1)
 	return bs.lk.Unlock
 }
 
@@ -200,3 +209,7 @@ func (bs *blockstore) PinLock() func() {
 	bs.lk.RLock()
 	return bs.lk.RUnlock
 }
+
+func (bs *blockstore) GCRequested() bool {
+	return atomic.LoadInt32(&bs.gcreq) > 0
+}
diff --git a/blocks/blockstore/write_cache.go b/blocks/blockstore/write_cache.go
index 52af696e4ae..73a7813f5ae 100644
--- a/blocks/blockstore/write_cache.go
+++ b/blocks/blockstore/write_cache.go
@@ -66,3 +66,7 @@ func (w *writecache) GCLock() func() {
 func (w *writecache) PinLock() func() {
 	return w.blockstore.(GCBlockstore).PinLock()
 }
+
+func (w *writecache) GCRequested() bool {
+	return w.blockstore.(GCBlockstore).GCRequested()
+}
diff --git a/commands/cli/parse.go b/commands/cli/parse.go
index 59d283707ab..6de79e6035b 100644
--- a/commands/cli/parse.go
+++ b/commands/cli/parse.go
@@ -6,6 +6,7 @@ import (
 	"os"
 	"path"
 	"runtime"
+	"sort"
 	"strings"
 
 	cmds "github.com/ipfs/go-ipfs/commands"
@@ -259,8 +260,8 @@ func parseArgs(inputs []string, stdin *os.File, argDefs []cmds.Argument, recursi
 	}
 
 	stringArgs := make([]string, 0, numInputs)
-	fileArgs := make([]files.File, 0, numInputs)
+	fileArgs := make(map[string]files.File)
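+	// Note: collecting file arguments in a map keyed by path lets duplicate
+	// paths collapse, and allows the result to be emitted in a deterministic,
+	// sorted order via filesMapToSortedArr below.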
 
 	argDefIndex := 0 // the index of the current argument definition
 	for i := 0; i < numInputs; i++ {
 		argDef := getArgDef(argDefIndex, argDefs)
@@ -295,18 +296,21 @@ func parseArgs(inputs []string, stdin *os.File, argDefs []cmds.Argument, recursi
 		} else if argDef.Type == cmds.ArgFile {
 			if stdin == nil || !argDef.SupportsStdin {
 				// treat stringArg values as file paths
-				fileArgs, inputs, err = appendFile(fileArgs, inputs, argDef, recursive)
+				fpath := inputs[0]
+				inputs = inputs[1:]
+				file, err := appendFile(fpath, argDef, recursive)
 				if err != nil {
 					return nil, nil, err
 				}
+				fileArgs[fpath] = file
 			} else {
 				if len(inputs) > 0 {
 					// don't use stdin if we have inputs
 					stdin = nil
 				} else {
 					// if we have a stdin, create a file from it
-					fileArgs, stdin = appendStdinAsFile(fileArgs, stdin)
+					fileArgs[""] = files.NewReaderFile("", "", stdin, nil)
 				}
 			}
 		}
@@ -323,7 +327,23 @@ func parseArgs(inputs []string, stdin *os.File, argDefs []cmds.Argument, recursi
 		}
 	}
 
-	return stringArgs, fileArgs, nil
+	return stringArgs, filesMapToSortedArr(fileArgs), nil
+}
+
+func filesMapToSortedArr(fs map[string]files.File) []files.File {
+	var names []string
+	for name := range fs {
+		names = append(names, name)
+	}
+
+	sort.Strings(names)
+
+	var out []files.File
+	for _, f := range names {
+		out = append(out, fs[f])
+	}
+
+	return out
 }
 
 func getArgDef(i int, argDefs []cmds.Argument) *cmds.Argument {
@@ -356,44 +376,34 @@ func appendStdinAsString(args []string, stdin *os.File) ([]string, *os.File, err
 	return append(args, strings.Split(input, "\n")...), nil, nil
 }
 
-func appendFile(args []files.File, inputs []string, argDef *cmds.Argument, recursive bool) ([]files.File, []string, error) {
-	fpath := inputs[0]
+const notRecursiveFmtStr = "'%s' is a directory, use the '-%s' flag to specify directories"
+const dirNotSupportedFmtStr = "Invalid path '%s', argument '%s' does not support directories"
+
+func appendFile(fpath string, argDef *cmds.Argument, recursive bool) (files.File, error) {
 	if fpath == "." {
 		cwd, err := os.Getwd()
 		if err != nil {
-			return nil, nil, err
+			return nil, err
 		}
 		fpath = cwd
 	}
+
 	stat, err := os.Lstat(fpath)
 	if err != nil {
-		return nil, nil, err
+		return nil, err
 	}
 
 	if stat.IsDir() {
 		if !argDef.Recursive {
-			err = fmt.Errorf("Invalid path '%s', argument '%s' does not support directories",
-				fpath, argDef.Name)
-			return nil, nil, err
+			return nil, fmt.Errorf(dirNotSupportedFmtStr, fpath, argDef.Name)
 		}
 		if !recursive {
-			err = fmt.Errorf("'%s' is a directory, use the '-%s' flag to specify directories",
-				fpath, cmds.RecShort)
-			return nil, nil, err
+			return nil, fmt.Errorf(notRecursiveFmtStr, fpath, cmds.RecShort)
 		}
 	}
 
-	arg, err := files.NewSerialFile(path.Base(fpath), fpath, stat)
-	if err != nil {
-		return nil, nil, err
-	}
-	return append(args, arg), inputs[1:], nil
-}
-
-func appendStdinAsFile(args []files.File, stdin *os.File) ([]files.File, *os.File) {
-	arg := files.NewReaderFile("", "", stdin, nil)
-	return append(args, arg), nil
+	return files.NewSerialFile(path.Base(fpath), fpath, stat)
 }
 
 // isTerminal returns true if stdin is a Stdin pipe (e.g. `cat file | ipfs`),
diff --git a/core/commands/add.go b/core/commands/add.go
index 1232f1db6ab..c8a17eca5b3 100644
--- a/core/commands/add.go
+++ b/core/commands/add.go
@@ -18,6 +18,7 @@ var ErrDepthLimitExceeded = fmt.Errorf("depth limit exceeded")
 
 const (
 	quietOptionName    = "quiet"
+	silentOptionName   = "silent"
 	progressOptionName = "progress"
 	trickleOptionName  = "trickle"
 	wrapOptionName     = "wrap-with-directory"
@@ -44,6 +45,7 @@ remains to be implemented.
 	Options: []cmds.Option{
 		cmds.OptionRecursivePath, // a builtin option that allows recursive paths (-r, --recursive)
 		cmds.BoolOption(quietOptionName, "q", "Write minimal output"),
+		cmds.BoolOption(silentOptionName, "Write no output"),
 		cmds.BoolOption(progressOptionName, "p", "Stream progress data"),
 		cmds.BoolOption(trickleOptionName, "t", "Use trickle-dag format for dag generation"),
 		cmds.BoolOption(onlyHashOptionName, "n", "Only chunk and hash - do not write to disk"),
@@ -57,22 +59,35 @@ remains to be implemented.
 			return nil
 		}
 
-		req.SetOption(progressOptionName, true)
+		// ipfs cli progress bar defaults to true
+		progress, found, _ := req.Option(progressOptionName).Bool()
+		if !found {
+			progress = true
+		}
+
+		req.SetOption(progressOptionName, progress)
 
 		sizeFile, ok := req.Files().(files.SizeFile)
 		if !ok {
 			// we don't need to error, the progress bar just won't know how big the files are
+			log.Warning("cannot determine size of input file")
 			return nil
 		}
 
-		size, err := sizeFile.Size()
-		if err != nil {
-			// see comment above
-			return nil
-		}
+		sizeCh := make(chan int64, 1)
+		req.Values()["size"] = sizeCh
+
+		go func() {
+			size, err := sizeFile.Size()
+			if err != nil {
+				log.Warningf("error getting files size: %s", err)
+				// see comment above
+				return
+			}
 
-		log.Debugf("Total size of file being added: %v\n", size)
-		req.Values()["size"] = size
+			log.Debugf("Total size of file being added: %v\n", size)
+			sizeCh <- size
+		}()
 
 		return nil
 	},
@@ -95,6 +110,7 @@ remains to be implemented.
 		wrap, _, _ := req.Option(wrapOptionName).Bool()
 		hash, _, _ := req.Option(onlyHashOptionName).Bool()
 		hidden, _, _ := req.Option(hiddenOptionName).Bool()
+		silent, _, _ := req.Option(silentOptionName).Bool()
 		chunker, _, _ := req.Option(chunkerOptionName).String()
 		dopin, pin_found, _ := req.Option(pinOptionName).Bool()
 
@@ -118,13 +134,18 @@ remains to be implemented.
 		outChan := make(chan interface{}, 8)
 		res.SetOutput((<-chan interface{})(outChan))
 
-		fileAdder := coreunix.NewAdder(req.Context(), n, outChan)
+		fileAdder, err := coreunix.NewAdder(req.Context(), n, outChan)
+		if err != nil {
+			res.SetError(err, cmds.ErrNormal)
+			return
+		}
 		fileAdder.Chunker = chunker
 		fileAdder.Progress = progress
 		fileAdder.Hidden = hidden
 		fileAdder.Trickle = trickle
 		fileAdder.Wrap = wrap
 		fileAdder.Pin = dopin
+		fileAdder.Silent = silent
 
 		// addAllFiles loops over a convenience slice file to
 		// add each file individually. e.g. 'ipfs add a b c'
@@ -138,7 +159,7 @@ remains to be implemented.
 				return nil // done
 			}
 
-			if _, err := fileAdder.AddFile(file); err != nil {
+			if err := fileAdder.AddFile(file); err != nil {
 				return err
 			}
 		}
@@ -154,9 +175,8 @@ remains to be implemented.
 		}
 
 		// copy intermediary nodes from editor to our actual dagservice
-		_, err := fileAdder.Finalize(n.DAG)
+		_, err := fileAdder.Finalize()
 		if err != nil {
-			log.Error("WRITE OUT: ", err)
 			return err
 		}
 
@@ -189,17 +209,29 @@ remains to be implemented.
 			return
 		}
 
-		size := int64(0)
-		s, found := req.Values()["size"]
-		if found {
-			size = s.(int64)
+		progress, prgFound, err := req.Option(progressOptionName).Bool()
+		if err != nil {
+			res.SetError(u.ErrCast(), cmds.ErrNormal)
+			return
+		}
+
+		silent, _, err := req.Option(silentOptionName).Bool()
+		if err != nil {
+			res.SetError(u.ErrCast(), cmds.ErrNormal)
+			return
+		}
+
+		var showProgressBar bool
+		if prgFound {
+			showProgressBar = progress
+		} else if !quiet && !silent {
+			showProgressBar = true
 		}
 
-		showProgressBar := !quiet && size >= progressBarMinSize
 		var bar *pb.ProgressBar
 		var terminalWidth int
 		if showProgressBar {
-			bar = pb.New64(size).SetUnits(pb.U_BYTES)
+			bar = pb.New64(0).SetUnits(pb.U_BYTES)
 			bar.ManualUpdate = true
 			bar.Start()
 
@@ -215,43 +247,63 @@ remains to be implemented.
 			bar.Update()
 		}
 
+		var sizeChan chan int64
+		s, found := req.Values()["size"]
+		if found {
+			sizeChan = s.(chan int64)
+		}
+
 		lastFile := ""
 		var totalProgress, prevFiles, lastBytes int64
 
-		for out := range outChan {
-			output := out.(*coreunix.AddedObject)
-			if len(output.Hash) > 0 {
-				if showProgressBar {
-					// clear progress bar line before we print "added x" output
-					fmt.Fprintf(res.Stderr(), "\033[2K\r")
+	LOOP:
+		for {
+			select {
+			case out, ok := <-outChan:
+				if !ok {
+					break LOOP
 				}
-				if quiet {
-					fmt.Fprintf(res.Stdout(), "%s\n", output.Hash)
-				} else {
-					fmt.Fprintf(res.Stdout(), "added %s %s\n", output.Hash, output.Name)
-				}
-
-			} else {
-				log.Debugf("add progress: %v %v\n", output.Name, output.Bytes)
+				output := out.(*coreunix.AddedObject)
+				if len(output.Hash) > 0 {
+					if showProgressBar {
+						// clear progress bar line before we print "added x" output
+						fmt.Fprintf(res.Stderr(), "\033[2K\r")
+					}
+					if quiet {
+						fmt.Fprintf(res.Stdout(), "%s\n", output.Hash)
+					} else {
+						fmt.Fprintf(res.Stdout(), "added %s %s\n", output.Hash, output.Name)
+					}
 
-				if !showProgressBar {
-					continue
+				} else {
+					log.Debugf("add progress: %v %v\n", output.Name, output.Bytes)
+
+					if !showProgressBar {
+						continue
+					}
+
+					if len(lastFile) == 0 {
+						lastFile = output.Name
+					}
+					if output.Name != lastFile || output.Bytes < lastBytes {
+						prevFiles += lastBytes
+						lastFile = output.Name
+					}
+					lastBytes = output.Bytes
+					delta := prevFiles + lastBytes - totalProgress
+					totalProgress = bar.Add64(delta)
 				}
 
-				if len(lastFile) == 0 {
-					lastFile = output.Name
+				if showProgressBar {
+					bar.Update()
 				}
-				if output.Name != lastFile || output.Bytes < lastBytes {
-					prevFiles += lastBytes
-					lastFile = output.Name
+			case size := <-sizeChan:
+				if showProgressBar {
+					bar.Total = size
+					bar.ShowPercent = true
+					bar.ShowBar = true
+					bar.ShowTimeLeft = true
 				}
-				lastBytes = output.Bytes
-				delta := prevFiles + lastBytes - totalProgress
-				totalProgress = bar.Add64(delta)
-			}
-
-			if showProgressBar {
-				bar.Update()
 			}
 		}
 	},
diff --git a/core/commands/files/files.go b/core/commands/files/files.go
index bc788fb6069..94b5fd8916d 100644
--- a/core/commands/files/files.go
+++ b/core/commands/files/files.go
@@ -68,7 +68,7 @@ var FilesStatCmd = &cmds.Command{
 			return
 		}
 
-		o, err := statNode(fsn)
+		o, err := statNode(node.DAG, fsn)
 		if err != nil {
 			res.SetError(err, cmds.ErrNormal)
 			return
@@ -90,13 +90,14 @@ var FilesStatCmd = &cmds.Command{
 	Type: Object{},
 }
 
-func statNode(fsn mfs.FSNode) (*Object, error) {
+func statNode(ds dag.DAGService, fsn mfs.FSNode) (*Object, error) {
 	nd, err := fsn.GetNode()
 	if err != nil {
 		return nil, err
 	}
 
-	k, err := nd.Key()
+	// add to dagserv to ensure it's available
+	k, err := ds.Add(nd)
 	if err != nil {
 		return nil, err
 	}
@@ -434,10 +435,20 @@
 a beginning offset to write to. The entire length of the input will be
 written. If the '--create' option is specified, the file will be created if it
 does not exist. Nonexistent intermediate directories will not be created.
 
+If the '--flush' option is set to false, changes will not be propagated to the
+merkledag root. This can make operations much faster when doing a large number
+of writes to a deep directory structure.
+
 Example:
 
     echo "hello world" | ipfs files write --create /myfs/a/b/file
     echo "hello world" | ipfs files write --truncate /myfs/a/b/file
+
+Warning:
+
+    Usage of the '--flush=false' option does not guarantee data durability until
+    the tree has been flushed. This can be accomplished by running 'ipfs files stat'
+    on the file or any of its ancestors.
 `,
 	},
 	Arguments: []cmds.Argument{
@@ -449,6 +460,7 @@ Example:
 		cmds.BoolOption("e", "create", "create the file if it does not exist"),
 		cmds.BoolOption("t", "truncate", "truncate the file before writing"),
 		cmds.IntOption("n", "count", "maximum number of bytes to read"),
+		cmds.BoolOption("f", "flush", "flush file and ancestors after write (default: true)"),
 	},
 	Run: func(req cmds.Request, res cmds.Response) {
 		path, err := checkPath(req.Arguments()[0])
@@ -459,6 +471,10 @@ Example:
 
 		create, _, _ := req.Option("create").Bool()
 		trunc, _, _ := req.Option("truncate").Bool()
+		flush, set, _ := req.Option("flush").Bool()
+		if !set {
+			flush = true
+		}
 
 		nd, err := req.InvocContext().GetNode()
 		if err != nil {
@@ -471,7 +487,12 @@ Example:
 			res.SetError(err, cmds.ErrNormal)
 			return
 		}
-		defer fi.Close()
+
+		if flush {
+			defer fi.Close()
+		} else {
+			defer fi.Sync()
+		}
 
 		if trunc {
 			if err := fi.Truncate(0); err != nil {
diff --git a/core/coreunix/add.go b/core/coreunix/add.go
index 3070e874461..bd6e4f74539 100644
--- a/core/coreunix/add.go
+++ b/core/coreunix/add.go
@@ -1,6 +1,7 @@
 package coreunix
 
 import (
+	"bytes"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -11,11 +12,12 @@ import (
 	syncds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync"
 	context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
 
 	bstore "github.com/ipfs/go-ipfs/blocks/blockstore"
+	key "github.com/ipfs/go-ipfs/blocks/key"
 	bserv "github.com/ipfs/go-ipfs/blockservice"
 	"github.com/ipfs/go-ipfs/exchange/offline"
 	importer "github.com/ipfs/go-ipfs/importer"
 	"github.com/ipfs/go-ipfs/importer/chunk"
-	dagutils "github.com/ipfs/go-ipfs/merkledag/utils"
+	mfs "github.com/ipfs/go-ipfs/mfs"
 	"github.com/ipfs/go-ipfs/pin"
 
 	"github.com/ipfs/go-ipfs/commands/files"
@@ -27,6 +29,8 @@ import (
 
 var log = logging.Logger("coreunix")
 
+var folderData = unixfs.FolderPBData()
+
 // how many bytes of progress to wait before sending a progress update message
 const progressReaderIncrement = 1024 * 256
 
@@ -62,12 +66,16 @@ type AddedObject struct {
 	Bytes int64  `json:",omitempty"`
 }
 
-func NewAdder(ctx context.Context, n *core.IpfsNode, out chan interface{}) *Adder {
-	e := dagutils.NewDagEditor(newDirNode(), nil)
+func NewAdder(ctx context.Context, n *core.IpfsNode, out chan interface{}) (*Adder, error) {
+	mr, err := mfs.NewRoot(ctx, n.DAG, newDirNode(), nil)
+	if err != nil {
+		return nil, err
+	}
+
 	return &Adder{
+		mr:       mr,
 		ctx:      ctx,
 		node:     n,
-		editor:   e,
 		out:      out,
 		Progress: false,
 		Hidden:   true,
@@ -75,86 +83,143 @@ func NewAdder(ctx context.Context, n *core.IpfsNode, out chan interface{}) *Adde
 		Trickle:  false,
 		Wrap:     false,
 		Chunker:  "",
-	}
+	}, nil
 }
 
 // Internal structure for holding the switches passed to the `add` call
 type Adder struct {
 	ctx  context.Context
 	node *core.IpfsNode
-	editor *dagutils.Editor
 	out  chan interface{}
 	Progress bool
 	Hidden   bool
 	Pin      bool
 	Trickle  bool
+	Silent   bool
 	Wrap     bool
 	Chunker  string
 	root     *dag.Node
+	mr       *mfs.Root
+	unlock   func()
+	tempRoot key.Key
 }
 
 // Perform the actual add & pin locally, outputting results to reader
-func (params Adder) add(reader io.Reader) (*dag.Node, error) {
-	chnk, err := chunk.FromString(reader, params.Chunker)
+func (adder Adder) add(reader io.Reader) (*dag.Node, error) {
+	chnk, err := chunk.FromString(reader, adder.Chunker)
 	if err != nil {
 		return nil, err
 	}
 
-	if params.Trickle {
+	if adder.Trickle {
 		return importer.BuildTrickleDagFromReader(
-			params.node.DAG,
+			adder.node.DAG,
 			chnk,
 		)
 	}
 	return importer.BuildDagFromReader(
-		params.node.DAG,
+		adder.node.DAG,
 		chnk,
 	)
 }
 
-func (params *Adder) RootNode() (*dag.Node, error) {
+func (adder *Adder) RootNode() (*dag.Node, error) {
 	// for memoizing
-	if params.root != nil {
-		return params.root, nil
+	if adder.root != nil {
+		return adder.root, nil
 	}
 
-	root := params.editor.GetNode()
+	root, err := adder.mr.GetValue().GetNode()
+	if err != nil {
+		return nil, err
+	}
 
 	// if not wrapping, AND one root file, use that hash as root.
-	if !params.Wrap && len(root.Links) == 1 {
-		var err error
-		root, err = root.Links[0].GetNode(params.ctx, params.editor.GetDagService())
-		params.root = root
-		// no need to output, as we've already done so.
-		return root, err
+	if !adder.Wrap && len(root.Links) == 1 {
+		root, err = root.Links[0].GetNode(adder.ctx, adder.node.DAG)
+		if err != nil {
+			return nil, err
+		}
 	}
 
-	// otherwise need to output, as we have not.
-	err := outputDagnode(params.out, "", root)
-	params.root = root
+	adder.root = root
 	return root, err
 }
 
-func (params *Adder) PinRoot() error {
-	root, err := params.RootNode()
+func (adder *Adder) PinRoot() error {
+	root, err := adder.RootNode()
 	if err != nil {
 		return err
 	}
-	if !params.Pin {
+	if !adder.Pin {
 		return nil
 	}
 
-	rnk, err := root.Key()
+	rnk, err := adder.node.DAG.Add(root)
 	if err != nil {
 		return err
 	}
 
-	params.node.Pinning.PinWithMode(rnk, pin.Recursive)
-	return params.node.Pinning.Flush()
+	if adder.tempRoot != "" {
+		err := adder.node.Pinning.Unpin(adder.ctx, adder.tempRoot, true)
+		if err != nil {
+			return err
+		}
+		adder.tempRoot = rnk
+	}
+
+	adder.node.Pinning.PinWithMode(rnk, pin.Recursive)
+	return adder.node.Pinning.Flush()
 }
 
-func (params *Adder) Finalize(DAG dag.DAGService) (*dag.Node, error) {
-	return params.editor.Finalize(DAG)
+func (adder *Adder) Finalize() (*dag.Node, error) {
+	// can't just call adder.RootNode() here as we need the name for printing
+	root, err := adder.mr.GetValue().GetNode()
+	if err != nil {
+		return nil, err
+	}
+
+	var name string
+	if !adder.Wrap {
+		name = root.Links[0].Name
+		child, err := root.Links[0].GetNode(adder.ctx, adder.node.DAG)
+		if err != nil {
+			return nil, err
+		}
+		root = child
+	}
+
+	err = adder.outputDirs(name, root)
+	if err != nil {
+		return nil, err
+	}
+
+	err = adder.mr.Close()
+	if err != nil {
+		return nil, err
+	}
+
+	return root, nil
+}
+
+func (adder *Adder) outputDirs(path string, nd *dag.Node) error {
+	if !bytes.Equal(nd.Data, folderData) {
+		return nil
+	}
+
+	for _, l := range nd.Links {
+		child, err := l.GetNode(adder.ctx, adder.node.DAG)
+		if err != nil {
+			return err
+		}
+
+		err = adder.outputDirs(gopath.Join(path, l.Name), child)
+		if err != nil {
+			return err
+		}
+	}
+
+	return outputDagnode(adder.out, path, nd)
+}
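+
+// Note: outputDirs recurses only into nodes whose Data matches the unixfs
+// folder marker, so Finalize announces directories without re-walking file
+// contents; files were already reported as they were added.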
 
 // Add builds a merkledag from the given reader, pinning all objects to the local
@@ -163,7 +228,10 @@ func Add(n *core.IpfsNode, r io.Reader) (string, error) {
 	unlock := n.Blockstore.PinLock()
 	defer unlock()
 
-	fileAdder := NewAdder(n.Context(), n, nil)
+	fileAdder, err := NewAdder(n.Context(), n, nil)
+	if err != nil {
+		return "", err
+	}
 
 	node, err := fileAdder.add(r)
 	if err != nil {
@@ -193,14 +261,22 @@ func AddR(n *core.IpfsNode, root string) (key string, err error) {
 	}
 	defer f.Close()
 
-	fileAdder := NewAdder(n.Context(), n, nil)
+	fileAdder, err := NewAdder(n.Context(), n, nil)
+	if err != nil {
+		return "", err
+	}
 
-	dagnode, err := fileAdder.AddFile(f)
+	err = fileAdder.addFile(f)
 	if err != nil {
 		return "", err
 	}
 
-	k, err := dagnode.Key()
+	nd, err := fileAdder.Finalize()
+	if err != nil {
+		return "", err
+	}
+
+	k, err := nd.Key()
 	if err != nil {
 		return "", err
 	}
@@ -214,23 +290,34 @@ func AddR(n *core.IpfsNode, root string) (key string, err error) {
 // the directory, and an error if any.
 func AddWrapped(n *core.IpfsNode, r io.Reader, filename string) (string, *dag.Node, error) {
 	file := files.NewReaderFile(filename, filename, ioutil.NopCloser(r), nil)
-	dir := files.NewSliceFile("", "", []files.File{file})
-	fileAdder := NewAdder(n.Context(), n, nil)
+	fileAdder, err := NewAdder(n.Context(), n, nil)
+	if err != nil {
+		return "", nil, err
+	}
+
 	fileAdder.Wrap = true
 
 	unlock := n.Blockstore.PinLock()
 	defer unlock()
-	dagnode, err := fileAdder.addDir(dir)
+
+	err = fileAdder.addFile(file)
+	if err != nil {
+		return "", nil, err
+	}
+
+	dagnode, err := fileAdder.Finalize()
 	if err != nil {
 		return "", nil, err
 	}
+
 	k, err := dagnode.Key()
 	if err != nil {
 		return "", nil, err
 	}
+
 	return gopath.Join(k.String(), filename), dagnode, nil
 }
 
-func (params *Adder) addNode(node *dag.Node, path string) error {
+func (adder *Adder) addNode(node *dag.Node, path string) error {
 	// patch it into the root
 	if path == "" {
 		key, err := node.Key()
@@ -241,96 +328,111 @@ func (params *Adder) addNode(node *dag.Node, path string) error {
 		path = key.Pretty()
 	}
 
-	if err := params.editor.InsertNodeAtPath(params.ctx, path, node, newDirNode); err != nil {
+	if err := mfs.PutNode(adder.mr, path, node); err != nil {
 		return err
 	}
 
-	return outputDagnode(params.out, path, node)
+	if !adder.Silent {
+		return outputDagnode(adder.out, path, node)
+	}
+	return nil
 }
 
-// Add the given file while respecting the params.
-func (params *Adder) AddFile(file files.File) (*dag.Node, error) {
+// AddFile adds the given file while respecting the adder's options.
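+// It holds the blockstore's PinLock while a file is in flight so a GC cannot
+// sweep the new blocks mid-file; maybePauseForGC pins the partial root and
+// briefly releases the lock between files when a GC is waiting.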
+func (adder *Adder) AddFile(file files.File) error {
+	adder.unlock = adder.node.Blockstore.PinLock()
+	defer adder.unlock()
+
+	return adder.addFile(file)
+}
+
+func (adder *Adder) addFile(file files.File) error {
+	err := adder.maybePauseForGC()
+	if err != nil {
+		return err
+	}
+
 	switch {
-	case files.IsHidden(file) && !params.Hidden:
+	case files.IsHidden(file) && !adder.Hidden:
 		log.Debugf("%s is hidden, skipping", file.FileName())
-		return nil, &hiddenFileError{file.FileName()}
+		return &hiddenFileError{file.FileName()}
 	case file.IsDirectory():
-		return params.addDir(file)
+		return adder.addDir(file)
 	}
 
 	// case for symlink
 	if s, ok := file.(*files.Symlink); ok {
 		sdata, err := unixfs.SymlinkData(s.Target)
 		if err != nil {
-			return nil, err
+			return err
 		}
 
 		dagnode := &dag.Node{Data: sdata}
-		_, err = params.node.DAG.Add(dagnode)
+		_, err = adder.node.DAG.Add(dagnode)
 		if err != nil {
-			return nil, err
+			return err
 		}
 
-		err = params.addNode(dagnode, s.FileName())
-		return dagnode, err
+		return adder.addNode(dagnode, s.FileName())
 	}
 
 	// case for regular file
 	// if the progress flag was specified, wrap the file so that we can send
 	// progress updates to the client (over the output channel)
 	var reader io.Reader = file
-	if params.Progress {
-		reader = &progressReader{file: file, out: params.out}
+	if adder.Progress {
+		reader = &progressReader{file: file, out: adder.out}
 	}
 
-	dagnode, err := params.add(reader)
+	dagnode, err := adder.add(reader)
 	if err != nil {
-		return nil, err
+		return err
 	}
 
 	// patch it into the root
-	log.Infof("adding file: %s", file.FileName())
-	err = params.addNode(dagnode, file.FileName())
-	return dagnode, err
+	return adder.addNode(dagnode, file.FileName())
 }
 
-func (params *Adder) addDir(dir files.File) (*dag.Node, error) {
-	tree := newDirNode()
+func (adder *Adder) addDir(dir files.File) error {
 	log.Infof("adding directory: %s", dir.FileName())
 
+	err := mfs.Mkdir(adder.mr, dir.FileName(), true)
+	if err != nil {
+		return err
+	}
+
 	for {
 		file, err := dir.NextFile()
 		if err != nil && err != io.EOF {
-			return nil, err
+			return err
 		}
 		if file == nil {
 			break
 		}
 
-		node, err := params.AddFile(file)
+		err = adder.addFile(file)
 		if _, ok := err.(*hiddenFileError); ok {
 			// hidden file error, skip file
 			continue
 		} else if err != nil {
-			return nil, err
+			return err
 		}
+	}
 
-		_, name := gopath.Split(file.FileName())
+	return nil
+}
 
-		if err := tree.AddNodeLinkClean(name, node); err != nil {
-			return nil, err
+func (adder *Adder) maybePauseForGC() error {
+	if adder.node.Blockstore.GCRequested() {
+		err := adder.PinRoot()
+		if err != nil {
+			return err
 		}
-	}
 
-	if err := params.addNode(tree, dir.FileName()); err != nil {
-		return nil, err
-	}
-
-	if _, err := params.node.DAG.Add(tree); err != nil {
-		return nil, err
+		adder.unlock()
+		adder.unlock = adder.node.Blockstore.PinLock()
 	}
-
-	return tree, nil
+	return nil
 }
 
 // outputDagnode sends dagnode info over the output channel
@@ -379,7 +481,7 @@ func getOutput(dagnode *dag.Node) (*Object, error) {
 	for i, link := range dagnode.Links {
 		output.Links[i] = Link{
 			Name: link.Name,
-			Hash: link.Hash.B58String(),
+			//Hash: link.Hash.B58String(),
 			Size: link.Size,
 		}
 	}
diff --git a/core/coreunix/add_test.go b/core/coreunix/add_test.go
index 6d4bfb17656..56c921eebe0 100644
--- a/core/coreunix/add_test.go
+++ b/core/coreunix/add_test.go
@@ -1,22 +1,44 @@
 package coreunix
 
 import (
-	"os"
-	"path"
+	"bytes"
+	"io"
+	"io/ioutil"
 	"testing"
+	"time"
 
 	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
"github.com/ipfs/go-ipfs/blocks/key" + "github.com/ipfs/go-ipfs/commands/files" "github.com/ipfs/go-ipfs/core" + dag "github.com/ipfs/go-ipfs/merkledag" + "github.com/ipfs/go-ipfs/pin/gc" "github.com/ipfs/go-ipfs/repo" "github.com/ipfs/go-ipfs/repo/config" "github.com/ipfs/go-ipfs/util/testutil" ) func TestAddRecursive(t *testing.T) { - here, err := os.Getwd() + r := &repo.Mock{ + C: config.Config{ + Identity: config.Identity{ + PeerID: "Qmfoo", // required by offline node + }, + }, + D: testutil.ThreadSafeCloserMapDatastore(), + } + node, err := core.NewNode(context.Background(), &core.BuildCfg{Repo: r}) if err != nil { t.Fatal(err) } + if k, err := AddR(node, "test_data"); err != nil { + t.Fatal(err) + } else if k != "QmWCCga8AbTyfAQ7pTnGT6JgmRMAB3Qp8ZmTEFi5q5o8jC" { + t.Fatal("keys do not match: ", k) + } +} + +func TestAddGCLive(t *testing.T) { r := &repo.Mock{ C: config.Config{ Identity: config.Identity{ @@ -29,9 +51,111 @@ func TestAddRecursive(t *testing.T) { if err != nil { t.Fatal(err) } - if k, err := AddR(node, path.Join(here, "test_data")); err != nil { + + errs := make(chan error) + out := make(chan interface{}) + adder, err := NewAdder(context.Background(), node, out) + if err != nil { + t.Fatal(err) + } + + dataa := ioutil.NopCloser(bytes.NewBufferString("testfileA")) + rfa := files.NewReaderFile("a", "a", dataa, nil) + + // make two files with pipes so we can 'pause' the add for timing of the test + piper, pipew := io.Pipe() + hangfile := files.NewReaderFile("b", "b", piper, nil) + + datad := ioutil.NopCloser(bytes.NewBufferString("testfileD")) + rfd := files.NewReaderFile("d", "d", datad, nil) + + slf := files.NewSliceFile("files", "files", []files.File{rfa, hangfile, rfd}) + + addDone := make(chan struct{}) + go func() { + defer close(addDone) + defer close(out) + err := adder.AddFile(slf) + + if err != nil { + t.Fatal(err) + } + + }() + + addedHashes := make(map[string]struct{}) + select { + case o := <-out: + addedHashes[o.(*AddedObject).Hash] = struct{}{} + case <-addDone: + t.Fatal("add shouldnt complete yet") + } + + var gcout <-chan key.Key + gcstarted := make(chan struct{}) + go func() { + defer close(gcstarted) + gcchan, err := gc.GC(context.Background(), node.Blockstore, node.Pinning) + if err != nil { + log.Error("GC ERROR:", err) + errs <- err + return + } + + gcout = gcchan + }() + + // gc shouldnt start until we let the add finish its current file. 
+	pipew.Write([]byte("some data for file b"))
+
+	select {
+	case <-gcstarted:
+		t.Fatal("gc shouldn't have started yet")
+	case err := <-errs:
+		t.Fatal(err)
+	default:
+	}
+
+	time.Sleep(time.Millisecond * 100) // make sure gc gets to requesting lock
+
+	// finish write and unblock gc
+	pipew.Close()
+
+	// receive next object from adder
+	select {
+	case o := <-out:
+		addedHashes[o.(*AddedObject).Hash] = struct{}{}
+	case err := <-errs:
+		t.Fatal(err)
+	}
+
+	select {
+	case <-gcstarted:
+	case err := <-errs:
+		t.Fatal(err)
+	}
+
+	for k := range gcout {
+		if _, ok := addedHashes[k.B58String()]; ok {
+			t.Fatal("gc'ed a hash we just added")
+		}
+	}
+
+	var last key.Key
+	for a := range out {
+		// wait for it to finish
+		last = key.B58KeyDecode(a.(*AddedObject).Hash)
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+	defer cancel()
+	root, err := node.DAG.Get(ctx, last)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = dag.EnumerateChildren(ctx, node.DAG, root, key.NewKeySet())
+	if err != nil {
 		t.Fatal(err)
-	} else if k != "QmWCCga8AbTyfAQ7pTnGT6JgmRMAB3Qp8ZmTEFi5q5o8jC" {
-		t.Fatal("keys do not match")
 	}
 }
diff --git a/mfs/dir.go b/mfs/dir.go
index 264dea4a0d7..43271fe490f 100644
--- a/mfs/dir.go
+++ b/mfs/dir.go
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"os"
 	"sync"
+	"time"
 
 	context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
 
@@ -28,6 +29,8 @@ type Directory struct {
 	node  *dag.Node
 	ctx   context.Context
 
+	modTime time.Time
+
 	name string
 }
 
@@ -40,6 +43,7 @@ func NewDirectory(ctx context.Context, name string, node *dag.Node, parent child
 		parent:    parent,
 		childDirs: make(map[string]*Directory),
 		files:     make(map[string]*File),
+		modTime:   time.Now(),
 	}
 }
 
@@ -53,7 +57,16 @@ func (d *Directory) closeChild(name string, nd *dag.Node) error {
 	d.lock.Lock()
 	defer d.lock.Unlock()
 
-	err = d.node.RemoveNodeLink(name)
+	err = d.updateChild(name, nd)
+	if err != nil {
+		return err
+	}
+
+	return d.parent.closeChild(d.name, d.node)
+}
+
+func (d *Directory) updateChild(name string, nd *dag.Node) error {
+	err := d.node.RemoveNodeLink(name)
 	if err != nil && err != dag.ErrNotFound {
 		return err
 	}
@@ -63,7 +76,9 @@ func (d *Directory) closeChild(name string, nd *dag.Node) error {
 		return err
 	}
 
-	return d.parent.closeChild(d.name, d.node)
+	d.modTime = time.Now()
+
+	return nil
 }
 
 func (d *Directory) Type() NodeType {
@@ -77,30 +92,16 @@ func (d *Directory) childFile(name string) (*File, error) {
 		return fi, nil
 	}
 
-	nd, err := d.childFromDag(name)
-	if err != nil {
-		return nil, err
-	}
-	i, err := ft.FromBytes(nd.Data)
+	fsn, err := d.childNode(name)
 	if err != nil {
 		return nil, err
 	}
 
-	switch i.GetType() {
-	case ufspb.Data_Directory:
-		return nil, ErrIsDirectory
-	case ufspb.Data_File:
-		nfi, err := NewFile(name, nd, d, d.dserv)
-		if err != nil {
-			return nil, err
-		}
-		d.files[name] = nfi
-		return nfi, nil
-	case ufspb.Data_Metadata:
-		return nil, ErrNotYetImplemented
-	default:
-		return nil, ErrInvalidChild
+	if fi, ok := fsn.(*File); ok {
+		return fi, nil
 	}
+
+	return nil, fmt.Errorf("%s is not a file", name)
 }
 
 // childDir returns a directory under this directory by the given name if it
@@ -111,6 +112,21 @@ func (d *Directory) childDir(name string) (*Directory, error) {
 		return dir, nil
 	}
 
+	fsn, err := d.childNode(name)
+	if err != nil {
+		return nil, err
+	}
+
+	if dir, ok := fsn.(*Directory); ok {
+		return dir, nil
+	}
+
+	return nil, fmt.Errorf("%s is not a directory", name)
+}
+
+// childNode returns a FSNode under this directory by the given name if it exists.
+// It does *not* check the cached dirs and files.
+func (d *Directory) childNode(name string) (FSNode, error) {
 	nd, err := d.childFromDag(name)
 	if err != nil {
 		return nil, err
 	}
@@ -127,7 +143,12 @@ func (d *Directory) childDir(name string) (*Directory, error) {
 		d.childDirs[name] = ndir
 		return ndir, nil
 	case ufspb.Data_File:
-		return nil, fmt.Errorf("%s is not a directory", name)
+		nfi, err := NewFile(name, nd, d, d.dserv)
+		if err != nil {
+			return nil, err
+		}
+		d.files[name] = nfi
+		return nfi, nil
 	case ufspb.Data_Metadata:
 		return nil, ErrNotYetImplemented
 	default:
@@ -157,17 +178,17 @@ func (d *Directory) Child(name string) (FSNode, error) {
 // childUnsync returns the child under this directory by the given name
 // without locking, useful for operations which already hold a lock
 func (d *Directory) childUnsync(name string) (FSNode, error) {
-
-	dir, err := d.childDir(name)
-	if err == nil {
-		return dir, nil
+	cdir, ok := d.childDirs[name]
+	if ok {
+		return cdir, nil
 	}
-	fi, err := d.childFile(name)
-	if err == nil {
-		return fi, nil
+
+	cfile, ok := d.files[name]
+	if ok {
+		return cfile, nil
 	}
 
-	return nil, os.ErrNotExist
+	return d.childNode(name)
 }
 
 type NodeListing struct {
@@ -270,12 +291,7 @@ func (d *Directory) AddChild(name string, nd *dag.Node) error {
 	d.Lock()
 	defer d.Unlock()
 
-	pbn, err := ft.FromBytes(nd.Data)
-	if err != nil {
-		return err
-	}
-
-	_, err = d.childUnsync(name)
+	_, err := d.childUnsync(name)
 	if err == nil {
 		return ErrDirExists
 	}
@@ -290,22 +306,59 @@ func (d *Directory) AddChild(name string, nd *dag.Node) error {
 		return err
 	}
 
-	switch pbn.GetType() {
-	case ft.TDirectory:
-		d.childDirs[name] = NewDirectory(d.ctx, name, nd, d, d.dserv)
-	case ft.TFile, ft.TMetadata, ft.TRaw:
-		nfi, err := NewFile(name, nd, d, d.dserv)
+	d.modTime = time.Now()
+
+	//return d.parent.closeChild(d.name, d.node)
+	return nil
+}
+
+func (d *Directory) sync() error {
+	for name, dir := range d.childDirs {
+		nd, err := dir.GetNode()
+		if err != nil {
+			return err
+		}
+
+		_, err = d.dserv.Add(nd)
+		if err != nil {
+			return err
+		}
+
+		err = d.updateChild(name, nd)
 		if err != nil {
 			return err
 		}
-		d.files[name] = nfi
-	default:
-		return ErrInvalidChild
 	}
 
-	return d.parent.closeChild(d.name, d.node)
+	for name, file := range d.files {
+		nd, err := file.GetNode()
+		if err != nil {
+			return err
+		}
+
+		_, err = d.dserv.Add(nd)
+		if err != nil {
+			return err
+		}
+
+		err = d.updateChild(name, nd)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
 }
 
 func (d *Directory) GetNode() (*dag.Node, error) {
+	d.Lock()
+	defer d.Unlock()
+
+	err := d.sync()
+	if err != nil {
+		return nil, err
+	}
+
 	return d.node, nil
 }
diff --git a/mfs/ops.go b/mfs/ops.go
index c7309a31d9d..ebb1932edeb 100644
--- a/mfs/ops.go
+++ b/mfs/ops.go
@@ -101,6 +101,9 @@ func PutNode(r *Root, path string, nd *dag.Node) error {
 // Mkdir creates a directory at 'path' under the directory 'd', creating
 // intermediary directories as needed if 'parents' is set to true
 func Mkdir(r *Root, pth string, parents bool) error {
+	if pth == "" {
+		panic("empty path")
+	}
 	parts := path.SplitList(pth)
 	if parts[0] == "" {
 		parts = parts[1:]
diff --git a/mfs/system.go b/mfs/system.go
index 22ef63cd4a2..2cfc4e201fd 100644
--- a/mfs/system.go
+++ b/mfs/system.go
@@ -71,15 +71,19 @@ func NewRoot(parent context.Context, ds dag.DAGService, node *dag.Node, pf PubFu
 		return nil, err
 	}
 
+	var repub *Republisher
+	if pf != nil {
+		repub = NewRepublisher(parent, pf, time.Millisecond*300, time.Second*3)
+		repub.setVal(ndk)
+		go repub.Run()
+	}
+
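+	// Note: with a nil PubFunc (e.g. roots created for plain adds) repub stays
+	// nil, so every later use of it below is guarded by a repub != nil check.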
 	root := &Root{
 		node:  node,
-		repub: NewRepublisher(parent, pf, time.Millisecond*300, time.Second*3),
+		repub: repub,
 		dserv: ds,
 	}
 
-	root.repub.setVal(ndk)
-	go root.repub.Run()
-
 	pbn, err := ft.FromBytes(node.Data)
 	if err != nil {
 		log.Error("IPNS pointer was not unixfs node")
@@ -113,12 +117,29 @@ func (kr *Root) closeChild(name string, nd *dag.Node) error {
 		return err
 	}
 
-	kr.repub.Update(k)
+	if kr.repub != nil {
+		kr.repub.Update(k)
+	}
 	return nil
 }
 
 func (kr *Root) Close() error {
-	return kr.repub.Close()
+	nd, err := kr.GetValue().GetNode()
+	if err != nil {
+		return err
+	}
+
+	k, err := kr.dserv.Add(nd)
+	if err != nil {
+		return err
+	}
+
+	if kr.repub != nil {
+		kr.repub.Update(k)
+		return kr.repub.Close()
+	}
+
+	return nil
 }
 
 // Republisher manages when to publish a given entry
diff --git a/pin/gc/gc.go b/pin/gc/gc.go
index ec61f816a44..df9ddedc6b2 100644
--- a/pin/gc/gc.go
+++ b/pin/gc/gc.go
@@ -24,7 +24,6 @@ var log = logging.Logger("gc")
 // deletes any block that is not found in the marked set.
 func GC(ctx context.Context, bs bstore.GCBlockstore, pn pin.Pinner) (<-chan key.Key, error) {
 	unlock := bs.GCLock()
-	defer unlock()
 
 	bsrv := bserv.New(bs, offline.Exchange(bs))
 	ds := dag.NewDAGService(bsrv)
@@ -42,6 +41,7 @@ func GC(ctx context.Context, bs bstore.GCBlockstore, pn pin.Pinner) (<-chan key.
 	output := make(chan key.Key)
 	go func() {
 		defer close(output)
		defer unlock()
 		for {
 			select {
 			case k, ok := <-keychan:
diff --git a/test/sharness/t0042-add-skip.sh b/test/sharness/t0042-add-skip.sh
index f0d4c6fd253..d5f7997984f 100755
--- a/test/sharness/t0042-add-skip.sh
+++ b/test/sharness/t0042-add-skip.sh
@@ -38,11 +38,11 @@ test_add_skip() {
 	cat >expected <<-\EOF &&
 		added QmcAREBcjgnUpKfyFmUGnfajA1NQS5ydqRp7WfqZ6JF8Dx planets/.asteroids/ceres.txt
 		added QmZ5eaLybJ5GUZBNwy24AA9EEDTDpA4B8qXnuN3cGxu2uF planets/.asteroids/pallas.txt
-		added Qmf6rbs5GF85anDuoxpSAdtuZPM9D2Yt3HngzjUVSQ7kDV planets/.asteroids
 		added QmaowqjedBkUrMUXgzt9c2ZnAJncM9jpJtkFfgdFstGr5a planets/.charon.txt
 		added QmU4zFD5eJtRBsWC63AvpozM9Atiadg9kPVTuTrnCYJiNF planets/.pluto.txt
 		added QmZy3khu7qf696i5HtkgL2NotsCZ8wzvNZJ1eUdA5n8KaV planets/mars.txt
 		added QmQnv4m3Q5512zgVtpbJ9z85osQrzZzGRn934AGh6iVEXz planets/venus.txt
+		added Qmf6rbs5GF85anDuoxpSAdtuZPM9D2Yt3HngzjUVSQ7kDV planets/.asteroids
 		added QmetajtFdmzhWYodAsZoVZSiqpeJDAiaw2NwbM3xcWcpDj planets
 	EOF
 	test_cmp expected actual
diff --git a/test/sharness/t0043-add-w.sh b/test/sharness/t0043-add-w.sh
index d4f7decaa12..40e9649b74b 100755
--- a/test/sharness/t0043-add-w.sh
+++ b/test/sharness/t0043-add-w.sh
@@ -15,8 +15,8 @@ add_w_12='added Qme987pqNBhZZXy4ckeXiR7zaRQwBabB7fTgHurW2yJfNu 4r93
 added QmVb4ntSZZnT2J2zvCmXKMJc52cmZYH6AB37MzeYewnkjs 4u6ead
 added QmZPASVB6EsADrLN8S2sak34zEHL8mx4TAVsPJU9cNnQQJ '
 
-add_w_21='added QmVb4ntSZZnT2J2zvCmXKMJc52cmZYH6AB37MzeYewnkjs 4u6ead
-added Qme987pqNBhZZXy4ckeXiR7zaRQwBabB7fTgHurW2yJfNu 4r93
+add_w_21='added Qme987pqNBhZZXy4ckeXiR7zaRQwBabB7fTgHurW2yJfNu 4r93
+added QmVb4ntSZZnT2J2zvCmXKMJc52cmZYH6AB37MzeYewnkjs 4u6ead
 added QmZPASVB6EsADrLN8S2sak34zEHL8mx4TAVsPJU9cNnQQJ '
 
 add_w_d1='added QmPcaX84tDiTfzdTn8GQxexodgeWH6mHjSss5Zfr5ojssb _jo7/-s782qgs
@@ -27,20 +27,20 @@ added QmYC3u5jGWuyFwvTxtvLYm2K3SpWZ31tg3NjpVVvh9cJaJ _jo7/wzvsihy
 added QmQkib3f9XNX5sj6WEahLUPFpheTcwSRJwUCSvjcv8b9by _jo7
 added QmNQoesMj1qp8ApE51NbtTjFYksyzkezPD4cat7V2kzbKN '
 
-add_w_d2='added QmVaKAt2eVftNKFfKhiBV7Mu5HjCugffuLqWqobSSFgiA7 h3qpecj0
+add_w_d2='added Qme987pqNBhZZXy4ckeXiR7zaRQwBabB7fTgHurW2yJfNu 4r93
 added QmU9Jqks8TPu4vFr6t7EKkAKQrSJuEujNj1AkzoCeTEDFJ gnz66h/1k0xpx34
 added QmSLYZycXAufRw3ePMVH2brbtYWCcWsmksGLbHcT8ia9Ke gnz66h/9cwudvacx
 added QmfYmpCCAMU9nLe7xbrYsHf5z2R2GxeQnsm4zavUhX9vq2 gnz66h/9ximv51cbo8
 added QmWgEE4e2kfx3b8HZcBk5cLrfhoi8kTMQP2MipgPhykuV3 gnz66h/b54ygh6gs
 added QmcLbqEqhREGednc6mrVtanee4WHKp5JnUfiwTTHCJwuDf gnz66h/lbl5
-added QmVPwNy8pZegpsNmsjjZvdTQn4uCeuZgtzhgWhRSQWjK9x gnz66h
 added QmPcaX84tDiTfzdTn8GQxexodgeWH6mHjSss5Zfr5ojssb _jo7/-s782qgs
 added QmaVBqquUuXKjkyWHXaXfsaQUxAnsCKS95VRDHU8PzGA4K _jo7/15totauzkak-
 added QmaAHFG8cmhW3WLjofx5siSp44VV25ETN6ThzrU8iAqpkR _jo7/galecuirrj4r
 added QmeuSfhJNKwBESp1W9H8cfoMdBfW3AeHQDWXbNXQJYWp53 _jo7/mzo50r-1xidf5zx
 added QmYC3u5jGWuyFwvTxtvLYm2K3SpWZ31tg3NjpVVvh9cJaJ _jo7/wzvsihy
+added QmVaKAt2eVftNKFfKhiBV7Mu5HjCugffuLqWqobSSFgiA7 h3qpecj0
 added QmQkib3f9XNX5sj6WEahLUPFpheTcwSRJwUCSvjcv8b9by _jo7
-added Qme987pqNBhZZXy4ckeXiR7zaRQwBabB7fTgHurW2yJfNu 4r93
+added QmVPwNy8pZegpsNmsjjZvdTQn4uCeuZgtzhgWhRSQWjK9x gnz66h
 added QmTmc46fhKC8Liuh5soy1VotdnHcqLu3r6HpPGwDZCnqL1 '
 
 add_w_r='QmcCksBMDuuyuyfAMMNzEAx6Z7jTrdRy9a23WpufAhG9ji'
@@ -57,7 +57,7 @@ test_add_w() {
 		random-files --seed 7547632 --files 5 --dirs 2 --depth 3 m &&
 		echo "$add_w_m" >expected &&
 		ipfs add -q -r m | tail -n1 >actual &&
-		test_cmp expected actual
+		test_sort_cmp expected actual
 	'
 
 	# test single file
@@ -67,7 +67,7 @@ test_add_w() {
 	test_expect_success "ipfs add -w (single file) is correct" '
 		echo "$add_w_1" >expected &&
-		test_cmp expected actual
+		test_sort_cmp expected actual
 	'
 
 	# test two files together
@@ -77,7 +77,7 @@ test_add_w() {
 	test_expect_success "ipfs add -w (multiple) is correct" '
 		echo "$add_w_12" >expected &&
-		test_cmp expected actual
+		test_sort_cmp expected actual
 	'
 
 	test_expect_success "ipfs add -w (multiple) succeeds" '
@@ -86,7 +86,7 @@ test_add_w() {
 	test_expect_success "ipfs add -w (multiple) orders" '
 		echo "$add_w_21" >expected &&
-		test_cmp expected actual
+		test_sort_cmp expected actual
 	'
 
 	# test a directory
@@ -96,7 +96,7 @@ test_add_w() {
 	test_expect_success "ipfs add -w -r (dir) is correct" '
 		echo "$add_w_d1" >expected &&
-		test_cmp expected actual
+		test_sort_cmp expected actual
 	'
 
 	# test files and directory
@@ -107,7 +107,7 @@ test_add_w() {
 	test_expect_success "ipfs add -w -r is correct" '
 		echo "$add_w_d2" >expected &&
-		test_cmp expected actual
+		test_sort_cmp expected actual
 	'
 
 	# test -w -r m/* == -r m
@@ -117,7 +117,7 @@ test_add_w() {
 	test_expect_success "ipfs add -w -r m/* == add -r m is correct" '
 		echo "$add_w_m" >expected &&
-		test_cmp expected actual
+		test_sort_cmp expected actual
 	'
 
 	# test repeats together
@@ -130,7 +130,7 @@ test_add_w() {
 	test_expect_success "ipfs add -w (repeats) is correct" '
 		echo "$add_w_r" >expected &&
-		test_cmp expected actual
+		test_sort_cmp expected actual
 	'
 }
diff --git a/test/sharness/t0045-ls.sh b/test/sharness/t0045-ls.sh
index 4ad0acf89fc..8ba9e8ccdde 100755
--- a/test/sharness/t0045-ls.sh
+++ b/test/sharness/t0045-ls.sh
@@ -27,12 +27,12 @@ test_ls_cmd() {
 	cat <<-\EOF >expected_add &&
 		added QmQNd6ubRXaNG6Prov8o6vk3bn6eWsj9FxLGrAVDUAGkGe testData/d1/128
 		added QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN testData/d1/a
-		added QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss testData/d1
 		added QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd testData/d2/1024
 		added QmaRGe7bVmVaLmxbrMiVNXqW4pRNNp3xq7hFtyRKA3mtJL testData/d2/a
-		added QmR3jhV4XpxxPjPT3Y8vNnWvWNvakdcT3H6vqpRBsX1MLy testData/d2
 		added QmeomffUNfmQy76CQGy9NdmqEnnHU9soCexBnGU3ezPHVH testData/f1
 		added QmNtocSs7MoDkJMc1RkyisCSKvLadujPsfJfSdJ3e1eA1M testData/f2
+		added QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss testData/d1
+		added QmR3jhV4XpxxPjPT3Y8vNnWvWNvakdcT3H6vqpRBsX1MLy testData/d2
 		added QmfNy183bXiRVyrhyWtq3TwHn79yHEkiAGFr18P7YNzESj testData
 	EOF
 	test_cmp expected_add actual_add
diff --git a/test/sharness/t0200-unixfs-ls.sh b/test/sharness/t0200-unixfs-ls.sh
index 1b889987d4d..ea386b98ce5 100755
--- a/test/sharness/t0200-unixfs-ls.sh
+++ b/test/sharness/t0200-unixfs-ls.sh
@@ -27,12 +27,12 @@ test_ls_cmd() {
 	cat <<-\EOF >expected_add &&
 		added QmQNd6ubRXaNG6Prov8o6vk3bn6eWsj9FxLGrAVDUAGkGe testData/d1/128
 		added QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN testData/d1/a
-		added QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss testData/d1
 		added QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd testData/d2/1024
 		added QmaRGe7bVmVaLmxbrMiVNXqW4pRNNp3xq7hFtyRKA3mtJL testData/d2/a
-		added QmR3jhV4XpxxPjPT3Y8vNnWvWNvakdcT3H6vqpRBsX1MLy testData/d2
 		added QmeomffUNfmQy76CQGy9NdmqEnnHU9soCexBnGU3ezPHVH testData/f1
 		added QmNtocSs7MoDkJMc1RkyisCSKvLadujPsfJfSdJ3e1eA1M testData/f2
+		added QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss testData/d1
+		added QmR3jhV4XpxxPjPT3Y8vNnWvWNvakdcT3H6vqpRBsX1MLy testData/d2
 		added QmfNy183bXiRVyrhyWtq3TwHn79yHEkiAGFr18P7YNzESj testData
 	EOF
 	test_cmp expected_add actual_add
diff --git a/test/sharness/t0250-files-api.sh b/test/sharness/t0250-files-api.sh
index b011a8bd57a..90b43081754 100755
--- a/test/sharness/t0250-files-api.sh
+++ b/test/sharness/t0250-files-api.sh
@@ -316,13 +316,32 @@ test_files_api() {
 		verify_dir_contents /cats file1 ipfs this
 	'
 
+	test_expect_success "write 'no-flush' succeeds" '
+		echo "testing" | ipfs files write -f=false -e /cats/walrus
+	'
+
+	test_expect_success "root hash not bubbled up yet" '
+		test -z "$ONLINE" ||
+		(ipfs refs local > refsout &&
+		test_expect_code 1 grep QmcwKfTMCT7AaeiD92hWjnZn9b6eh9NxnhfSzN5x2vnDpt refsout)
+	'
+
+	test_expect_success "changes bubbled up to root on inspection" '
+		ipfs files stat / | head -n1 > root_hash
+	'
+
+	test_expect_success "root hash looks good" '
+		echo "QmcwKfTMCT7AaeiD92hWjnZn9b6eh9NxnhfSzN5x2vnDpt" > root_hash_exp &&
+		test_cmp root_hash_exp root_hash
+	'
+
 	# test mv
 	test_expect_success "can mv dir" '
 		ipfs files mv /cats/this/is /cats/
 	'
 
 	test_expect_success "mv worked" '
-		verify_dir_contents /cats file1 ipfs this is &&
+		verify_dir_contents /cats file1 ipfs this is walrus &&
 		verify_dir_contents /cats/this
 	'
 
@@ -337,7 +356,14 @@ test_files_api() {
 
 # test offline and online
 test_files_api
+
+test_expect_success "clean up objects from previous test run" '
+	ipfs repo gc
+'
+
 test_launch_ipfs_daemon
+
+ONLINE=1 # set online flag so tests can easily tell
 test_files_api
 test_kill_ipfs_daemon
 test_done