diff --git a/.golangci.yml b/.golangci.yml index de174afc44c..2c46046aa2a 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,3 +1,8 @@ linters: enable: - stylecheck + +linters-settings: + stylecheck: + dot-import-whitelist: + - github.com/ipfs/kubo/test/cli/testutils diff --git a/coverage/Rules.mk b/coverage/Rules.mk index e0935d880ba..fd4f33cf1ac 100644 --- a/coverage/Rules.mk +++ b/coverage/Rules.mk @@ -2,7 +2,7 @@ include mk/header.mk GOCC ?= go -$(d)/coverage_deps: $$(DEPS_GO) +$(d)/coverage_deps: $$(DEPS_GO) cmd/ipfs/ipfs rm -rf $(@D)/unitcover && mkdir $(@D)/unitcover rm -rf $(@D)/sharnesscover && mkdir $(@D)/sharnesscover diff --git a/go.mod b/go.mod index a1472360405..292e82cc6ca 100644 --- a/go.mod +++ b/go.mod @@ -111,6 +111,7 @@ require ( go.uber.org/fx v1.18.2 go.uber.org/zap v1.24.0 golang.org/x/crypto v0.3.0 + golang.org/x/mod v0.7.0 golang.org/x/sync v0.1.0 golang.org/x/sys v0.3.0 ) @@ -233,7 +234,6 @@ require ( go.uber.org/multierr v1.8.0 // indirect go4.org v0.0.0-20200411211856-f5505b9728dd // indirect golang.org/x/exp v0.0.0-20221205204356-47842c84f3db // indirect - golang.org/x/mod v0.7.0 // indirect golang.org/x/net v0.3.0 // indirect golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect golang.org/x/term v0.3.0 // indirect diff --git a/test/cli/basic_commands_test.go b/test/cli/basic_commands_test.go new file mode 100644 index 00000000000..30c1f1f9a9a --- /dev/null +++ b/test/cli/basic_commands_test.go @@ -0,0 +1,238 @@ +package cli + +import ( + "fmt" + "regexp" + "strings" + "testing" + + "github.com/blang/semver/v4" + "github.com/ipfs/kubo/test/cli/harness" + . 
"github.com/ipfs/kubo/test/cli/testutils" + "github.com/stretchr/testify/assert" + gomod "golang.org/x/mod/module" +) + +var versionRegexp = regexp.MustCompile(`^ipfs version (.+)$`) + +func parseVersionOutput(s string) semver.Version { + versString := versionRegexp.FindStringSubmatch(s)[1] + v, err := semver.Parse(versString) + if err != nil { + panic(err) + } + return v +} + +func TestCurDirIsWritable(t *testing.T) { + t.Parallel() + h := harness.NewT(t) + h.WriteFile("test.txt", "It works!") +} + +func TestIPFSVersionCommandMatchesFlag(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode() + commandVersionStr := node.IPFS("version").Stdout.String() + commandVersionStr = strings.TrimSpace(commandVersionStr) + commandVersion := parseVersionOutput(commandVersionStr) + + flagVersionStr := node.IPFS("--version").Stdout.String() + flagVersionStr = strings.TrimSpace(flagVersionStr) + flagVersion := parseVersionOutput(flagVersionStr) + + assert.Equal(t, commandVersion, flagVersion) +} + +func TestIPFSVersionAll(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode() + res := node.IPFS("version", "--all").Stdout.String() + res = strings.TrimSpace(res) + assert.Contains(t, res, "Kubo version") + assert.Contains(t, res, "Repo version") + assert.Contains(t, res, "System version") + assert.Contains(t, res, "Golang version") +} + +func TestIPFSVersionDeps(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode() + res := node.IPFS("version", "deps").Stdout.String() + res = strings.TrimSpace(res) + lines := SplitLines(res) + + assert.Equal(t, "github.com/ipfs/kubo@(devel)", lines[0]) + + for _, depLine := range lines[1:] { + split := strings.Split(depLine, " => ") + for _, moduleVersion := range split { + splitModVers := strings.Split(moduleVersion, "@") + modPath := splitModVers[0] + modVers := splitModVers[1] + assert.NoError(t, gomod.Check(modPath, modVers), "path: %s, version: %s", modPath, modVers) + } + } +} + +func 
TestIPFSCommands(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode() + cmds := node.IPFSCommands() + assert.Contains(t, cmds, "ipfs add") + assert.Contains(t, cmds, "ipfs daemon") + assert.Contains(t, cmds, "ipfs update") +} + +func TestAllSubcommandsAcceptHelp(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode() + for _, cmd := range node.IPFSCommands() { + t.Run(fmt.Sprintf("command %q accepts help", cmd), func(t *testing.T) { + t.Parallel() + splitCmd := strings.Split(cmd, " ")[1:] + node.IPFS(StrCat("help", splitCmd)...) + node.IPFS(StrCat(splitCmd, "--help")...) + }) + } +} + +func TestAllRootCommandsAreMentionedInHelpText(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode() + cmds := node.IPFSCommands() + var rootCmds []string + for _, cmd := range cmds { + splitCmd := strings.Split(cmd, " ") + if len(splitCmd) == 2 { + rootCmds = append(rootCmds, splitCmd[1]) + } + } + + // a few base commands are not expected to be in the help message + // but we default to requiring them to be in the help message, so that we + // have to make an conscious decision to exclude them + notInHelp := map[string]bool{ + "object": true, + "shutdown": true, + "tar": true, + "urlstore": true, + "dns": true, + } + + helpMsg := strings.TrimSpace(node.IPFS("--help").Stdout.String()) + for _, rootCmd := range rootCmds { + if _, ok := notInHelp[rootCmd]; ok { + continue + } + assert.Contains(t, helpMsg, fmt.Sprintf(" %s", rootCmd)) + } +} + +func TestCommandDocsWidth(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode() + + // require new commands to explicitly opt in to longer lines + allowList := map[string]bool{ + "ipfs add": true, + "ipfs block put": true, + "ipfs daemon": true, + "ipfs config profile": true, + "ipfs pin remote service": true, + "ipfs name pubsub": true, + "ipfs object patch": true, + "ipfs swarm connect": true, + "ipfs p2p forward": true, + "ipfs p2p close": true, + "ipfs swarm disconnect": true, + "ipfs swarm 
addrs listen": true, + "ipfs dag resolve": true, + "ipfs dag get": true, + "ipfs object stat": true, + "ipfs pin remote add": true, + "ipfs config show": true, + "ipfs config edit": true, + "ipfs pin remote rm": true, + "ipfs pin remote ls": true, + "ipfs pin verify": true, + "ipfs dht get": true, + "ipfs pin remote service add": true, + "ipfs file ls": true, + "ipfs pin update": true, + "ipfs pin rm": true, + "ipfs p2p": true, + "ipfs resolve": true, + "ipfs dag stat": true, + "ipfs name publish": true, + "ipfs object diff": true, + "ipfs object patch add-link": true, + "ipfs name": true, + "ipfs object patch append-data": true, + "ipfs object patch set-data": true, + "ipfs dht put": true, + "ipfs diag profile": true, + "ipfs diag cmds": true, + "ipfs swarm addrs local": true, + "ipfs files ls": true, + "ipfs stats bw": true, + "ipfs urlstore add": true, + "ipfs swarm peers": true, + "ipfs pubsub sub": true, + "ipfs repo fsck": true, + "ipfs files write": true, + "ipfs swarm limit": true, + "ipfs commands completion fish": true, + "ipfs key export": true, + "ipfs routing get": true, + "ipfs refs": true, + "ipfs refs local": true, + "ipfs cid base32": true, + "ipfs pubsub pub": true, + "ipfs repo ls": true, + "ipfs routing put": true, + "ipfs key import": true, + "ipfs swarm peering add": true, + "ipfs swarm peering rm": true, + "ipfs swarm peering ls": true, + "ipfs update": true, + "ipfs swarm stats": true, + } + for _, cmd := range node.IPFSCommands() { + if _, ok := allowList[cmd]; ok { + continue + } + t.Run(fmt.Sprintf("command %q conforms to docs width limit", cmd), func(t *testing.T) { + splitCmd := strings.Split(cmd, " ") + resStr := node.IPFS(StrCat(splitCmd[1:], "--help")...) 
+ res := strings.TrimSpace(resStr.Stdout.String()) + for _, line := range SplitLines(res) { + assert.LessOrEqualf(t, len(line), 80, "expected width %d < 80 for %q", len(line), cmd) + } + + }) + } +} + +func TestAllCommandsFailWhenPassedBadFlag(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode() + + for _, cmd := range node.IPFSCommands() { + t.Run(fmt.Sprintf("command %q fails when passed a bad flag", cmd), func(t *testing.T) { + splitCmd := strings.Split(cmd, " ") + res := node.RunIPFS(StrCat(splitCmd, "--badflag")...) + assert.Equal(t, 1, res.Cmd.ProcessState.ExitCode()) + }) + } + +} + +func TestCommandsFlags(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode() + resStr := node.IPFS("commands", "--flags").Stdout.String() + assert.Contains(t, resStr, "ipfs pin add --recursive / ipfs pin add -r") + assert.Contains(t, resStr, "ipfs id --format / ipfs id -f") + assert.Contains(t, resStr, "ipfs repo gc --quiet / ipfs repo gc -q") +} diff --git a/test/cli/completion_test.go b/test/cli/completion_test.go new file mode 100644 index 00000000000..0c40eb02b6e --- /dev/null +++ b/test/cli/completion_test.go @@ -0,0 +1,31 @@ +package cli + +import ( + "fmt" + "testing" + + "github.com/ipfs/kubo/test/cli/harness" + . 
"github.com/ipfs/kubo/test/cli/testutils" + "github.com/stretchr/testify/assert" +) + +func TestBashCompletion(t *testing.T) { + t.Parallel() + h := harness.NewT(t) + node := h.NewNode() + + res := node.IPFS("commands", "completion", "bash") + + length := len(res.Stdout.String()) + if length < 100 { + t.Fatalf("expected a long Bash completion file, but got one of length %d", length) + } + + t.Run("completion file can be loaded in bash", func(t *testing.T) { + RequiresLinux(t) + + completionFile := h.WriteToTemp(res.Stdout.String()) + res = h.Sh(fmt.Sprintf("source %s && type -t _ipfs", completionFile)) + assert.NoError(t, res.Err) + }) +} diff --git a/test/cli/delegated_routing_http_test.go b/test/cli/delegated_routing_http_test.go new file mode 100644 index 00000000000..0b39a9b12e6 --- /dev/null +++ b/test/cli/delegated_routing_http_test.go @@ -0,0 +1,121 @@ +package cli + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/ipfs/kubo/config" + "github.com/ipfs/kubo/test/cli/harness" + . 
"github.com/ipfs/kubo/test/cli/testutils" + "github.com/stretchr/testify/assert" +) + +func TestHTTPDelegatedRouting(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + + fakeServer := func(resp string) *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, err := w.Write([]byte(resp)) + if err != nil { + panic(err) + } + })) + } + + findProvsCID := "baeabep4vu3ceru7nerjjbk37sxb7wmftteve4hcosmyolsbsiubw2vr6pqzj6mw7kv6tbn6nqkkldnklbjgm5tzbi4hkpkled4xlcr7xz4bq" + prov := "12D3KooWARYacCc6eoCqvsS9RW9MA2vo51CV75deoiqssx3YgyYJ" + + t.Run("default routing config has no routers defined", func(t *testing.T) { + assert.Nil(t, node.ReadConfig().Routing.Routers) + }) + + t.Run("no routers means findprovs returns no results", func(t *testing.T) { + res := node.IPFS("routing", "findprovs", findProvsCID).Stdout.String() + assert.Empty(t, res) + }) + + t.Run("no routers means findprovs returns no results", func(t *testing.T) { + res := node.IPFS("routing", "findprovs", findProvsCID).Stdout.String() + assert.Empty(t, res) + }) + + node.StopDaemon() + + t.Run("missing method params make the daemon fail", func(t *testing.T) { + node.UpdateConfig(func(cfg *config.Config) { + cfg.Routing.Type = config.NewOptionalString("custom") + cfg.Routing.Methods = config.Methods{ + "find-peers": {RouterName: "TestDelegatedRouter"}, + "find-providers": {RouterName: "TestDelegatedRouter"}, + "get-ipns": {RouterName: "TestDelegatedRouter"}, + "provide": {RouterName: "TestDelegatedRouter"}, + } + }) + res := node.RunIPFS("daemon") + assert.Equal(t, 1, res.ExitErr.ProcessState.ExitCode()) + assert.Contains( + t, + res.Stderr.String(), + `method name "put-ipns" is missing from Routing.Methods config param`, + ) + }) + + t.Run("having wrong methods makes daemon fail", func(t *testing.T) { + node.UpdateConfig(func(cfg *config.Config) { + cfg.Routing.Type = config.NewOptionalString("custom") 
+ cfg.Routing.Methods = config.Methods{ + "find-peers": {RouterName: "TestDelegatedRouter"}, + "find-providers": {RouterName: "TestDelegatedRouter"}, + "get-ipns": {RouterName: "TestDelegatedRouter"}, + "provide": {RouterName: "TestDelegatedRouter"}, + "put-ipns": {RouterName: "TestDelegatedRouter"}, + "NOT_SUPPORTED": {RouterName: "TestDelegatedRouter"}, + } + }) + res := node.RunIPFS("daemon") + assert.Equal(t, 1, res.ExitErr.ProcessState.ExitCode()) + assert.Contains( + t, + res.Stderr.String(), + `method name "NOT_SUPPORTED" is not a supported method on Routing.Methods config param`, + ) + }) + + t.Run("adding HTTP delegated routing endpoint to Routing.Routers config works", func(t *testing.T) { + server := fakeServer(ToJSONStr(JSONObj{ + "Providers": []JSONObj{{ + "Protocol": "transport-bitswap", + "Schema": "bitswap", + "ID": prov, + "Addrs": []string{"/ip4/0.0.0.0/tcp/4001", "/ip4/0.0.0.0/tcp/4002"}, + }}, + })) + t.Cleanup(server.Close) + + node.IPFS("config", "Routing.Type", "--json", `"custom"`) + node.IPFS("config", "Routing.Routers.TestDelegatedRouter", "--json", ToJSONStr(JSONObj{ + "Type": "http", + "Parameters": JSONObj{ + "Endpoint": server.URL, + }, + })) + node.IPFS("config", "Routing.Methods", "--json", ToJSONStr(JSONObj{ + "find-peers": JSONObj{"RouterName": "TestDelegatedRouter"}, + "find-providers": JSONObj{"RouterName": "TestDelegatedRouter"}, + "get-ipns": JSONObj{"RouterName": "TestDelegatedRouter"}, + "provide": JSONObj{"RouterName": "TestDelegatedRouter"}, + "put-ipns": JSONObj{"RouterName": "TestDelegatedRouter"}, + })) + + res := node.IPFS("config", "Routing.Routers.TestDelegatedRouter.Parameters.Endpoint") + assert.Equal(t, res.Stdout.Trimmed(), server.URL) + + node.StartDaemon() + + res = node.IPFS("routing", "findprovs", findProvsCID) + assert.Equal(t, prov, res.Stdout.Trimmed()) + }) + +} diff --git a/test/cli/harness/buffer.go b/test/cli/harness/buffer.go new file mode 100644 index 00000000000..b40e160b0d2 --- /dev/null +++ 
b/test/cli/harness/buffer.go @@ -0,0 +1,45 @@ +package harness + +import ( + "strings" + "sync" + + "github.com/ipfs/kubo/test/cli/testutils" +) + +// Buffer is a thread-safe byte buffer. +type Buffer struct { + b strings.Builder + m sync.Mutex +} + +func (b *Buffer) Write(p []byte) (n int, err error) { + b.m.Lock() + defer b.m.Unlock() + return b.b.Write(p) +} + +func (b *Buffer) String() string { + b.m.Lock() + defer b.m.Unlock() + return b.b.String() +} + +// Trimmed returns the bytes as a string, with leading and trailing whitespace removed. +func (b *Buffer) Trimmed() string { + b.m.Lock() + defer b.m.Unlock() + return strings.TrimSpace(b.b.String()) +} + +func (b *Buffer) Bytes() []byte { + b.m.Lock() + defer b.m.Unlock() + return []byte(b.b.String()) +} + +func (b *Buffer) Lines() []string { + b.m.Lock() + defer b.m.Unlock() + return testutils.SplitLines(b.b.String()) +} diff --git a/test/cli/harness/harness.go b/test/cli/harness/harness.go new file mode 100644 index 00000000000..dd9f38ec3f2 --- /dev/null +++ b/test/cli/harness/harness.go @@ -0,0 +1,187 @@ +package harness + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "strings" + "testing" + "time" + + logging "github.com/ipfs/go-log/v2" + . "github.com/ipfs/kubo/test/cli/testutils" +) + +// Harness tracks state for a test, such as temp dirs and IFPS nodes, and cleans them up after the test. +type Harness struct { + Dir string + IPFSBin string + Runner *Runner + NodesRoot string + Nodes Nodes +} + +// TODO: use zaptest.NewLogger(t) instead +func EnableDebugLogging() { + err := logging.SetLogLevel("testharness", "DEBUG") + if err != nil { + panic(err) + } +} + +// NewT constructs a harness that cleans up after the given test is done. +func NewT(t *testing.T, options ...func(h *Harness)) *Harness { + h := New(options...) 
+ t.Cleanup(h.Cleanup) + return h +} + +func New(options ...func(h *Harness)) *Harness { + h := &Harness{Runner: &Runner{Env: osEnviron()}} + + // walk up to find the root dir, from which we can locate the binary + wd, err := os.Getwd() + if err != nil { + panic(err) + } + goMod := FindUp("go.mod", wd) + if goMod == "" { + panic("unable to find root dir") + } + rootDir := filepath.Dir(goMod) + h.IPFSBin = filepath.Join(rootDir, "cmd", "ipfs", "ipfs") + + // setup working dir + tmpDir, err := os.MkdirTemp("", "") + if err != nil { + log.Panicf("error creating temp dir: %s", err) + } + h.Dir = tmpDir + h.Runner.Dir = h.Dir + + h.NodesRoot = filepath.Join(h.Dir, ".nodes") + + // apply any customizations + // this should happen after all initialization + for _, o := range options { + o(h) + } + + return h +} + +func osEnviron() map[string]string { + m := map[string]string{} + for _, entry := range os.Environ() { + split := strings.Split(entry, "=") + m[split[0]] = split[1] + } + return m +} + +func (h *Harness) NewNode() *Node { + nodeID := len(h.Nodes) + node := BuildNode(h.IPFSBin, h.NodesRoot, nodeID) + h.Nodes = append(h.Nodes, node) + return node +} + +func (h *Harness) NewNodes(count int) Nodes { + var newNodes []*Node + for i := 0; i < count; i++ { + newNodes = append(newNodes, h.NewNode()) + } + return newNodes +} + +// WriteToTemp writes the given contents to a guaranteed-unique temp file, returning its path. +func (h *Harness) WriteToTemp(contents string) string { + f := h.TempFile() + _, err := f.WriteString(contents) + if err != nil { + log.Panicf("writing to temp file: %s", err.Error()) + } + err = f.Close() + if err != nil { + log.Panicf("closing temp file: %s", err.Error()) + } + return f.Name() +} + +// TempFile creates a new unique temp file. 
+func (h *Harness) TempFile() *os.File { + f, err := os.CreateTemp(h.Dir, "") + if err != nil { + log.Panicf("creating temp file: %s", err.Error()) + } + return f +} + +// WriteFile writes a file given a filename and its contents. +// The filename should be a relative path. +func (h *Harness) WriteFile(filename, contents string) { + if filepath.IsAbs(filename) { + log.Panicf("%s must be a relative path", filename) + } + absPath := filepath.Join(h.Runner.Dir, filename) + err := os.WriteFile(absPath, []byte(contents), 0644) + if err != nil { + log.Panicf("writing '%s' ('%s'): %s", filename, absPath, err.Error()) + } +} + +func WaitForFile(path string, timeout time.Duration) error { + start := time.Now() + timer := time.NewTimer(timeout) + ticker := time.NewTicker(1 * time.Millisecond) + defer timer.Stop() + defer ticker.Stop() + for { + select { + case <-timer.C: + end := time.Now() + return fmt.Errorf("timeout waiting for %s after %v", path, end.Sub(start)) + case <-ticker.C: + _, err := os.Stat(path) + if err == nil { + return nil + } + if errors.Is(err, os.ErrNotExist) { + continue + } + return fmt.Errorf("error waiting for %s: %w", path, err) + } + } +} + +func (h *Harness) Mkdirs(paths ...string) { + for _, path := range paths { + if filepath.IsAbs(path) { + log.Panicf("%s must be a relative path when making dirs", path) + } + absPath := filepath.Join(h.Runner.Dir, path) + err := os.MkdirAll(absPath, 0777) + if err != nil { + log.Panicf("recursively making dirs under %s: %s", absPath, err) + } + } +} + +func (h *Harness) Sh(expr string) RunResult { + return h.Runner.Run(RunRequest{ + Path: "bash", + Args: []string{"-c", expr}, + }) +} + +func (h *Harness) Cleanup() { + log.Debugf("cleaning up cluster") + h.Nodes.StopDaemons() + // TODO: don't do this if test fails, not sure how? 
+ log.Debugf("removing harness dir") + err := os.RemoveAll(h.Dir) + if err != nil { + log.Panicf("removing temp dir %s: %s", h.Dir, err) + } +} diff --git a/test/cli/harness/ipfs.go b/test/cli/harness/ipfs.go new file mode 100644 index 00000000000..6ae7bdf947d --- /dev/null +++ b/test/cli/harness/ipfs.go @@ -0,0 +1,80 @@ +package harness + +import ( + "encoding/json" + "fmt" + "io" + "reflect" + "strings" + + . "github.com/ipfs/kubo/test/cli/testutils" +) + +func (n *Node) IPFSCommands() []string { + res := n.IPFS("commands").Stdout.String() + res = strings.TrimSpace(res) + split := SplitLines(res) + var cmds []string + for _, line := range split { + trimmed := strings.TrimSpace(line) + if trimmed == "ipfs" { + continue + } + cmds = append(cmds, trimmed) + } + return cmds +} + +func (n *Node) SetIPFSConfig(key string, val interface{}, flags ...string) { + valBytes, err := json.Marshal(val) + if err != nil { + log.Panicf("marshling config for key '%s': %s", key, err) + } + valStr := string(valBytes) + + args := []string{"config", "--json"} + args = append(args, flags...) + args = append(args, key, valStr) + n.IPFS(args...) 
+ + // validate the config was set correctly + var newVal string + n.GetIPFSConfig(key, &newVal) + if val != newVal { + log.Panicf("key '%s' did not retain value '%s' after it was set, got '%s'", key, val, newVal) + } +} + +func (n *Node) GetIPFSConfig(key string, val interface{}) { + res := n.IPFS("config", key) + valStr := strings.TrimSpace(res.Stdout.String()) + // only when the result is a string is the result not well-formed JSON, + // so check the value type and add quotes if it's expected to be a string + reflectVal := reflect.ValueOf(val) + if reflectVal.Kind() == reflect.Ptr && reflectVal.Elem().Kind() == reflect.String { + valStr = fmt.Sprintf(`"%s"`, valStr) + } + err := json.Unmarshal([]byte(valStr), val) + if err != nil { + log.Fatalf("unmarshaling config for key '%s', value '%s': %s", key, valStr, err) + } +} + +func (n *Node) IPFSAddStr(content string, args ...string) string { + log.Debugf("node %d adding content '%s' with args: %v", n.ID, PreviewStr(content), args) + return n.IPFSAdd(strings.NewReader(content), args...) +} + +func (n *Node) IPFSAdd(content io.Reader, args ...string) string { + log.Debugf("node %d adding with args: %v", n.ID, args) + fullArgs := []string{"add", "-q"} + fullArgs = append(fullArgs, args...) 
+ res := n.Runner.MustRun(RunRequest{ + Path: n.IPFSBin, + Args: fullArgs, + CmdOpts: []CmdOpt{RunWithStdin(content)}, + }) + out := strings.TrimSpace(res.Stdout.String()) + log.Debugf("add result: %q", out) + return out +} diff --git a/test/cli/harness/node.go b/test/cli/harness/node.go new file mode 100644 index 00000000000..227737eb96c --- /dev/null +++ b/test/cli/harness/node.go @@ -0,0 +1,383 @@ +package harness + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "syscall" + "time" + + logging "github.com/ipfs/go-log/v2" + "github.com/ipfs/kubo/config" + serial "github.com/ipfs/kubo/config/serialize" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/multiformats/go-multiaddr" +) + +var log = logging.Logger("testharness") + +// Node is a single Kubo node. +// Each node has its own config and can run its own Kubo daemon. +type Node struct { + ID int + Dir string + + APIListenAddr multiaddr.Multiaddr + SwarmAddr multiaddr.Multiaddr + EnableMDNS bool + + IPFSBin string + Runner *Runner + + daemon *RunResult +} + +func BuildNode(ipfsBin, baseDir string, id int) *Node { + dir := filepath.Join(baseDir, strconv.Itoa(id)) + if err := os.MkdirAll(dir, 0755); err != nil { + panic(err) + } + + env := environToMap(os.Environ()) + env["IPFS_PATH"] = dir + + return &Node{ + ID: id, + Dir: dir, + IPFSBin: ipfsBin, + Runner: &Runner{ + Env: env, + Dir: dir, + }, + } +} + +func (n *Node) ReadConfig() *config.Config { + cfg, err := serial.Load(filepath.Join(n.Dir, "config")) + if err != nil { + panic(err) + } + return cfg +} + +func (n *Node) WriteConfig(c *config.Config) { + err := serial.WriteConfigFile(filepath.Join(n.Dir, "config"), c) + if err != nil { + panic(err) + } +} + +func (n *Node) UpdateConfig(f func(cfg *config.Config)) { + cfg := n.ReadConfig() + f(cfg) + n.WriteConfig(cfg) +} + +func (n *Node) IPFS(args ...string) RunResult { + res := 
n.RunIPFS(args...) + n.Runner.AssertNoError(res) + return res +} + +func (n *Node) PipeStrToIPFS(s string, args ...string) RunResult { + return n.PipeToIPFS(strings.NewReader(s), args...) +} + +func (n *Node) PipeToIPFS(reader io.Reader, args ...string) RunResult { + res := n.RunPipeToIPFS(reader, args...) + n.Runner.AssertNoError(res) + return res +} + +func (n *Node) RunPipeToIPFS(reader io.Reader, args ...string) RunResult { + return n.Runner.Run(RunRequest{ + Path: n.IPFSBin, + Args: args, + CmdOpts: []CmdOpt{RunWithStdin(reader)}, + }) +} + +func (n *Node) RunIPFS(args ...string) RunResult { + return n.Runner.Run(RunRequest{ + Path: n.IPFSBin, + Args: args, + }) +} + +// Init initializes and configures the IPFS node, after which it is ready to run. +func (n *Node) Init(ipfsArgs ...string) *Node { + n.Runner.MustRun(RunRequest{ + Path: n.IPFSBin, + Args: append([]string{"init"}, ipfsArgs...), + }) + + if n.SwarmAddr == nil { + swarmAddr, err := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/0") + if err != nil { + panic(err) + } + n.SwarmAddr = swarmAddr + } + + if n.APIListenAddr == nil { + apiAddr, err := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/0") + if err != nil { + panic(err) + } + n.APIListenAddr = apiAddr + } + + n.UpdateConfig(func(cfg *config.Config) { + cfg.Bootstrap = []string{} + cfg.Addresses.Swarm = []string{n.SwarmAddr.String()} + cfg.Addresses.API = []string{n.APIListenAddr.String()} + cfg.Addresses.Gateway = []string{""} + cfg.Swarm.DisableNatPortMap = true + cfg.Discovery.MDNS.Enabled = n.EnableMDNS + }) + return n +} + +func (n *Node) StartDaemon(ipfsArgs ...string) *Node { + alive := n.IsAlive() + if alive { + log.Panicf("node %d is already running", n.ID) + } + + daemonArgs := append([]string{"daemon"}, ipfsArgs...) 
+ log.Debugf("starting node %d", n.ID) + res := n.Runner.MustRun(RunRequest{ + Path: n.IPFSBin, + Args: daemonArgs, + RunFunc: (*exec.Cmd).Start, + }) + + n.daemon = &res + + log.Debugf("node %d started, checking API", n.ID) + n.WaitOnAPI() + return n +} + +func (n *Node) signalAndWait(watch <-chan struct{}, signal os.Signal, t time.Duration) bool { + err := n.daemon.Cmd.Process.Signal(signal) + if err != nil { + if errors.Is(err, os.ErrProcessDone) { + log.Debugf("process for node %d has already finished", n.ID) + return true + } + log.Panicf("error killing daemon for node %d with peer ID %s: %s", n.ID, n.PeerID(), err.Error()) + } + timer := time.NewTimer(t) + defer timer.Stop() + select { + case <-watch: + return true + case <-timer.C: + return false + } +} + +func (n *Node) StopDaemon() *Node { + log.Debugf("stopping node %d", n.ID) + if n.daemon == nil { + log.Debugf("didn't stop node %d since no daemon present", n.ID) + return n + } + watch := make(chan struct{}, 1) + go func() { + _, _ = n.daemon.Cmd.Process.Wait() + watch <- struct{}{} + }() + log.Debugf("signaling node %d with SIGTERM", n.ID) + if n.signalAndWait(watch, syscall.SIGTERM, 1*time.Second) { + return n + } + log.Debugf("signaling node %d with SIGTERM", n.ID) + if n.signalAndWait(watch, syscall.SIGTERM, 2*time.Second) { + return n + } + log.Debugf("signaling node %d with SIGQUIT", n.ID) + if n.signalAndWait(watch, syscall.SIGQUIT, 5*time.Second) { + return n + } + log.Debugf("signaling node %d with SIGKILL", n.ID) + if n.signalAndWait(watch, syscall.SIGKILL, 5*time.Second) { + return n + } + log.Panicf("timed out stopping node %d with peer ID %s", n.ID, n.PeerID()) + return n +} + +func (n *Node) APIAddr() multiaddr.Multiaddr { + ma, err := n.TryAPIAddr() + if err != nil { + panic(err) + } + return ma +} + +func (n *Node) TryAPIAddr() (multiaddr.Multiaddr, error) { + b, err := os.ReadFile(filepath.Join(n.Dir, "api")) + if err != nil { + return nil, err + } + ma, err := 
multiaddr.NewMultiaddr(string(b)) + if err != nil { + return nil, err + } + return ma, nil +} + +func (n *Node) checkAPI() bool { + apiAddr, err := n.TryAPIAddr() + if err != nil { + log.Debugf("node %d API addr not available yet: %s", n.ID, err.Error()) + return false + } + ip, err := apiAddr.ValueForProtocol(multiaddr.P_IP4) + if err != nil { + panic(err) + } + port, err := apiAddr.ValueForProtocol(multiaddr.P_TCP) + if err != nil { + panic(err) + } + url := fmt.Sprintf("http://%s:%s/api/v0/id", ip, port) + log.Debugf("checking API for node %d at %s", n.ID, url) + httpResp, err := http.Post(url, "", nil) + if err != nil { + log.Debugf("node %d API check error: %s", err.Error()) + return false + } + defer httpResp.Body.Close() + resp := struct { + ID string + }{} + + respBytes, err := io.ReadAll(httpResp.Body) + if err != nil { + log.Debugf("error reading API check response for node %d: %s", n.ID, err.Error()) + return false + } + log.Debugf("got API check response for node %d: %s", n.ID, string(respBytes)) + + err = json.Unmarshal(respBytes, &resp) + if err != nil { + log.Debugf("error decoding API check response for node %d: %s", n.ID, err.Error()) + return false + } + if resp.ID == "" { + log.Debugf("API check response for node %d did not contain a Peer ID", n.ID) + return false + } + respPeerID, err := peer.Decode(resp.ID) + if err != nil { + panic(err) + } + + peerID := n.PeerID() + if respPeerID != peerID { + log.Panicf("expected peer ID %s but got %s", peerID, resp.ID) + } + + log.Debugf("API check for node %d successful", n.ID) + return true +} + +func (n *Node) PeerID() peer.ID { + cfg := n.ReadConfig() + id, err := peer.Decode(cfg.Identity.PeerID) + if err != nil { + panic(err) + } + return id +} + +func (n *Node) WaitOnAPI() *Node { + log.Debugf("waiting on API for node %d", n.ID) + for i := 0; i < 50; i++ { + if n.checkAPI() { + return n + } + time.Sleep(400 * time.Millisecond) + } + log.Panicf("node %d with peer ID %s failed to come online: 
\n%s\n\n%s", n.ID, n.PeerID(), n.daemon.Stderr.String(), n.daemon.Stdout.String()) + return n +} + +func (n *Node) IsAlive() bool { + if n.daemon == nil || n.daemon.Cmd == nil || n.daemon.Cmd.Process == nil { + return false + } + log.Debugf("signaling node %d daemon process for liveness check", n.ID) + err := n.daemon.Cmd.Process.Signal(syscall.Signal(0)) + if err == nil { + log.Debugf("node %d daemon is alive", n.ID) + return true + } + log.Debugf("node %d daemon not alive: %s", err.Error()) + return false +} + +func (n *Node) SwarmAddrs() []multiaddr.Multiaddr { + res := n.Runner.MustRun(RunRequest{ + Path: n.IPFSBin, + Args: []string{"swarm", "addrs", "local"}, + }) + ipfsProtocol := multiaddr.ProtocolWithCode(multiaddr.P_IPFS).Name + peerID := n.PeerID() + out := strings.TrimSpace(res.Stdout.String()) + outLines := strings.Split(out, "\n") + var addrs []multiaddr.Multiaddr + for _, addrStr := range outLines { + ma, err := multiaddr.NewMultiaddr(addrStr) + if err != nil { + panic(err) + } + + // add the peer ID to the multiaddr if it doesn't have it + _, err = ma.ValueForProtocol(multiaddr.P_IPFS) + if errors.Is(err, multiaddr.ErrProtocolNotFound) { + comp, err := multiaddr.NewComponent(ipfsProtocol, peerID.String()) + if err != nil { + panic(err) + } + ma = ma.Encapsulate(comp) + } + addrs = append(addrs, ma) + } + return addrs +} + +func (n *Node) Connect(other *Node) *Node { + n.Runner.MustRun(RunRequest{ + Path: n.IPFSBin, + Args: []string{"swarm", "connect", other.SwarmAddrs()[0].String()}, + }) + return n +} + +func (n *Node) Peers() []multiaddr.Multiaddr { + res := n.Runner.MustRun(RunRequest{ + Path: n.IPFSBin, + Args: []string{"swarm", "peers"}, + }) + lines := strings.Split(strings.TrimSpace(res.Stdout.String()), "\n") + var addrs []multiaddr.Multiaddr + for _, line := range lines { + ma, err := multiaddr.NewMultiaddr(line) + if err != nil { + panic(err) + } + addrs = append(addrs, ma) + } + return addrs +} diff --git a/test/cli/harness/nodes.go 
b/test/cli/harness/nodes.go new file mode 100644 index 00000000000..b142e3d8f43 --- /dev/null +++ b/test/cli/harness/nodes.go @@ -0,0 +1,47 @@ +package harness + +import ( + "github.com/multiformats/go-multiaddr" +) + +// Nodes is a collection of Kubo nodes along with operations on groups of nodes. +type Nodes []*Node + +func (n Nodes) Init(args ...string) Nodes { + for _, node := range n { + node.Init() + } + return n +} + +func (n Nodes) Connect() Nodes { + for i, node := range n { + for j, otherNode := range n { + if i == j { + continue + } + node.Connect(otherNode) + } + } + for _, node := range n { + firstPeer := node.Peers()[0] + if _, err := firstPeer.ValueForProtocol(multiaddr.P_P2P); err != nil { + log.Panicf("unexpected state for node %d with peer ID %s: %s", node.ID, node.PeerID(), err) + } + } + return n +} + +func (n Nodes) StartDaemons() Nodes { + for _, node := range n { + node.StartDaemon() + } + return n +} + +func (n Nodes) StopDaemons() Nodes { + for _, node := range n { + node.StopDaemon() + } + return n +} diff --git a/test/cli/harness/run.go b/test/cli/harness/run.go new file mode 100644 index 00000000000..1a6b32fc2f1 --- /dev/null +++ b/test/cli/harness/run.go @@ -0,0 +1,140 @@ +package harness + +import ( + "fmt" + "io" + "os/exec" + "strings" +) + +// Runner is a process runner which can run subprocesses and aggregate output. +type Runner struct { + Env map[string]string + Dir string + Verbose bool +} + +type CmdOpt func(*exec.Cmd) +type RunFunc func(*exec.Cmd) error + +var RunFuncStart = (*exec.Cmd).Start + +type RunRequest struct { + Path string + Args []string + // Options that are applied to the exec.Cmd just before running it + CmdOpts []CmdOpt + // Function to use to run the command. 
+ // If not specified, defaults to cmd.Run + RunFunc func(*exec.Cmd) error + Verbose bool +} + +type RunResult struct { + Stdout *Buffer + Stderr *Buffer + Err error + ExitErr *exec.ExitError + Cmd *exec.Cmd +} + +func environToMap(environ []string) map[string]string { + m := map[string]string{} + for _, e := range environ { + // SplitN: env values may themselves contain "=" (e.g. FOO=a=b) + kv := strings.SplitN(e, "=", 2) + m[kv[0]] = kv[1] + } + return m +} + +func (r *Runner) Run(req RunRequest) RunResult { + cmd := exec.Command(req.Path, req.Args...) + stdout := &Buffer{} + stderr := &Buffer{} + cmd.Stdout = stdout + cmd.Stderr = stderr + cmd.Dir = r.Dir + + for k, v := range r.Env { + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", k, v)) + } + + for _, o := range req.CmdOpts { + o(cmd) + } + + if req.RunFunc == nil { + req.RunFunc = (*exec.Cmd).Run + } + + log.Debugf("running %v", cmd.Args) + + err := req.RunFunc(cmd) + + result := RunResult{ + Stdout: stdout, + Stderr: stderr, + Cmd: cmd, + Err: err, + } + + if exitErr, ok := err.(*exec.ExitError); ok { + result.ExitErr = exitErr + } + + return result +} + +// MustRun runs the command and fails the test if the command fails.
+func (r *Runner) MustRun(req RunRequest) RunResult { + result := r.Run(req) + r.AssertNoError(result) + return result +} + +func (r *Runner) AssertNoError(result RunResult) { + if result.ExitErr != nil { + log.Panicf("'%s' returned error, code: %d, err: %s\nstdout:%s\nstderr:%s\n", + result.Cmd.Args, result.ExitErr.ExitCode(), result.ExitErr.Error(), result.Stdout.String(), result.Stderr.String()) + + } + if result.Err != nil { + log.Panicf("unable to run %s: %s", result.Cmd.Path, result.Err) + + } +} + +func RunWithEnv(env map[string]string) CmdOpt { + return func(cmd *exec.Cmd) { + for k, v := range env { + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", k, v)) + } + } +} + +func RunWithPath(path string) CmdOpt { + return func(cmd *exec.Cmd) { + var newEnv []string + for _, env := range cmd.Env { + e := strings.Split(env, "=") + if e[0] == "PATH" { + paths := strings.Split(e[1], ":") + paths = append(paths, path) + e[1] = strings.Join(paths, ":") + fmt.Printf("path: %s\n", strings.Join(e, "=")) + } + newEnv = append(newEnv, strings.Join(e, "=")) + } + cmd.Env = newEnv + } +} + +func RunWithStdin(reader io.Reader) CmdOpt { + return func(cmd *exec.Cmd) { + cmd.Stdin = reader + } +} + +func RunWithStdinStr(s string) CmdOpt { + return RunWithStdin(strings.NewReader(s)) +} diff --git a/test/cli/init_test.go b/test/cli/init_test.go new file mode 100644 index 00000000000..359856e6bae --- /dev/null +++ b/test/cli/init_test.go @@ -0,0 +1,164 @@ +package cli + +import ( + "fmt" + "os" + fp "path/filepath" + "strings" + "testing" + + "github.com/ipfs/kubo/test/cli/harness" + . 
"github.com/ipfs/kubo/test/cli/testutils" + pb "github.com/libp2p/go-libp2p/core/crypto/pb" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func validatePeerID(t *testing.T, peerID peer.ID, expErr error, expAlgo pb.KeyType) { + assert.NoError(t, peerID.Validate()) + pub, err := peerID.ExtractPublicKey() + assert.ErrorIs(t, err, expErr) + if expAlgo != 0 { + assert.Equal(t, expAlgo, pub.Type()) + } +} + +func testInitAlgo(t *testing.T, initFlags []string, expOutputName string, expPeerIDPubKeyErr error, expPeerIDPubKeyType pb.KeyType) { + t.Run("init", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode() + initRes := node.IPFS(StrCat("init", initFlags)...) + + lines := []string{ + fmt.Sprintf("generating %s keypair...done", expOutputName), + fmt.Sprintf("peer identity: %s", node.PeerID().String()), + fmt.Sprintf("initializing IPFS node at %s", node.Dir), + "to get started, enter:", + fmt.Sprintf("\n\tipfs cat /ipfs/%s/readme\n\n", CIDWelcomeDocs), + } + expectedInitOutput := strings.Join(lines, "\n") + assert.Equal(t, expectedInitOutput, initRes.Stdout.String()) + + assert.DirExists(t, node.Dir) + assert.FileExists(t, fp.Join(node.Dir, "config")) + assert.DirExists(t, fp.Join(node.Dir, "datastore")) + assert.DirExists(t, fp.Join(node.Dir, "blocks")) + assert.NoFileExists(t, fp.Join(node.Dir, "._check_writeable")) + + _, err := os.ReadDir(node.Dir) + assert.NoError(t, err, "ipfs dir should be listable") + + validatePeerID(t, node.PeerID(), expPeerIDPubKeyErr, expPeerIDPubKeyType) + + res := node.IPFS("config", "Mounts.IPFS") + assert.Equal(t, "/ipfs", res.Stdout.Trimmed()) + + node.IPFS("cat", fmt.Sprintf("/ipfs/%s/readme", CIDWelcomeDocs)) + }) + + t.Run("init empty repo", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode() + initRes := node.IPFS(StrCat("init", "--empty-repo", initFlags)...)
+ + validatePeerID(t, node.PeerID(), expPeerIDPubKeyErr, expPeerIDPubKeyType) + + lines := []string{ + fmt.Sprintf("generating %s keypair...done", expOutputName), + fmt.Sprintf("peer identity: %s", node.PeerID().String()), + fmt.Sprintf("initializing IPFS node at %s\n", node.Dir), + } + expectedEmptyInitOutput := strings.Join(lines, "\n") + assert.Equal(t, expectedEmptyInitOutput, initRes.Stdout.String()) + + catRes := node.RunIPFS("cat", fmt.Sprintf("/ipfs/%s/readme", CIDWelcomeDocs)) + assert.NotEqual(t, 0, catRes.ExitErr.ExitCode(), "welcome readme doesn't exist") + + idRes := node.IPFS("id", "-f", "") + version := node.IPFS("version", "-n").Stdout.Trimmed() + assert.Contains(t, idRes.Stdout.String(), version) + }) +} + +func TestInit(t *testing.T) { + t.Parallel() + + t.Run("init fails if the repo dir has no perms", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode() + badDir := fp.Join(node.Dir, ".badipfs") + err := os.Mkdir(badDir, 0000) + require.NoError(t, err) + + res := node.RunIPFS("init", "--repo-dir", badDir) + assert.NotEqual(t, 0, res.Cmd.ProcessState.ExitCode()) + assert.Contains(t, res.Stderr.String(), "permission denied") + + }) + + t.Run("init with ed25519", func(t *testing.T) { + t.Parallel() + testInitAlgo(t, []string{"--algorithm=ed25519"}, "ED25519", nil, pb.KeyType_Ed25519) + }) + + t.Run("init with rsa", func(t *testing.T) { + t.Parallel() + testInitAlgo(t, []string{"--bits=2048", "--algorithm=rsa"}, "2048-bit RSA", peer.ErrNoPublicKey, 0) + }) + + t.Run("init with default algorithm", func(t *testing.T) { + t.Parallel() + testInitAlgo(t, []string{}, "ED25519", nil, pb.KeyType_Ed25519) + }) + + t.Run("ipfs init --profile with invalid profile fails", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode() + res := node.RunIPFS("init", "--profile=invalid_profile") + assert.NotEqual(t, 0, res.ExitErr.ExitCode()) + assert.Equal(t, "Error: invalid configuration profile: invalid_profile", res.Stderr.Trimmed()) + 
}) + + t.Run("ipfs init --profile with valid profile succeeds", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode() + node.IPFS("init", "--profile=server") + }) + + t.Run("ipfs config looks good", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init("--profile=server") + + lines := node.IPFS("config", "Swarm.AddrFilters").Stdout.Lines() + assert.Len(t, lines, 18) + + out := node.IPFS("config", "Bootstrap").Stdout.Trimmed() + assert.Equal(t, "[]", out) + + out = node.IPFS("config", "Addresses.API").Stdout.Trimmed() + assert.Equal(t, "/ip4/127.0.0.1/tcp/0", out) + }) + + t.Run("ipfs init from existing config succeeds", func(t *testing.T) { + t.Parallel() + nodes := harness.NewT(t).NewNodes(2) + node1 := nodes[0] + node2 := nodes[1] + + node1.Init("--profile=server") + + node2.IPFS("init", fp.Join(node1.Dir, "config")) + out := node2.IPFS("config", "Addresses.API").Stdout.Trimmed() + assert.Equal(t, "/ip4/127.0.0.1/tcp/0", out) + }) + + t.Run("ipfs init should not run while daemon is running", func(t *testing.T) { + t.Parallel() + node := harness.NewT(t).NewNode().Init().StartDaemon() + res := node.RunIPFS("init") + assert.NotEqual(t, 0, res.ExitErr.ExitCode()) + assert.Contains(t, res.Stderr.String(), "Error: ipfs daemon is running. 
please stop it to run this command") + }) + +} diff --git a/test/cli/ping_test.go b/test/cli/ping_test.go new file mode 100644 index 00000000000..c4195024a9c --- /dev/null +++ b/test/cli/ping_test.go @@ -0,0 +1,73 @@ +package cli + +import ( + "fmt" + "testing" + + "github.com/ipfs/kubo/test/cli/harness" + "github.com/stretchr/testify/assert" +) + +func TestPing(t *testing.T) { + t.Parallel() + + t.Run("other", func(t *testing.T) { + t.Parallel() + nodes := harness.NewT(t).NewNodes(2).Init().StartDaemons().Connect() + node1 := nodes[0] + node2 := nodes[1] + + node1.IPFS("ping", "-n", "2", "--", node2.PeerID().String()) + node2.IPFS("ping", "-n", "2", "--", node1.PeerID().String()) + }) + + t.Run("ping unreachable peer", func(t *testing.T) { + t.Parallel() + nodes := harness.NewT(t).NewNodes(2).Init().StartDaemons().Connect() + node1 := nodes[0] + + badPeer := "QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJx" + res := node1.RunIPFS("ping", "-n", "2", "--", badPeer) + assert.Contains(t, res.Stdout.String(), fmt.Sprintf("Looking up peer %s", badPeer)) + assert.Contains(t, res.Stderr.String(), "Error: ping failed") + }) + + t.Run("self", func(t *testing.T) { + t.Parallel() + nodes := harness.NewT(t).NewNodes(2).Init().StartDaemons() + node1 := nodes[0] + node2 := nodes[1] + + res := node1.RunIPFS("ping", "-n", "2", "--", node1.PeerID().String()) + assert.Equal(t, 1, res.Cmd.ProcessState.ExitCode()) + assert.Contains(t, res.Stderr.String(), "can't ping self") + + res = node2.RunIPFS("ping", "-n", "2", "--", node2.PeerID().String()) + assert.Equal(t, 1, res.Cmd.ProcessState.ExitCode()) + assert.Contains(t, res.Stderr.String(), "can't ping self") + }) + + t.Run("0", func(t *testing.T) { + t.Parallel() + nodes := harness.NewT(t).NewNodes(2).Init().StartDaemons().Connect() + node1 := nodes[0] + node2 := nodes[1] + + res := node1.RunIPFS("ping", "-n", "0", "--", node2.PeerID().String()) + assert.Equal(t, 1, res.Cmd.ProcessState.ExitCode()) + assert.Contains(t, 
res.Stderr.String(), "ping count must be greater than 0") + }) + + t.Run("offline", func(t *testing.T) { + t.Parallel() + nodes := harness.NewT(t).NewNodes(2).Init().StartDaemons().Connect() + node1 := nodes[0] + node2 := nodes[1] + + node2.StopDaemon() + + res := node1.RunIPFS("ping", "-n", "2", "--", node2.PeerID().String()) + assert.Equal(t, 1, res.Cmd.ProcessState.ExitCode()) + assert.Contains(t, res.Stderr.String(), "ping failed") + }) +} diff --git a/test/cli/testutils/cids.go b/test/cli/testutils/cids.go new file mode 100644 index 00000000000..cae473e083a --- /dev/null +++ b/test/cli/testutils/cids.go @@ -0,0 +1,6 @@ +package testutils + +const ( + CIDWelcomeDocs = "QmQPeNsJPyVWPFDVHb77w8G42Fvo15z4bG2X8D2GhfbSXc" + CIDEmptyDir = "QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn" +) diff --git a/test/cli/testutils/requires.go b/test/cli/testutils/requires.go new file mode 100644 index 00000000000..d4b88cd6dfe --- /dev/null +++ b/test/cli/testutils/requires.go @@ -0,0 +1,37 @@ +package testutils + +import ( + "os" + "runtime" + "testing" +) + +func RequiresDocker(t *testing.T) { + if os.Getenv("TEST_NO_DOCKER") == "1" { + t.SkipNow() + } +} + +func RequiresFUSE(t *testing.T) { + if os.Getenv("TEST_NO_FUSE") == "1" { + t.SkipNow() + } +} + +func RequiresExpensive(t *testing.T) { + if os.Getenv("TEST_EXPENSIVE") == "1" || testing.Short() { + t.SkipNow() + } +} + +func RequiresPlugins(t *testing.T) { + if os.Getenv("TEST_NO_PLUGIN") == "1" { + t.SkipNow() + } +} + +func RequiresLinux(t *testing.T) { + if runtime.GOOS != "linux" { + t.SkipNow() + } +} diff --git a/test/cli/testutils/util.go b/test/cli/testutils/util.go new file mode 100644 index 00000000000..2c013f5b9e9 --- /dev/null +++ b/test/cli/testutils/util.go @@ -0,0 +1,97 @@ +package testutils + +import ( + "bufio" + "encoding/json" + "fmt" + "log" + "os" + "path/filepath" + "strings" +) + +func SplitLines(s string) []string { + var lines []string + scanner := bufio.NewScanner(strings.NewReader(s)) + for 
scanner.Scan() { + lines = append(lines, scanner.Text()) + } + return lines +} + +func MustOpen(name string) *os.File { + f, err := os.Open(name) + if err != nil { + log.Panicf("opening %s: %s", name, err) + } + return f +} + +// StrCat takes a bunch of strings or string slices +// and concats them all together into one string slice. +// If an arg is not one of those types, this panics. +// If an arg is an empty string, it is dropped. +func StrCat(args ...interface{}) []string { + res := make([]string, 0) + for _, a := range args { + if s, ok := a.(string); ok { + if s != "" { + res = append(res, s) + } + continue + } + if ss, ok := a.([]string); ok { + for _, s := range ss { + if s != "" { + res = append(res, s) + } + } + continue + } + panic(fmt.Sprintf("arg '%v' must be a string or string slice, but is '%T'", a, a)) + } + return res +} + +// PreviewStr returns a preview of s, which is a prefix for logging that avoids dumping a huge string to logs. +func PreviewStr(s string) string { + suffix := "..." + previewLength := 10 + if len(s) < previewLength { + previewLength = len(s) + suffix = "" + } + return s[0:previewLength] + suffix +} + +type JSONObj map[string]interface{} + +func ToJSONStr(m JSONObj) string { + b, err := json.Marshal(m) + if err != nil { + panic(err) + } + return string(b) +} + +// Searches for a file in a dir, then the parent dir, etc. +// If the file is not found, an empty string is returned. 
+func FindUp(name, dir string) string { + curDir := dir + for { + entries, err := os.ReadDir(curDir) + if err != nil { + panic(err) + } + for _, e := range entries { + if name == e.Name() { + return filepath.Join(curDir, name) + } + } + newDir := filepath.Dir(curDir) + if newDir == curDir { + return "" + } + curDir = newDir + } +} diff --git a/test/sharness/t0010-basic-commands.sh b/test/sharness/t0010-basic-commands.sh deleted file mode 100755 index 2b86466cf55..00000000000 --- a/test/sharness/t0010-basic-commands.sh +++ /dev/null @@ -1,149 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2014 Christian Couder -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test installation and some basic commands" - -. lib/test-lib.sh - -test_expect_success "current dir is writable" ' - echo "It works!" >test.txt -' - -test_expect_success "ipfs version succeeds" ' - ipfs version >version.txt -' - -test_expect_success "ipfs --version success" ' - ipfs --version -' - -test_expect_success "ipfs version output looks good" ' - egrep "^ipfs version [0-9]+\.[0-9]+\.[0-9]" version.txt >/dev/null || - test_fsh cat version.txt -' - -test_expect_success "ipfs versions matches ipfs --version" ' - ipfs version > version.txt && - ipfs --version > version2.txt && - diff version2.txt version.txt || - test_fsh ipfs --version - -' - -test_expect_success "ipfs version --all has all required fields" ' - ipfs version --all > version_all.txt && - grep "Kubo version" version_all.txt && - grep "Repo version" version_all.txt && - grep "System version" version_all.txt && - grep "Golang version" version_all.txt -' - -test_expect_success "ipfs version deps succeeds" ' - ipfs version deps >deps.txt -' - -test_expect_success "ipfs version deps output looks good ( set \$GOIPFSTEST_SKIP_LOCAL_DEVTREE_DEPS_CHECK to skip this test )" ' - head -1 deps.txt | grep "go-ipfs@(devel)" && - [[ "$GOIPFSTEST_SKIP_LOCAL_DEVTREE_DEPS_CHECK" == "1" ]] || - [[ $(tail -n +2 deps.txt | egrep 
-v -c "^[^ @]+@v[^ @]+( => [^ @]+@v[^ @]+)?$") -eq 0 ]] || - test_fsh cat deps.txt -' - -test_expect_success "'ipfs commands' succeeds" ' - ipfs commands >commands.txt -' - -test_expect_success "'ipfs commands' output looks good" ' - grep "ipfs add" commands.txt && - grep "ipfs daemon" commands.txt && - grep "ipfs update" commands.txt -' - -test_expect_success "All sub-commands accept help" ' - echo 0 > fail - while read -r cmd - do - ${cmd:0:4} help ${cmd:5} >/dev/null || - { echo "$cmd does not accept --help"; echo 1 > fail; } - echo stuff | $cmd --help >/dev/null || - { echo "$cmd does not accept --help when using stdin"; echo 1 > fail; } - done fail - while read -r cmd - do - $cmd --help >/dev/null || - { echo "$cmd does not accept --help"; echo 1 > fail; } - echo stuff | $cmd --help >/dev/null || - { echo "$cmd does not accept --help when using stdin"; echo 1 > fail; } - done fail - ipfs --help > help.txt - cut -d" " -f 2 commands.txt | grep -v ipfs | sort -u | \ - while read cmd - do - grep " $cmd" help.txt > /dev/null || - { echo "missing $cmd from helptext"; echo 1 > fail; } - done - - if [ $(cat fail) = 1 ]; then - return 1 - fi -' - -test_expect_failure "All ipfs commands docs are 80 columns or less" ' - echo 0 > fail - while read cmd - do - LENGTH="$($cmd --help | awk "{ print length }" | sort -nr | head -1)" - [ $LENGTH -gt 80 ] && - { echo "$cmd help text is longer than 79 chars ($LENGTH)"; echo 1 > fail; } - done fail - while read -r cmd - do - test_must_fail $cmd --badflag >/dev/null 2>&1 || - { echo "$cmd exit with code 0 when passed --badflag"; echo 1 > fail; } - done commands.txt -' - -test_expect_success "'ipfs commands --flags' output looks good" ' - grep "ipfs pin add --recursive / ipfs pin add -r" commands.txt && - grep "ipfs id --format / ipfs id -f" commands.txt && - grep "ipfs repo gc --quiet / ipfs repo gc -q" commands.txt -' - - - -test_done diff --git a/test/sharness/t0011-completion.sh b/test/sharness/t0011-completion.sh deleted file 
mode 100755 index cc1c3f0dc05..00000000000 --- a/test/sharness/t0011-completion.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env bash - -test_description="Test generated bash completions" - -. lib/test-lib.sh - -test_expect_success "'ipfs commands completion bash' succeeds" ' - ipfs commands completion bash > completions.bash -' - -test_expect_success "generated completions defines '_ipfs'" ' - bash -c "source completions.bash && type -t _ipfs" -' - -test_done diff --git a/test/sharness/t0020-init.sh b/test/sharness/t0020-init.sh deleted file mode 100755 index c1eb209a84c..00000000000 --- a/test/sharness/t0020-init.sh +++ /dev/null @@ -1,271 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2014 Christian Couder -# MIT Licensed; see the LICENSE file in this repository. -# - -test_description="Test init command" - -. lib/test-lib.sh - -# test that ipfs fails to init with BAD_IPFS_DIR that isn't writeable -test_expect_success "create dir and change perms succeeds" ' - export BAD_IPFS_DIR="$(pwd)/.badipfs" && - mkdir "$BAD_IPFS_DIR" && - chmod 000 "$BAD_IPFS_DIR" -' - -test_expect_success "ipfs init fails" ' - test_must_fail ipfs init --repo-dir "$BAD_IPFS_DIR" 2> init_fail_out -' - -# Under Windows/Cygwin the error message is different, -# so we use the STD_ERR_MSG prereq. -if test_have_prereq STD_ERR_MSG; then - init_err_msg="Error: error loading plugins: open $BAD_IPFS_DIR/config: permission denied" -else - init_err_msg="Error: error loading plugins: open $BAD_IPFS_DIR/config: The system cannot find the path specified." 
-fi - -test_expect_success "ipfs init output looks good" ' - echo "$init_err_msg" >init_fail_exp && - test_cmp init_fail_exp init_fail_out -' - -test_expect_success "cleanup dir with bad perms" ' - chmod 775 "$BAD_IPFS_DIR" && - rmdir "$BAD_IPFS_DIR" -' - -# test no repo error message -# this applies to `ipfs add sth`, `ipfs refs ` -test_expect_success "ipfs cat fails" ' - export IPFS_DIR="$(pwd)/.ipfs" && - test_must_fail ipfs cat --repo-dir "$IPFS_DIR" Qmaa4Rw81a3a1VEx4LxB7HADUAXvZFhCoRdBzsMZyZmqHD 2> cat_fail_out -' - -test_expect_success "ipfs cat no repo message looks good" ' - echo "Error: no IPFS repo found in $IPFS_DIR." > cat_fail_exp && - echo "please run: '"'"'ipfs init'"'"'" >> cat_fail_exp && - test_path_cmp cat_fail_exp cat_fail_out -' - -# $1 must be one of 'rsa', 'ed25519' or '' (for default key algorithm). -test_ipfs_init_flags() { - TEST_ALG=$1 - - # test that init succeeds - test_expect_success "ipfs init succeeds" ' - export IPFS_DIR="$(pwd)/.ipfs" && - echo "IPFS_DIR: \"$IPFS_DIR\"" && - RSA_BITS="2048" && - case $TEST_ALG in - "rsa") - ipfs init --repo-dir "$IPFS_DIR" --algorithm=rsa --bits="$RSA_BITS" >actual_init || test_fsh cat actual_init - ;; - "ed25519") - ipfs init --repo-dir "$IPFS_DIR" --algorithm=ed25519 >actual_init || test_fsh cat actual_init - ;; - *) - ipfs init --repo-dir "$IPFS_DIR" --algorithm=rsa --bits="$RSA_BITS" >actual_init || test_fsh cat actual_init - ;; - esac - ' - - test_expect_success ".ipfs/ has been created" ' - test -d "$IPFS_DIR" && - test -f "$IPFS_DIR/config" && - test -d "$IPFS_DIR/datastore" && - test -d "$IPFS_DIR/blocks" && - test ! 
-f ._check_writeable || - test_fsh ls -al $IPFS_DIR - ' - - test_expect_success "ipfs config succeeds" ' - echo /ipfs >expected_config && - ipfs config --repo-dir "$IPFS_DIR" Mounts.IPFS >actual_config && - test_cmp expected_config actual_config - ' - - test_expect_success "ipfs peer id looks good" ' - PEERID=$(ipfs config --repo-dir "$IPFS_DIR" Identity.PeerID) && - test_check_peerid "$PEERID" - ' - - test_expect_success "ipfs init output looks good" ' - STARTFILE="ipfs cat /ipfs/$HASH_WELCOME_DOCS/readme" && - - echo "generating $RSA_BITS-bit RSA keypair...done" >rsa_expected && - echo "peer identity: $PEERID" >>rsa_expected && - echo "initializing IPFS node at $IPFS_DIR" >>rsa_expected && - echo "to get started, enter:" >>rsa_expected && - printf "\\n\\t$STARTFILE\\n\\n" >>rsa_expected && - - echo "generating ED25519 keypair...done" >ed25519_expected && - echo "peer identity: $PEERID" >>ed25519_expected && - echo "initializing IPFS node at $IPFS_DIR" >>ed25519_expected && - echo "to get started, enter:" >>ed25519_expected && - printf "\\n\\t$STARTFILE\\n\\n" >>ed25519_expected && - - case $TEST_ALG in - rsa) - test_cmp rsa_expected actual_init - ;; - ed25519) - test_cmp ed25519_expected actual_init - ;; - *) - test_cmp rsa_expected actual_init - ;; - esac - ' - - test_expect_success "Welcome readme exists" ' - ipfs cat /ipfs/$HASH_WELCOME_DOCS/readme - ' - - test_expect_success "clean up ipfs dir" ' - rm -rf "$IPFS_DIR" - ' - - test_expect_success "'ipfs init --empty-repo' succeeds" ' - RSA_BITS="2048" && - case $TEST_ALG in - rsa) - ipfs init --repo-dir "$IPFS_DIR" --algorithm=rsa --bits="$RSA_BITS" --empty-repo >actual_init - ;; - ed25519) - ipfs init --repo-dir "$IPFS_DIR" --algorithm=ed25519 --empty-repo >actual_init - ;; - *) - ipfs init --repo-dir "$IPFS_DIR" --empty-repo >actual_init - ;; - esac - ' - - test_expect_success "ipfs peer id looks good" ' - PEERID=$(ipfs config --repo-dir "$IPFS_DIR" Identity.PeerID) && - test_check_peerid "$PEERID" - ' - - 
test_expect_success "'ipfs init --empty-repo' output looks good" ' - - echo "generating $RSA_BITS-bit RSA keypair...done" >rsa_expected && - echo "peer identity: $PEERID" >>rsa_expected && - echo "initializing IPFS node at $IPFS_DIR" >>rsa_expected && - - echo "generating ED25519 keypair...done" >ed25519_expected && - echo "peer identity: $PEERID" >>ed25519_expected && - echo "initializing IPFS node at $IPFS_DIR" >>ed25519_expected && - - case $TEST_ALG in - rsa) - test_cmp rsa_expected actual_init - ;; - ed25519) - test_cmp ed25519_expected actual_init - ;; - *) - test_cmp ed25519_expected actual_init - ;; - esac - ' - - test_expect_success "Welcome readme doesn't exist" ' - test_must_fail ipfs cat /ipfs/$HASH_WELCOME_DOCS/readme - ' - - test_expect_success "ipfs id agent string contains correct version" ' - ipfs id -f "" | grep $(ipfs version -n) - ' - - test_expect_success "clean up ipfs dir" ' - rm -rf "$IPFS_DIR" - ' -} -test_ipfs_init_flags 'ed25519' -test_ipfs_init_flags 'rsa' -test_ipfs_init_flags '' - -# test init profiles -test_expect_success "'ipfs init --profile' with invalid profile fails" ' - RSA_BITS="2048" && - test_must_fail ipfs init --repo-dir "$IPFS_DIR" --profile=nonexistent_profile 2> invalid_profile_out - EXPECT="Error: invalid configuration profile: nonexistent_profile" && - grep "$EXPECT" invalid_profile_out -' - -test_expect_success "'ipfs init --profile' succeeds" ' - RSA_BITS="2048" && - ipfs init --repo-dir "$IPFS_DIR" --profile=server -' - -test_expect_success "'ipfs config Swarm.AddrFilters' looks good" ' - ipfs config --repo-dir "$IPFS_DIR" Swarm.AddrFilters > actual_config && - test $(cat actual_config | wc -l) = 18 -' - -test_expect_success "clean up ipfs dir" ' - rm -rf "$IPFS_DIR" -' - -test_expect_success "'ipfs init --profile=test' succeeds" ' - RSA_BITS="2048" && - ipfs init --repo-dir "$IPFS_DIR" --profile=test -' - -test_expect_success "'ipfs config Bootstrap' looks good" ' - ipfs config --repo-dir "$IPFS_DIR" Bootstrap > 
actual_config && - test $(cat actual_config) = "[]" -' - -test_expect_success "'ipfs config Addresses.API' looks good" ' - ipfs config --repo-dir "$IPFS_DIR" Addresses.API > actual_config && - test $(cat actual_config) = "/ip4/127.0.0.1/tcp/0" -' - -test_expect_success "ipfs init from existing config succeeds" ' - export ORIG_PATH=$IPFS_DIR - export IPFS_DIR=$(pwd)/.ipfs-clone - - ipfs init --repo-dir "$IPFS_DIR" "$ORIG_PATH/config" && - ipfs config --repo-dir "$IPFS_DIR" Addresses.API > actual_config && - test $(cat actual_config) = "/ip4/127.0.0.1/tcp/0" -' - -test_expect_success "clean up ipfs clone dir and reset IPFS_DIR" ' - rm -rf "$IPFS_DIR" && - export IPFS_DIR=$ORIG_PATH -' - -test_expect_success "clean up ipfs dir" ' - rm -rf "$IPFS_DIR" -' - -test_expect_success "'ipfs init --profile=lowpower' succeeds" ' - RSA_BITS="2048" && - ipfs init --repo-dir "$IPFS_DIR" --profile=lowpower -' - -test_expect_success "'ipfs config Discovery.Routing' looks good" ' - ipfs config --repo-dir "$IPFS_DIR" Routing.Type > actual_config && - test $(cat actual_config) = "dhtclient" -' - -test_expect_success "clean up ipfs dir" ' - rm -rf "$IPFS_DIR" -' - -test_init_ipfs - -test_launch_ipfs_daemon - -test_expect_success "ipfs init should not run while daemon is running" ' - test_must_fail ipfs init --repo-dir "$IPFS_DIR" 2> daemon_running_err && - EXPECT="Error: ipfs daemon is running. please stop it to run this command" && - grep "$EXPECT" daemon_running_err -' - -test_kill_ipfs_daemon - -test_done diff --git a/test/sharness/t0702-delegated-routing-http.sh b/test/sharness/t0702-delegated-routing-http.sh deleted file mode 100755 index 03f452fd4d7..00000000000 --- a/test/sharness/t0702-delegated-routing-http.sh +++ /dev/null @@ -1,171 +0,0 @@ -#!/usr/bin/env bash - -test_description="Test delegated routing via HTTP endpoint" - -. lib/test-lib.sh - -if ! 
test_have_prereq SOCAT; then - skip_all="skipping '$test_description': socat is not available" - test_done -fi - -# simple http routing server mock -# local endpoint responds with deterministic application/vnd.ipfs.rpc+dag-json; version=1 -HTTP_ROUTING_PORT=5098 -function start_http_routing_mock_endpoint() { - REMOTE_SERVER_LOG="http-routing-server.log" - rm -f $REMOTE_SERVER_LOG - - touch response - socat tcp-listen:$HTTP_ROUTING_PORT,fork,bind=127.0.0.1,reuseaddr 'SYSTEM:cat response'!!CREATE:$REMOTE_SERVER_LOG & - REMOTE_SERVER_PID=$! - - socat /dev/null tcp:127.0.0.1:$HTTP_ROUTING_PORT,retry=10 - return $? -} -function serve_http_routing_response() { - local body=$1 - local status_code=${2:-"200 OK"} - local length=$((1 + ${#body})) - echo -e "HTTP/1.1 $status_code\nContent-Length: $length\nContent-Type: application/json\n\n$body" > response -} -function stop_http_routing_mock_endpoint() { - exec 7<&- - kill $REMOTE_SERVER_PID > /dev/null 2>&1 - wait $REMOTE_SERVER_PID || true -} - -# daemon running in online mode to ensure Pin.origins/PinStatus.delegates work -test_init_ipfs - -# based on static, synthetic http routing messages: -# t0702-delegated-routing-http/FindProvidersRequest -# t0702-delegated-routing-http/FindProvidersResponse -FINDPROV_CID="baeabep4vu3ceru7nerjjbk37sxb7wmftteve4hcosmyolsbsiubw2vr6pqzj6mw7kv6tbn6nqkkldnklbjgm5tzbi4hkpkled4xlcr7xz4bq" -EXPECTED_PROV="12D3KooWARYacCc6eoCqvsS9RW9MA2vo51CV75deoiqssx3YgyYJ" - -test_expect_success "default Routing config has no Routers defined" ' - echo null > expected && - ipfs config show | jq .Routing.Routers > actual && - test_cmp expected actual -' - -# turn off all implicit routers -ipfs config Routing.Type none || exit 1 -test_launch_ipfs_daemon -test_expect_success "disabling default router (dht) works" ' - ipfs config Routing.Type > actual && - echo none > expected && - test_cmp expected actual -' -test_expect_success "no routers means findprovs returns no results" ' - ipfs routing findprovs 
"$FINDPROV_CID" > actual && - echo -n > expected && - test_cmp expected actual -' - -test_kill_ipfs_daemon - -ipfs config Routing.Type --json '"custom"' || exit 1 -ipfs config Routing.Methods --json '{ - "find-peers": { - "RouterName": "TestDelegatedRouter" - }, - "find-providers": { - "RouterName": "TestDelegatedRouter" - }, - "get-ipns": { - "RouterName": "TestDelegatedRouter" - }, - "provide": { - "RouterName": "TestDelegatedRouter" - } - }' || exit 1 - -test_expect_success "missing method params makes daemon fails" ' - echo "Error: constructing the node (see log for full detail): method name \"put-ipns\" is missing from Routing.Methods config param" > expected_error && - GOLOG_LOG_LEVEL=fatal ipfs daemon 2> actual_error || exit 0 && - test_cmp expected_error actual_error -' - -ipfs config Routing.Methods --json '{ - "find-peers": { - "RouterName": "TestDelegatedRouter" - }, - "find-providers": { - "RouterName": "TestDelegatedRouter" - }, - "get-ipns": { - "RouterName": "TestDelegatedRouter" - }, - "provide": { - "RouterName": "TestDelegatedRouter" - }, - "put-ipns": { - "RouterName": "TestDelegatedRouter" - }, - "NOT_SUPPORTED": { - "RouterName": "TestDelegatedRouter" - } - }' || exit 1 - -test_expect_success "having wrong methods makes daemon fails" ' - echo "Error: constructing the node (see log for full detail): method name \"NOT_SUPPORTED\" is not a supported method on Routing.Methods config param" > expected_error && - GOLOG_LOG_LEVEL=fatal ipfs daemon 2> actual_error || exit 0 && - test_cmp expected_error actual_error -' - -# set Routing config to only use delegated routing via mocked http routing endpoint - -ipfs config Routing.Type --json '"custom"' || exit 1 -ipfs config Routing.Routers.TestDelegatedRouter --json '{ - "Type": "http", - "Parameters": { - "Endpoint": "http://127.0.0.1:5098/routing/v1" - } -}' || exit 1 -ipfs config Routing.Methods --json '{ - "find-peers": { - "RouterName": "TestDelegatedRouter" - }, - "find-providers": { - "RouterName": 
"TestDelegatedRouter" - }, - "get-ipns": { - "RouterName": "TestDelegatedRouter" - }, - "provide": { - "RouterName": "TestDelegatedRouter" - }, - "put-ipns": { - "RouterName": "TestDelegatedRouter" - } - }' || exit 1 - -test_expect_success "adding http delegated routing endpoint to Routing.Routers config works" ' - echo "http://127.0.0.1:5098/routing/v1" > expected && - ipfs config Routing.Routers.TestDelegatedRouter.Parameters.Endpoint > actual && - test_cmp expected actual -' - -test_launch_ipfs_daemon - -test_expect_success "start_http_routing_mock_endpoint" ' - start_http_routing_mock_endpoint -' - -test_expect_success "'ipfs routing findprovs' returns result from delegated http router" ' - serve_http_routing_response "$(<../t0702-delegated-routing-http/FindProvidersResponse)" && - echo "$EXPECTED_PROV" > expected && - ipfs routing findprovs "$FINDPROV_CID" > actual && - test_cmp expected actual -' - -test_expect_success "stop_http_routing_mock_endpoint" ' - stop_http_routing_mock_endpoint -' - - -test_kill_ipfs_daemon -test_done -# vim: ts=2 sw=2 sts=2 et: diff --git a/test/unit/Rules.mk b/test/unit/Rules.mk index 30f712a451d..69404637c11 100644 --- a/test/unit/Rules.mk +++ b/test/unit/Rules.mk @@ -2,7 +2,7 @@ include mk/header.mk CLEAN += $(d)/gotest.json $(d)/gotest.junit.xml -$(d)/gotest.junit.xml: clean test/bin/gotestsum coverage/unit_tests.coverprofile +$(d)/gotest.junit.xml: test/bin/gotestsum coverage/unit_tests.coverprofile gotestsum --no-color --junitfile $@ --raw-command cat $(@D)/gotest.json include mk/footer.mk