diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 00000000..5f59c210 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,47 @@ +language: go +dist: trusty + +go: + - 1.9.x + - 1.10.x + +env: + matrix: + - TARGET=amd64 + - TARGET=arm + - TARGET=arm64 + - TARGET=ppc64le + - TARGET=s390x + +matrix: + fast_finish: true + +# Ubuntu 14.04 (trusty) doesn't have a new enough util-linux for nsenter, +# so we have to build it ourselves. +before_install: + - + - wget https://www.kernel.org/pub/linux/utils/util-linux/v2.24/util-linux-2.24.1.tar.gz -qO - | tar -xz -C . + - sudo apt-get update + - sudo apt-get install -y libncurses5-dev libslang2-dev gettext zlib1g-dev libselinux1-dev debhelper lsb-release pkg-config po-debconf autoconf automake autopoint libtool + - pushd util-linux-2.24.1 + - ./autogen.sh + - ./configure + - make + - sudo cp ./nsenter /usr/bin + - popd + - rm -rf util-linux-2.24.1 + +install: + - go get golang.org/x/tools/cmd/cover + - go get github.com/modocache/gover + - go get github.com/mattn/goveralls + - go get -t ./... + +script: + - make check + +notifications: + email: false + +git: + depth: 9999999 diff --git a/Makefile b/Makefile index a774b1e6..3b879a7a 100644 --- a/Makefile +++ b/Makefile @@ -44,13 +44,16 @@ binaries: ocicnitool ocicnitool: .gopathok $(shell hack/find-godeps.sh $(GOPKGDIR) tools/ocicnitool $(PROJECT)) $(GO) build $(LDFLAGS) -tags "$(BUILDTAGS)" -o $@ $(PROJECT)/tools/ocicnitool +check: .gopathok + @./hack/test-go.sh $(GOPKGDIR) + @./hack/verify-gofmt.sh + clean: ifneq ($(GOPATH),) rm -f "$(GOPATH)/.gopathok" endif rm -rf _output -.PHONY: .gitvalidation # When this is running in travis, it will only check the travis commit range .gitvalidation: .gopathok ifeq ($(TRAVIS),true) @@ -59,8 +62,6 @@ else GIT_CHECK_EXCLUDE="./vendor ./_output" $(GOPATH)/bin/git-validation -v -run DCO,short-subject,dangling-whitespace -range $(EPOCH_TEST_COMMIT)..HEAD endif -.PHONY: install.tools - install.tools: .install.gitvalidation .install.gitvalidation: .gopathok @@ -73,5 +74,7 @@ install.tools: .install.gitvalidation clean \ default \ gofmt \ - help - + help \ + check \ + install.tools \ + .gitvalidation diff --git a/hack/test-go.sh b/hack/test-go.sh new file mode 100755 index 00000000..29652798 --- /dev/null +++ b/hack/test-go.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +set -e + +cd $1 + +echo -n "Running tests " +function testrun { + bash -c "umask 0; PATH=$PATH go test $@" +} +if [ ! -z "${COVERALLS:-""}" ]; then + # coverage profile only works per-package + PKGS="$(go list ./... | xargs echo)" + echo "with coverage profile generation..." + i=0 + for t in ${PKGS}; do + testrun "-covermode set -coverprofile ${i}.coverprofile ${t}" + i=$((i+1)) + done + gover + goveralls -service=travis-ci -coverprofile=gover.coverprofile +else + echo "without coverage profile generation..." + testrun "./..." 
+fi + diff --git a/pkg/ocicni/ocicni.go b/pkg/ocicni/ocicni.go index 0978669d..33a3ae06 100644 --- a/pkg/ocicni/ocicni.go +++ b/pkg/ocicni/ocicni.go @@ -4,12 +4,16 @@ import ( "errors" "fmt" "os" + "path" "sort" "strings" "sync" "github.com/containernetworking/cni/libcni" + cniinvoke "github.com/containernetworking/cni/pkg/invoke" cnitypes "github.com/containernetworking/cni/pkg/types" + cnicurrent "github.com/containernetworking/cni/pkg/types/current" + cniversion "github.com/containernetworking/cni/pkg/version" "github.com/fsnotify/fsnotify" "github.com/sirupsen/logrus" ) @@ -18,14 +22,16 @@ type cniNetworkPlugin struct { loNetwork *cniNetwork sync.RWMutex - defaultNetwork *cniNetwork + defaultNetName string + networks map[string]*cniNetwork - nsManager *nsManager - pluginDir string - cniDirs []string - vendorCNIDirPrefix string + nsManager *nsManager + confDir string + binDirs []string - monitorNetDirChan chan struct{} + shutdownChan chan struct{} + watcher *fsnotify.Watcher + done *sync.WaitGroup // The pod map provides synchronization for a given pod's network // operations. Each pod's setup/teardown/status operations @@ -33,12 +39,17 @@ type cniNetworkPlugin struct { // pods can proceed in parallel. podsLock sync.Mutex pods map[string]*podLock + + // For testcases + exec cniinvoke.Exec + cacheDir string } type cniNetwork struct { name string + filePath string NetworkConfig *libcni.NetworkConfigList - CNIConfig libcni.CNI + CNIConfig *libcni.CNIConfig } var errMissingDefaultNetwork = errors.New("Missing CNI default network") @@ -98,75 +109,111 @@ func (plugin *cniNetworkPlugin) podUnlock(podNetwork PodNetwork) { } } -func (plugin *cniNetworkPlugin) monitorNetDir() { +func newWatcher(confDir string) (*fsnotify.Watcher, error) { + // Ensure plugin directory exists, because the following monitoring logic + // relies on that. + if err := os.MkdirAll(confDir, 0755); err != nil { + return nil, fmt.Errorf("failed to create %q: %v", confDir, err) + } + watcher, err := fsnotify.NewWatcher() if err != nil { - logrus.Errorf("could not create new watcher %v", err) - return + return nil, fmt.Errorf("could not create new watcher %v", err) } - defer watcher.Close() + defer func() { + // Close watcher on error + if err != nil { + watcher.Close() + } + }() - if err = watcher.Add(plugin.pluginDir); err != nil { - logrus.Errorf("Failed to add watch on %q: %v", plugin.pluginDir, err) - return + if err = watcher.Add(confDir); err != nil { + return nil, fmt.Errorf("failed to add watch on %q: %v", confDir, err) } - // Now that `watcher` is running and watching the `pluginDir` - // gather the initial configuration, before starting the - // goroutine which will actually process events. It has to be - // done in this order to avoid missing any updates which might - // otherwise occur between gathering the initial configuration - // and starting the watcher. 
- if err := plugin.syncNetworkConfig(); err != nil { - logrus.Infof("Initial CNI setting failed, continue monitoring: %v", err) - } else { - logrus.Infof("Initial CNI setting succeeded") - } - - go func() { - for { - select { - case event := <-watcher.Events: - logrus.Debugf("CNI monitoring event %v", event) - if event.Op&fsnotify.Create != fsnotify.Create && - event.Op&fsnotify.Write != fsnotify.Write { - continue - } + return watcher, nil +} - if err = plugin.syncNetworkConfig(); err == nil { - logrus.Infof("CNI asynchronous setting succeeded") - continue +func (plugin *cniNetworkPlugin) monitorConfDir(start *sync.WaitGroup) { + start.Done() + plugin.done.Add(1) + defer plugin.done.Done() + for { + select { + case event := <-plugin.watcher.Events: + logrus.Warningf("CNI monitoring event %v", event) + + var defaultDeleted bool + createWrite := (event.Op&fsnotify.Create == fsnotify.Create || + event.Op&fsnotify.Write == fsnotify.Write) + if event.Op&fsnotify.Remove == fsnotify.Remove { + // Care about the event if the default network + // was just deleted + defNet := plugin.getDefaultNetwork() + if defNet != nil && event.Name == defNet.filePath { + defaultDeleted = true } - logrus.Errorf("CNI setting failed, continue monitoring: %v", err) + } + if !createWrite && !defaultDeleted { + continue + } - case err := <-watcher.Errors: - if err == nil { - continue - } - logrus.Errorf("CNI monitoring error %v", err) - close(plugin.monitorNetDirChan) - return + if err := plugin.syncNetworkConfig(); err != nil { + logrus.Errorf("CNI config loading failed, continue monitoring: %v", err) + continue } + + case err := <-plugin.watcher.Errors: + if err == nil { + continue + } + logrus.Errorf("CNI monitoring error %v", err) + return + + case <-plugin.shutdownChan: + return } - }() + } +} - <-plugin.monitorNetDirChan +// InitCNI takes a binary directory in which to search for CNI plugins, and +// a configuration directory in which to search for CNI JSON config files. +// If no valid CNI configs exist, network requests will fail until valid CNI +// config files are present in the config directory. +// If defaultNetName is not empty, a CNI config with that network name will +// be used as the default CNI network, and container network operations will +// fail until that network config is present and valid. +func InitCNI(defaultNetName string, confDir string, binDirs ...string) (CNIPlugin, error) { + return initCNI(nil, "", defaultNetName, confDir, binDirs...) } -// InitCNI takes the plugin directory and CNI directories where the CNI config -// files should be searched for. If no valid CNI configs exist, network requests -// will fail until valid CNI config files are present in the config directory. 
-func InitCNI(pluginDir string, cniDirs ...string) (CNIPlugin, error) { - vendorCNIDirPrefix := "" +// Internal function to allow faking out exec functions for testing +func initCNI(exec cniinvoke.Exec, cacheDir, defaultNetName string, confDir string, binDirs ...string) (CNIPlugin, error) { + if confDir == "" { + confDir = DefaultConfDir + } + if len(binDirs) == 0 { + binDirs = []string{DefaultBinDir} + } plugin := &cniNetworkPlugin{ - defaultNetwork: nil, - loNetwork: getLoNetwork(cniDirs, vendorCNIDirPrefix), - pluginDir: pluginDir, - cniDirs: cniDirs, - vendorCNIDirPrefix: vendorCNIDirPrefix, - monitorNetDirChan: make(chan struct{}), - pods: make(map[string]*podLock), + defaultNetName: defaultNetName, + networks: make(map[string]*cniNetwork), + loNetwork: getLoNetwork(exec, binDirs), + confDir: confDir, + binDirs: binDirs, + shutdownChan: make(chan struct{}), + done: &sync.WaitGroup{}, + pods: make(map[string]*podLock), + exec: exec, + cacheDir: cacheDir, + } + + if exec == nil { + exec = &cniinvoke.DefaultExec{ + RawExec: &cniinvoke.RawExec{Stderr: os.Stderr}, + PluginDecoder: cniversion.PluginDecoder{}, + } } nsm, err := newNSManager() @@ -175,33 +222,37 @@ func InitCNI(pluginDir string, cniDirs ...string) (CNIPlugin, error) { } plugin.nsManager = nsm - // Ensure plugin directory exists, because the following monitoring logic - // relies on that. - if err := os.MkdirAll(pluginDir, 0755); err != nil { + plugin.syncNetworkConfig() + + plugin.watcher, err = newWatcher(plugin.confDir) + if err != nil { return nil, err } - go plugin.monitorNetDir() + startWg := sync.WaitGroup{} + startWg.Add(1) + go plugin.monitorConfDir(&startWg) + startWg.Wait() return plugin, nil } -func getDefaultCNINetwork(pluginDir string, cniDirs []string, vendorCNIDirPrefix string) (*cniNetwork, error) { - if pluginDir == "" { - pluginDir = DefaultNetDir - } - if len(cniDirs) == 0 { - cniDirs = []string{DefaultCNIDir} - } +func (plugin *cniNetworkPlugin) Shutdown() error { + close(plugin.shutdownChan) + plugin.watcher.Close() + plugin.done.Wait() + return nil +} - files, err := libcni.ConfFiles(pluginDir, []string{".conf", ".conflist", ".json"}) - switch { - case err != nil: - return nil, err - case len(files) == 0: - return nil, errMissingDefaultNetwork +func loadNetworks(exec cniinvoke.Exec, confDir string, binDirs []string) (map[string]*cniNetwork, string, error) { + files, err := libcni.ConfFiles(confDir, []string{".conf", ".conflist", ".json"}) + if err != nil { + return nil, "", err } + networks := make(map[string]*cniNetwork) + defaultNetName := "" + sort.Strings(files) for _, confFile := range files { var confList *libcni.NetworkConfigList @@ -231,27 +282,28 @@ func getDefaultCNINetwork(pluginDir string, cniDirs []string, vendorCNIDirPrefix logrus.Warningf("CNI config list %s has no networks, skipping", confFile) continue } - logrus.Infof("CNI network %s (type=%v) is used from %s", confList.Name, confList.Plugins[0].Network.Type, confFile) - // Search for vendor-specific plugins as well as default plugins in the CNI codebase. 
- vendorDir := vendorCNIDir(vendorCNIDirPrefix, confList.Plugins[0].Network.Type) - cninet := &libcni.CNIConfig{ - Path: append(cniDirs, vendorDir), + if confList.Name == "" { + confList.Name = path.Base(confFile) } - network := &cniNetwork{name: confList.Name, NetworkConfig: confList, CNIConfig: cninet} - return network, nil - } - return nil, fmt.Errorf("No valid networks found in %s", pluginDir) -} -func vendorCNIDir(prefix, pluginType string) string { - return fmt.Sprintf(VendorCNIDirTemplate, prefix, pluginType) -} + logrus.Infof("Found CNI network %s (type=%v) at %s", confList.Name, confList.Plugins[0].Network.Type, confFile) + + networks[confList.Name] = &cniNetwork{ + name: confList.Name, + filePath: confFile, + NetworkConfig: confList, + CNIConfig: libcni.NewCNIConfig(binDirs, exec), + } -func getLoNetwork(cniDirs []string, vendorDirPrefix string) *cniNetwork { - if len(cniDirs) == 0 { - cniDirs = []string{DefaultCNIDir} + if defaultNetName == "" { + defaultNetName = confList.Name + } } + return networks, defaultNetName, nil +} + +func getLoNetwork(exec cniinvoke.Exec, binDirs []string) *cniNetwork { loConfig, err := libcni.ConfListFromBytes([]byte(`{ "cniVersion": "0.2.0", "name": "cni-loopback", @@ -264,45 +316,62 @@ func getLoNetwork(cniDirs []string, vendorDirPrefix string) *cniNetwork { // catch this panic(err) } - vendorDir := vendorCNIDir(vendorDirPrefix, loConfig.Plugins[0].Network.Type) - cninet := &libcni.CNIConfig{ - Path: append(cniDirs, vendorDir), - } loNetwork := &cniNetwork{ name: "lo", NetworkConfig: loConfig, - CNIConfig: cninet, + CNIConfig: libcni.NewCNIConfig(binDirs, exec), } return loNetwork } func (plugin *cniNetworkPlugin) syncNetworkConfig() error { - network, err := getDefaultCNINetwork(plugin.pluginDir, plugin.cniDirs, plugin.vendorCNIDirPrefix) + networks, defaultNetName, err := loadNetworks(plugin.exec, plugin.confDir, plugin.binDirs) if err != nil { - logrus.Errorf("error updating cni config: %s", err) return err } - plugin.setDefaultNetwork(network) + + plugin.Lock() + defer plugin.Unlock() + if plugin.defaultNetName == "" { + plugin.defaultNetName = defaultNetName + } + plugin.networks = networks return nil } -func (plugin *cniNetworkPlugin) getDefaultNetwork() *cniNetwork { +func (plugin *cniNetworkPlugin) getNetwork(name string) (*cniNetwork, error) { plugin.RLock() defer plugin.RUnlock() - return plugin.defaultNetwork + net, ok := plugin.networks[name] + if !ok { + return nil, fmt.Errorf("CNI network %q not found", name) + } + return net, nil } -func (plugin *cniNetworkPlugin) setDefaultNetwork(n *cniNetwork) { - plugin.Lock() - defer plugin.Unlock() - plugin.defaultNetwork = n +func (plugin *cniNetworkPlugin) getDefaultNetworkName() string { + plugin.RLock() + defer plugin.RUnlock() + return plugin.defaultNetName } -func (plugin *cniNetworkPlugin) checkInitialized() error { - if plugin.getDefaultNetwork() == nil { - return errors.New("cni config uninitialized") +func (plugin *cniNetworkPlugin) getDefaultNetwork() *cniNetwork { + defaultNetName := plugin.getDefaultNetworkName() + if defaultNetName == "" { + return nil + } + network, _ := plugin.getNetwork(defaultNetName) + return network +} + +// networksAvailable returns an error if the pod requests no networks and the +// plugin has no default network, and thus the plugin has no idea what network +// to attach the pod to. 
+func (plugin *cniNetworkPlugin) networksAvailable(podNetwork *PodNetwork) error { + if len(podNetwork.Networks) == 0 && plugin.getDefaultNetwork() == nil { + return errMissingDefaultNetwork } return nil } @@ -311,59 +380,119 @@ func (plugin *cniNetworkPlugin) Name() string { return CNIPluginName } -func (plugin *cniNetworkPlugin) SetUpPod(podNetwork PodNetwork) (cnitypes.Result, error) { - if err := plugin.checkInitialized(); err != nil { +func (plugin *cniNetworkPlugin) forEachNetwork(podNetwork *PodNetwork, forEachFunc func(*cniNetwork, string, *PodNetwork) error) error { + networks := podNetwork.Networks + if len(networks) == 0 { + networks = append(networks, plugin.getDefaultNetworkName()) + } + for i, netName := range networks { + // Interface names start at "eth0" and count up for each network + ifName := fmt.Sprintf("eth%d", i) + network, err := plugin.getNetwork(netName) + if err != nil { + logrus.Errorf(err.Error()) + return err + } + if err := forEachFunc(network, ifName, podNetwork); err != nil { + return err + } + } + return nil +} + +func (plugin *cniNetworkPlugin) SetUpPod(podNetwork PodNetwork) ([]cnitypes.Result, error) { + if err := plugin.networksAvailable(&podNetwork); err != nil { return nil, err } plugin.podLock(podNetwork).Lock() defer plugin.podUnlock(podNetwork) - _, err := plugin.loNetwork.addToNetwork(podNetwork) + _, err := plugin.loNetwork.addToNetwork(plugin.cacheDir, &podNetwork, "lo") if err != nil { logrus.Errorf("Error while adding to cni lo network: %s", err) return nil, err } - result, err := plugin.getDefaultNetwork().addToNetwork(podNetwork) - if err != nil { - logrus.Errorf("Error while adding to cni network: %s", err) + results := make([]cnitypes.Result, 0) + if err := plugin.forEachNetwork(&podNetwork, func(network *cniNetwork, ifName string, podNetwork *PodNetwork) error { + result, err := network.addToNetwork(plugin.cacheDir, podNetwork, ifName) + if err != nil { + logrus.Errorf("Error while adding pod to CNI network %q: %s", network.name, err) + return err + } + results = append(results, result) + return nil + }); err != nil { return nil, err } - return result, err + return results, nil } func (plugin *cniNetworkPlugin) TearDownPod(podNetwork PodNetwork) error { - if err := plugin.checkInitialized(); err != nil { + if err := plugin.networksAvailable(&podNetwork); err != nil { return err } plugin.podLock(podNetwork).Lock() defer plugin.podUnlock(podNetwork) - return plugin.getDefaultNetwork().deleteFromNetwork(podNetwork) + return plugin.forEachNetwork(&podNetwork, func(network *cniNetwork, ifName string, podNetwork *PodNetwork) error { + if err := network.deleteFromNetwork(plugin.cacheDir, podNetwork, ifName); err != nil { + logrus.Errorf("Error while removing pod from CNI network %q: %s", network.name, err) + return err + } + return nil + }) } -// TODO: Use the addToNetwork function to obtain the IP of the Pod. That will assume idempotent ADD call to the plugin. -// Also fix the runtime's call to Status function to be done only in the case that the IP is lost, no need to do periodic calls -func (plugin *cniNetworkPlugin) GetPodNetworkStatus(podNetwork PodNetwork) (string, error) { +// GetPodNetworkStatus returns IP addressing and interface details for all +// networks attached to the pod. 
+func (plugin *cniNetworkPlugin) GetPodNetworkStatus(podNetwork PodNetwork) ([]cnitypes.Result, error) { plugin.podLock(podNetwork).Lock() defer plugin.podUnlock(podNetwork) - ip, err := getContainerIP(plugin.nsManager, podNetwork.NetNS, DefaultInterfaceName, "-4") - if err != nil { - ip, err = getContainerIP(plugin.nsManager, podNetwork.NetNS, DefaultInterfaceName, "-6") - } - if err != nil { - return "", err + results := make([]cnitypes.Result, 0) + if err := plugin.forEachNetwork(&podNetwork, func(network *cniNetwork, ifName string, podNetwork *PodNetwork) error { + version := "4" + ip, mac, err := getContainerDetails(plugin.nsManager, podNetwork.NetNS, ifName, "-4") + if err != nil { + ip, mac, err = getContainerDetails(plugin.nsManager, podNetwork.NetNS, ifName, "-6") + if err != nil { + return err + } + version = "6" + } + + // Until CNI's GET request lands, construct the Result manually + results = append(results, &cnicurrent.Result{ + CNIVersion: "0.3.1", + Interfaces: []*cnicurrent.Interface{ + { + Name: ifName, + Mac: mac.String(), + Sandbox: podNetwork.NetNS, + }, + }, + IPs: []*cnicurrent.IPConfig{ + { + Version: version, + Interface: cnicurrent.Int(0), + Address: *ip, + }, + }, + }) + return nil + }); err != nil { + return nil, err } - return ip.String(), nil + return results, nil } -func (network *cniNetwork) addToNetwork(podNetwork PodNetwork) (cnitypes.Result, error) { - rt, err := buildCNIRuntimeConf(podNetwork) +func (network *cniNetwork) addToNetwork(cacheDir string, podNetwork *PodNetwork, ifName string) (cnitypes.Result, error) { + rt, err := buildCNIRuntimeConf(cacheDir, podNetwork, ifName) if err != nil { logrus.Errorf("Error adding network: %v", err) return nil, err @@ -380,8 +509,8 @@ func (network *cniNetwork) addToNetwork(podNetwork PodNetwork) (cnitypes.Result, return res, nil } -func (network *cniNetwork) deleteFromNetwork(podNetwork PodNetwork) error { - rt, err := buildCNIRuntimeConf(podNetwork) +func (network *cniNetwork) deleteFromNetwork(cacheDir string, podNetwork *PodNetwork, ifName string) error { + rt, err := buildCNIRuntimeConf(cacheDir, podNetwork, ifName) if err != nil { logrus.Errorf("Error deleting network: %v", err) return err @@ -397,13 +526,14 @@ func (network *cniNetwork) deleteFromNetwork(podNetwork PodNetwork) error { return nil } -func buildCNIRuntimeConf(podNetwork PodNetwork) (*libcni.RuntimeConf, error) { +func buildCNIRuntimeConf(cacheDir string, podNetwork *PodNetwork, ifName string) (*libcni.RuntimeConf, error) { logrus.Infof("Got pod network %+v", podNetwork) rt := &libcni.RuntimeConf{ ContainerID: podNetwork.ID, NetNS: podNetwork.NetNS, - IfName: DefaultInterfaceName, + CacheDir: cacheDir, + IfName: ifName, Args: [][2]string{ {"IgnoreUnknown", "1"}, {"K8S_POD_NAMESPACE", podNetwork.Namespace}, @@ -423,5 +553,8 @@ func buildCNIRuntimeConf(podNetwork PodNetwork) (*libcni.RuntimeConf, error) { } func (plugin *cniNetworkPlugin) Status() error { - return plugin.checkInitialized() + if plugin.getDefaultNetwork() == nil { + return errMissingDefaultNetwork + } + return nil } diff --git a/pkg/ocicni/ocicni_suite_test.go b/pkg/ocicni/ocicni_suite_test.go new file mode 100644 index 00000000..d580f36e --- /dev/null +++ b/pkg/ocicni/ocicni_suite_test.go @@ -0,0 +1,13 @@ +package ocicni + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + "testing" +) + +func TestOcicni(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "ocicni") +} diff --git a/pkg/ocicni/ocicni_test.go b/pkg/ocicni/ocicni_test.go new file mode 100644 index 00000000..b4670821 --- /dev/null +++ b/pkg/ocicni/ocicni_test.go @@ -0,0 +1,426 @@ +package ocicni + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "os" + "path/filepath" + "reflect" + "strings" + + "github.com/containernetworking/cni/pkg/types" + "github.com/containernetworking/cni/pkg/types/current" + "github.com/containernetworking/cni/pkg/version" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func writeConfig(dir, fileName, netName, plugin string) (string, string, error) { + confPath := filepath.Join(dir, fileName) + conf := fmt.Sprintf(`{ + "name": "%s", + "type": "%s", + "cniVersion": "0.3.1" +}`, netName, plugin) + return conf, confPath, ioutil.WriteFile(confPath, []byte(conf), 0644) +} + +type fakePlugin struct { + expectedEnv []string + expectedConf string + result types.Result + err error +} + +type fakeExec struct { + version.PluginDecoder + + addIndex int + delIndex int + plugins []*fakePlugin +} + +func (f *fakeExec) addLoopback() { + f.plugins = append(f.plugins, &fakePlugin{ + expectedConf: `{ + "cniVersion": "0.2.0", + "name": "cni-loopback", + "type": "loopback" +}`, + result: ¤t.Result{ + CNIVersion: "0.2.0", + }, + }) +} + +func (f *fakeExec) addPlugin(expectedEnv []string, expectedConf string, result *current.Result, err error) { + f.plugins = append(f.plugins, &fakePlugin{ + expectedEnv: expectedEnv, + expectedConf: expectedConf, + result: result, + err: err, + }) +} + +func matchArray(a1, a2 []string) { + Expect(len(a1)).To(Equal(len(a2))) + for _, e1 := range a1 { + found := "" + for _, e2 := range a2 { + if e1 == e2 { + found = e2 + break + } + } + // Compare element values for more descriptive test failure + Expect(e1).To(Equal(found)) + } +} + +func getCNICommand(env []string) (string, error) { + for _, e := range env { + parts := strings.Split(e, "=") + if len(parts) == 2 && parts[0] == "CNI_COMMAND" { + return parts[1], nil + } + } + return "", fmt.Errorf("failed to find CNI_COMMAND") +} + +func (f *fakeExec) ExecPlugin(pluginPath string, stdinData []byte, environ []string) ([]byte, error) { + cmd, err := getCNICommand(environ) + Expect(err).NotTo(HaveOccurred()) + var index int + switch cmd { + case "ADD": + Expect(len(f.plugins)).To(BeNumerically(">", f.addIndex)) + index = f.addIndex + f.addIndex++ + case "DEL": + Expect(len(f.plugins)).To(BeNumerically(">", f.delIndex)) + // +1 to skip loopback since it isn't run on DEL + index = f.delIndex + 1 + f.delIndex++ + default: + // Should never be reached + Expect(false).To(BeTrue()) + } + plugin := f.plugins[index] + + GinkgoT().Logf("[%s %d] exec plugin %q found %+v", cmd, index, pluginPath, plugin) + + if plugin.expectedConf != "" { + Expect(string(stdinData)).To(MatchJSON(plugin.expectedConf)) + } + if len(plugin.expectedEnv) > 0 { + matchArray(environ, plugin.expectedEnv) + } + + if plugin.err != nil { + return nil, plugin.err + } + + resultJSON, err := json.Marshal(plugin.result) + Expect(err).NotTo(HaveOccurred()) + return resultJSON, nil +} + +func (f *fakeExec) FindInPath(plugin string, paths []string) (string, error) { + Expect(len(paths)).To(BeNumerically(">", 0)) + return filepath.Join(paths[0], plugin), nil +} + +func ensureCIDR(cidr string) *net.IPNet { + ip, net, err := net.ParseCIDR(cidr) + 
Expect(err).NotTo(HaveOccurred()) + net.IP = ip + return net +} + +var _ = Describe("ocicni operations", func() { + var ( + tmpDir string + cacheDir string + ) + + BeforeEach(func() { + var err error + tmpDir, err = ioutil.TempDir("", "ocicni_tmp") + Expect(err).NotTo(HaveOccurred()) + cacheDir, err = ioutil.TempDir("", "ocicni_cache") + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + err := os.RemoveAll(tmpDir) + Expect(err).NotTo(HaveOccurred()) + err = os.RemoveAll(cacheDir) + Expect(err).NotTo(HaveOccurred()) + }) + + It("finds an existing default network configuration", func() { + _, _, err := writeConfig(tmpDir, "5-notdefault.conf", "notdefault", "myplugin") + Expect(err).NotTo(HaveOccurred()) + _, _, err = writeConfig(tmpDir, "10-test.conf", "test", "myplugin") + Expect(err).NotTo(HaveOccurred()) + + ocicni, err := InitCNI("test", tmpDir, "/opt/cni/bin") + Expect(err).NotTo(HaveOccurred()) + Expect(ocicni.Status()).NotTo(HaveOccurred()) + + // Ensure the default network is the one we expect + tmp := ocicni.(*cniNetworkPlugin) + net := tmp.getDefaultNetwork() + Expect(net.name).To(Equal("test")) + Expect(len(net.NetworkConfig.Plugins)).To(BeNumerically(">", 0)) + Expect(net.NetworkConfig.Plugins[0].Network.Type).To(Equal("myplugin")) + + ocicni.Shutdown() + }) + + It("finds an asynchronously written default network configuration", func() { + ocicni, err := InitCNI("test", tmpDir, "/opt/cni/bin") + Expect(err).NotTo(HaveOccurred()) + + // Writing a config that doesn't match the default network + _, _, err = writeConfig(tmpDir, "5-notdefault.conf", "notdefault", "myplugin") + Expect(err).NotTo(HaveOccurred()) + Consistently(ocicni.Status, 5).Should(HaveOccurred()) + + _, _, err = writeConfig(tmpDir, "10-test.conf", "test", "myplugin") + Expect(err).NotTo(HaveOccurred()) + Eventually(ocicni.Status, 5).Should(Succeed()) + + tmp := ocicni.(*cniNetworkPlugin) + net := tmp.getDefaultNetwork() + Expect(net.name).To(Equal("test")) + Expect(len(net.NetworkConfig.Plugins)).To(BeNumerically(">", 0)) + Expect(net.NetworkConfig.Plugins[0].Network.Type).To(Equal("myplugin")) + + ocicni.Shutdown() + }) + + It("finds and refinds an asynchronously written default network configuration", func() { + ocicni, err := InitCNI("test", tmpDir, "/opt/cni/bin") + Expect(err).NotTo(HaveOccurred()) + + // Write the default network config + _, confPath, err := writeConfig(tmpDir, "10-test.conf", "test", "myplugin") + Expect(err).NotTo(HaveOccurred()) + Eventually(ocicni.Status, 5).Should(Succeed()) + + tmp := ocicni.(*cniNetworkPlugin) + net := tmp.getDefaultNetwork() + Expect(net.name).To(Equal("test")) + Expect(len(net.NetworkConfig.Plugins)).To(BeNumerically(">", 0)) + Expect(net.NetworkConfig.Plugins[0].Network.Type).To(Equal("myplugin")) + + // Delete the default network config, ensure ocicni begins to + // return a status error + err = os.Remove(confPath) + Expect(err).NotTo(HaveOccurred()) + Eventually(ocicni.Status, 5).Should(HaveOccurred()) + + // Write the default network config again and wait for status + // to be OK + _, _, err = writeConfig(tmpDir, "10-test.conf", "test", "myplugin") + Expect(err).NotTo(HaveOccurred()) + Eventually(ocicni.Status, 5).Should(Succeed()) + + ocicni.Shutdown() + }) + + It("finds an the asciibetically first network configuration as default if given no default network name", func() { + ocicni, err := InitCNI("", tmpDir, "/opt/cni/bin") + Expect(err).NotTo(HaveOccurred()) + + _, _, err = writeConfig(tmpDir, "10-test.conf", "test", "myplugin") + 
Expect(err).NotTo(HaveOccurred()) + _, _, err = writeConfig(tmpDir, "5-notdefault.conf", "notdefault", "myplugin") + Expect(err).NotTo(HaveOccurred()) + + Eventually(ocicni.Status, 5).Should(Succeed()) + + tmp := ocicni.(*cniNetworkPlugin) + net := tmp.getDefaultNetwork() + Expect(net.name).To(Equal("test")) + Expect(len(net.NetworkConfig.Plugins)).To(BeNumerically(">", 0)) + Expect(net.NetworkConfig.Plugins[0].Network.Type).To(Equal("myplugin")) + + ocicni.Shutdown() + }) + + It("returns correct default network from loadNetworks()", func() { + // Writing a config that doesn't match the default network + _, _, err := writeConfig(tmpDir, "5-network1.conf", "network1", "myplugin") + Expect(err).NotTo(HaveOccurred()) + _, _, err = writeConfig(tmpDir, "10-network2.conf", "network2", "myplugin") + Expect(err).NotTo(HaveOccurred()) + _, _, err = writeConfig(tmpDir, "30-network3.conf", "network3", "myplugin") + Expect(err).NotTo(HaveOccurred()) + _, _, err = writeConfig(tmpDir, "afdsfdsafdsa-network3.conf", "network4", "myplugin") + Expect(err).NotTo(HaveOccurred()) + + netMap, defname, err := loadNetworks(nil, tmpDir, []string{"/opt/cni/bin"}) + Expect(err).NotTo(HaveOccurred()) + Expect(len(netMap)).To(Equal(4)) + // filenames are sorted asciibetically + Expect(defname).To(Equal("network2")) + }) + + It("returns no error from loadNetworks() when no config files exist", func() { + netMap, defname, err := loadNetworks(nil, tmpDir, []string{"/opt/cni/bin"}) + Expect(err).NotTo(HaveOccurred()) + Expect(len(netMap)).To(Equal(0)) + // filenames are sorted asciibetically + Expect(defname).To(Equal("")) + }) + + It("ignores subsequent duplicate network names in loadNetworks()", func() { + // Writing a config that doesn't match the default network + _, _, err := writeConfig(tmpDir, "10-network2.conf", "network2", "myplugin") + Expect(err).NotTo(HaveOccurred()) + _, _, err = writeConfig(tmpDir, "30-network3.conf", "network3", "myplugin") + Expect(err).NotTo(HaveOccurred()) + _, _, err = writeConfig(tmpDir, "5-network1.conf", "network2", "myplugin2") + Expect(err).NotTo(HaveOccurred()) + + netMap, _, err := loadNetworks(nil, tmpDir, []string{"/opt/cni/bin"}) + Expect(err).NotTo(HaveOccurred()) + + // We expect the type=myplugin network to be ignored since it + // was read earlier than the type=myplugin2 network with the same name + Expect(len(netMap)).To(Equal(2)) + net, ok := netMap["network2"] + Expect(ok).To(BeTrue()) + Expect(net.NetworkConfig.Plugins[0].Network.Type).To(Equal("myplugin2")) + }) + + It("sets up and tears down a pod using the default network", func() { + conf, _, err := writeConfig(tmpDir, "10-network2.conf", "network2", "myplugin") + Expect(err).NotTo(HaveOccurred()) + + fake := &fakeExec{} + fake.addLoopback() + expectedResult := ¤t.Result{ + CNIVersion: "0.3.1", + Interfaces: []*current.Interface{ + { + Name: "eth0", + Mac: "01:23:45:67:89:01", + Sandbox: "/foo/bar/netns", + }, + }, + IPs: []*current.IPConfig{ + { + Interface: current.Int(0), + Version: "4", + Address: *ensureCIDR("1.1.1.2/24"), + }, + }, + } + fake.addPlugin(nil, conf, expectedResult, nil) + + ocicni, err := initCNI(fake, cacheDir, "network2", tmpDir, "/opt/cni/bin") + Expect(err).NotTo(HaveOccurred()) + + podNet := PodNetwork{ + Name: "pod1", + Namespace: "namespace1", + ID: "1234567890", + NetNS: "/foo/bar/netns", + } + results, err := ocicni.SetUpPod(podNet) + Expect(err).NotTo(HaveOccurred()) + Expect(fake.addIndex).To(Equal(len(fake.plugins))) + Expect(len(results)).To(Equal(1)) + r := 
results[0].(*current.Result) + Expect(reflect.DeepEqual(r, expectedResult)).To(BeTrue()) + + err = ocicni.TearDownPod(podNet) + Expect(err).NotTo(HaveOccurred()) + // -1 because loopback doesn't get torn down + Expect(fake.delIndex).To(Equal(len(fake.plugins) - 1)) + + ocicni.Shutdown() + }) + + It("sets up and tears down a pod using specified networks", func() { + _, _, err := writeConfig(tmpDir, "10-network2.conf", "network2", "myplugin") + Expect(err).NotTo(HaveOccurred()) + + conf1, _, err := writeConfig(tmpDir, "20-network3.conf", "network3", "myplugin") + Expect(err).NotTo(HaveOccurred()) + conf2, _, err := writeConfig(tmpDir, "30-network4.conf", "network4", "myplugin") + Expect(err).NotTo(HaveOccurred()) + + fake := &fakeExec{} + fake.addLoopback() + expectedResult1 := ¤t.Result{ + CNIVersion: "0.3.1", + Interfaces: []*current.Interface{ + { + Name: "eth0", + Mac: "01:23:45:67:89:01", + Sandbox: "/foo/bar/netns", + }, + }, + IPs: []*current.IPConfig{ + { + Interface: current.Int(0), + Version: "4", + Address: *ensureCIDR("1.1.1.2/24"), + }, + }, + } + fake.addPlugin(nil, conf1, expectedResult1, nil) + + expectedResult2 := ¤t.Result{ + CNIVersion: "0.3.1", + Interfaces: []*current.Interface{ + { + Name: "eth1", + Mac: "01:23:45:67:89:02", + Sandbox: "/foo/bar/netns", + }, + }, + IPs: []*current.IPConfig{ + { + Interface: current.Int(0), + Version: "4", + Address: *ensureCIDR("1.1.1.3/24"), + }, + }, + } + fake.addPlugin(nil, conf2, expectedResult2, nil) + + ocicni, err := initCNI(fake, cacheDir, "network2", tmpDir, "/opt/cni/bin") + Expect(err).NotTo(HaveOccurred()) + + podNet := PodNetwork{ + Name: "pod1", + Namespace: "namespace1", + ID: "1234567890", + NetNS: "/foo/bar/netns", + Networks: []string{"network3", "network4"}, + } + results, err := ocicni.SetUpPod(podNet) + Expect(err).NotTo(HaveOccurred()) + Expect(fake.addIndex).To(Equal(len(fake.plugins))) + Expect(len(results)).To(Equal(2)) + r := results[0].(*current.Result) + Expect(reflect.DeepEqual(r, expectedResult1)).To(BeTrue()) + r = results[1].(*current.Result) + Expect(reflect.DeepEqual(r, expectedResult2)).To(BeTrue()) + + err = ocicni.TearDownPod(podNet) + Expect(err).NotTo(HaveOccurred()) + // -1 because loopback doesn't get torn down + Expect(fake.delIndex).To(Equal(len(fake.plugins) - 1)) + + ocicni.Shutdown() + }) +}) diff --git a/pkg/ocicni/types.go b/pkg/ocicni/types.go index 39e9b591..8ca61657 100644 --- a/pkg/ocicni/types.go +++ b/pkg/ocicni/types.go @@ -36,6 +36,10 @@ type PodNetwork struct { NetNS string // PortMappings is the port mapping of the sandbox. PortMappings []PortMapping + + // Networks is a list of CNI network names to attach to the sandbox + // Leave this list empty to attach the default network to the sandbox + Networks []string } // CNIPlugin is the interface that needs to be implemented by a plugin @@ -47,14 +51,17 @@ type CNIPlugin interface { // SetUpPod is the method called after the sandbox container of // the pod has been created but before the other containers of the // pod are launched. 
- SetUpPod(network PodNetwork) (types.Result, error) + SetUpPod(network PodNetwork) ([]types.Result, error) // TearDownPod is the method called before a pod's sandbox container will be deleted TearDownPod(network PodNetwork) error // Status is the method called to obtain the ipv4 or ipv6 addresses of the pod sandbox - GetPodNetworkStatus(network PodNetwork) (string, error) + GetPodNetworkStatus(network PodNetwork) ([]types.Result, error) // NetworkStatus returns error if the network plugin is in error state Status() error + + // Shutdown terminates all driver operations + Shutdown() error } diff --git a/pkg/ocicni/types_unix.go b/pkg/ocicni/types_unix.go index 21e713ff..88010f73 100644 --- a/pkg/ocicni/types_unix.go +++ b/pkg/ocicni/types_unix.go @@ -3,10 +3,8 @@ package ocicni const ( - // DefaultNetDir is the place to look for CNI Network - DefaultNetDir = "/etc/cni/net.d" - // DefaultCNIDir is the place to look for cni config files - DefaultCNIDir = "/opt/cni/bin" - // VendorCNIDirTemplate is the template for looking up vendor specific cni config/executable files - VendorCNIDirTemplate = "%s/opt/%s/bin" + // DefaultConfDir is the default place to look for CNI Network + DefaultConfDir = "/etc/cni/net.d" + // DefaultBinDir is the default place to look for CNI config files + DefaultBinDir = "/opt/cni/bin" ) diff --git a/pkg/ocicni/types_windows.go b/pkg/ocicni/types_windows.go index f8b434c1..061ecae5 100644 --- a/pkg/ocicni/types_windows.go +++ b/pkg/ocicni/types_windows.go @@ -3,10 +3,8 @@ package ocicni const ( - // DefaultNetDir is the place to look for CNI Network - DefaultNetDir = "C:\\cni\\etc\\net.d" - // DefaultCNIDir is the place to look for cni config files - DefaultCNIDir = "C:\\cni\\bin" - // VendorCNIDirTemplate is the template for looking up vendor specific cni config/executable files - VendorCNIDirTemplate = "C:\\cni\\%s\\opt\\%s\\bin" // XXX(vbatts) Not sure what to do here ... 
+ // DefaultConfDir is the default place to look for CNI Network + DefaultConfDir = "C:\\cni\\etc\\net.d" + // DefaultBinDir is the default place to look for cni config files + DefaultBinDir = "C:\\cni\\bin" ) diff --git a/pkg/ocicni/util_linux.go b/pkg/ocicni/util_linux.go index de903a2b..d263ae4d 100644 --- a/pkg/ocicni/util_linux.go +++ b/pkg/ocicni/util_linux.go @@ -21,26 +21,51 @@ func (nsm *nsManager) init() error { return err } -func getContainerIP(nsm *nsManager, netnsPath, interfaceName, addrType string) (net.IP, error) { +func getContainerDetails(nsm *nsManager, netnsPath, interfaceName, addrType string) (*net.IPNet, *net.HardwareAddr, error) { // Try to retrieve ip inside container network namespace output, err := exec.Command(nsm.nsenterPath, fmt.Sprintf("--net=%s", netnsPath), "-F", "--", "ip", "-o", addrType, "addr", "show", "dev", interfaceName, "scope", "global").CombinedOutput() if err != nil { - return nil, fmt.Errorf("Unexpected command output %s with error: %v", output, err) + return nil, nil, fmt.Errorf("Unexpected command output %s with error: %v", output, err) } lines := strings.Split(string(output), "\n") if len(lines) < 1 { - return nil, fmt.Errorf("Unexpected command output %s", output) + return nil, nil, fmt.Errorf("Unexpected command output %s", output) } fields := strings.Fields(lines[0]) if len(fields) < 4 { - return nil, fmt.Errorf("Unexpected address output %s ", lines[0]) + return nil, nil, fmt.Errorf("Unexpected address output %s ", lines[0]) } - ip, _, err := net.ParseCIDR(fields[3]) + ip, ipNet, err := net.ParseCIDR(fields[3]) if err != nil { - return nil, fmt.Errorf("CNI failed to parse ip from output %s due to %v", output, err) + return nil, nil, fmt.Errorf("CNI failed to parse ip from output %s due to %v", output, err) + } + if ip.To4() == nil { + ipNet.IP = ip + } else { + ipNet.IP = ip.To4() + } + + // Try to retrieve MAC inside container network namespace + output, err = exec.Command(nsm.nsenterPath, fmt.Sprintf("--net=%s", netnsPath), "-F", "--", + "ip", "link", "show", "dev", interfaceName).CombinedOutput() + if err != nil { + return nil, nil, fmt.Errorf("unexpected 'ip link' command output %s with error: %v", output, err) + } + + lines = strings.Split(string(output), "\n") + if len(lines) < 2 { + return nil, nil, fmt.Errorf("unexpected 'ip link' command output %s", output) + } + fields = strings.Fields(lines[1]) + if len(fields) < 4 { + return nil, nil, fmt.Errorf("unexpected link output %s ", lines[0]) + } + mac, err := net.ParseMAC(fields[1]) + if err != nil { + return nil, nil, fmt.Errorf("failed to parse MAC from output %s due to %v", output, err) } - return ip, nil + return ipNet, &mac, nil } diff --git a/pkg/ocicni/util_unsupported.go b/pkg/ocicni/util_unsupported.go index 06295902..0b1c7220 100644 --- a/pkg/ocicni/util_unsupported.go +++ b/pkg/ocicni/util_unsupported.go @@ -14,6 +14,6 @@ func (nsm *nsManager) init() error { return nil } -func getContainerIP(nsm *nsManager, netnsPath, interfaceName, addrType string) (net.IP, error) { - return nil, fmt.Errorf("not supported yet") +func getContainerDetails(nsm *nsManager, netnsPath, interfaceName, addrType string) (*net.IPNet, *net.HardwareAddr, error) { + return nil, nil, fmt.Errorf("not supported yet") } diff --git a/tools/ocicnitool/ocicnitool.go b/tools/ocicnitool/ocicnitool.go index 2e7caca5..a65813b3 100644 --- a/tools/ocicnitool/ocicnitool.go +++ b/tools/ocicnitool/ocicnitool.go @@ -1,12 +1,15 @@ package main import ( + "flag" "fmt" "os" "path/filepath" + "strings" - 
"github.com/cri-o/ocicni/pkg/ocicni" + cnitypes "github.com/containernetworking/cni/pkg/types" cnicurrent "github.com/containernetworking/cni/pkg/types/current" + "github.com/cri-o/ocicni/pkg/ocicni" ) const ( @@ -21,9 +24,49 @@ const ( CmdDel = "del" ) +func printSandboxResults(results []cnitypes.Result) { + for _, r := range results { + result, _ := cnicurrent.NewResultFromResult(r) + if result != nil { + result030, _ := cnicurrent.GetResult(result) + if result030 != nil { + for _, ip := range result030.IPs { + intfDetails := "" + if ip.Interface != nil && *ip.Interface >= 0 && *ip.Interface < len(result030.Interfaces) { + intf := result030.Interfaces[*ip.Interface] + // Only print container sandbox interfaces (not host ones) + if intf.Sandbox != "" { + intfDetails = fmt.Sprintf(" (%s %s)", intf.Name, intf.Mac) + } + } + fmt.Fprintf(os.Stdout, "IP: %s%s\n", ip.Address.String(), intfDetails) + } + } + } + } +} + func main() { - if len(os.Args) < 6 { - usage() + networksStr := flag.String("networks", "", "comma-separated list of CNI network names (optional)") + flag.Parse() + networks := make([]string, 0) + for _, name := range strings.Split(*networksStr, ",") { + if len(name) > 0 { + networks = append(networks, name) + } + } + + flag.Usage = func() { + exe := filepath.Base(os.Args[0]) + + fmt.Fprintf(os.Stderr, "%s: Add or remove CNI networks from a network namespace\n", exe) + fmt.Fprintf(os.Stderr, " %s [-networks name[,name...]] %s \n", exe, CmdAdd) + fmt.Fprintf(os.Stderr, " %s [-networks name[,name...]] %s \n", exe, CmdStatus) + fmt.Fprintf(os.Stderr, " %s [-networks name[,name...]] %s \n", exe, CmdDel) + } + + if len(flag.Args()) < 5 { + flag.Usage() return } @@ -36,60 +79,37 @@ func main() { bindir = DefaultBinDir } - plugin, err := ocicni.InitCNI(confdir, bindir) + plugin, err := ocicni.InitCNI("", confdir, bindir) if err != nil { exit(err) } podNetwork := ocicni.PodNetwork{ - Namespace: os.Args[2], - Name: os.Args[3], - ID: os.Args[4], - NetNS: os.Args[5], + Namespace: flag.Args()[1], + Name: flag.Args()[2], + ID: flag.Args()[3], + NetNS: flag.Args()[4], + Networks: networks, } - switch os.Args[1] { + switch flag.Args()[0] { case CmdAdd: - result, err := plugin.SetUpPod(podNetwork) - if result != nil { - var result030 *cnicurrent.Result - result030, err = cnicurrent.GetResult(result) - if result030 != nil { - for _, ip := range result030.IPs { - intfDetails := "" - if *ip.Interface >= 0 && *ip.Interface < len(result030.Interfaces) { - intf := result030.Interfaces[*ip.Interface] - // Only print container sandbox interfaces (not host ones) - if intf.Sandbox != "" { - intfDetails = fmt.Sprintf(" (%s %s)", intf.Name, intf.Mac) - } - } - fmt.Fprintf(os.Stdout, "IP: %s%s\n", ip.Address.String(), intfDetails) - } - } + results, err := plugin.SetUpPod(podNetwork) + if err == nil { + printSandboxResults(results) } exit(err) case CmdStatus: - ip, err := plugin.GetPodNetworkStatus(podNetwork) - if err != nil { - exit(err) + results, err := plugin.GetPodNetworkStatus(podNetwork) + if err == nil { + printSandboxResults(results) } - fmt.Fprintf(os.Stdout, "IP: %s\n", ip) + exit(err) case CmdDel: exit(plugin.TearDownPod(podNetwork)) } } -func usage() { - exe := filepath.Base(os.Args[0]) - - fmt.Fprintf(os.Stderr, "%s: Add or remove network interfaces from a network namespace\n", exe) - fmt.Fprintf(os.Stderr, " %s %s \n", exe, CmdAdd) - fmt.Fprintf(os.Stderr, " %s %s \n", exe, CmdStatus) - fmt.Fprintf(os.Stderr, " %s %s \n", exe, CmdDel) - os.Exit(1) -} - func 
exit(err error) { if err != nil { fmt.Fprintf(os.Stderr, "%s\n", err) diff --git a/vendor/github.com/containernetworking/cni/libcni/api.go b/vendor/github.com/containernetworking/cni/libcni/api.go index a23cbb2c..d494e43d 100644 --- a/vendor/github.com/containernetworking/cni/libcni/api.go +++ b/vendor/github.com/containernetworking/cni/libcni/api.go @@ -15,7 +15,11 @@ package libcni import ( + "encoding/json" + "fmt" + "io/ioutil" "os" + "path/filepath" "strings" "github.com/containernetworking/cni/pkg/invoke" @@ -23,6 +27,14 @@ import ( "github.com/containernetworking/cni/pkg/version" ) +var ( + CacheDir = "/var/lib/cni" +) + +// A RuntimeConf holds the arguments to one invocation of a CNI plugin +// excepting the network configuration, with the nested exception that +// the `runtimeConfig` from the network configuration is included +// here. type RuntimeConf struct { ContainerID string NetNS string @@ -34,6 +46,9 @@ type RuntimeConf struct { // in this map which match the capabilities of the plugin are passed // to the plugin CapabilityArgs map[string]interface{} + + // A cache directory in which to library data. Defaults to CacheDir + CacheDir string } type NetworkConfig struct { @@ -50,25 +65,38 @@ type NetworkConfigList struct { type CNI interface { AddNetworkList(net *NetworkConfigList, rt *RuntimeConf) (types.Result, error) + GetNetworkList(net *NetworkConfigList, rt *RuntimeConf) (types.Result, error) DelNetworkList(net *NetworkConfigList, rt *RuntimeConf) error AddNetwork(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) + GetNetwork(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) DelNetwork(net *NetworkConfig, rt *RuntimeConf) error } type CNIConfig struct { Path []string + exec invoke.Exec } // CNIConfig implements the CNI interface var _ CNI = &CNIConfig{} -func buildOneConfig(list *NetworkConfigList, orig *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (*NetworkConfig, error) { +// NewCNIConfig returns a new CNIConfig object that will search for plugins +// in the given paths and use the given exec interface to run those plugins, +// or if the exec interface is not given, will use a default exec handler. 
+func NewCNIConfig(path []string, exec invoke.Exec) *CNIConfig { + return &CNIConfig{ + Path: path, + exec: exec, + } +} + +func buildOneConfig(name, cniVersion string, orig *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (*NetworkConfig, error) { var err error inject := map[string]interface{}{ - "name": list.Name, - "cniVersion": list.CNIVersion, + "name": name, + "cniVersion": cniVersion, } // Add previous plugin result if prevResult != nil { @@ -119,21 +147,37 @@ func injectRuntimeConfig(orig *NetworkConfig, rt *RuntimeConf) (*NetworkConfig, return orig, nil } -// AddNetworkList executes a sequence of plugins with the ADD command -func (c *CNIConfig) AddNetworkList(list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) { - var prevResult types.Result - for _, net := range list.Plugins { - pluginPath, err := invoke.FindInPath(net.Network.Type, c.Path) - if err != nil { - return nil, err +// ensure we have a usable exec if the CNIConfig was not given one +func (c *CNIConfig) ensureExec() invoke.Exec { + if c.exec == nil { + c.exec = &invoke.DefaultExec{ + RawExec: &invoke.RawExec{Stderr: os.Stderr}, + PluginDecoder: version.PluginDecoder{}, } + } + return c.exec +} - newConf, err := buildOneConfig(list, net, prevResult, rt) - if err != nil { - return nil, err - } +func (c *CNIConfig) addOrGetNetwork(command, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (types.Result, error) { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) + if err != nil { + return nil, err + } - prevResult, err = invoke.ExecPluginWithResult(pluginPath, newConf.Bytes, c.args("ADD", rt)) + newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt) + if err != nil { + return nil, err + } + + return invoke.ExecPluginWithResult(pluginPath, newConf.Bytes, c.args(command, rt), c.exec) +} + +// Note that only GET requests should pass an initial prevResult +func (c *CNIConfig) addOrGetNetworkList(command string, prevResult types.Result, list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) { + var err error + for _, net := range list.Plugins { + prevResult, err = c.addOrGetNetwork(command, list.Name, list.CNIVersion, net, prevResult, rt) if err != nil { return nil, err } @@ -142,68 +186,194 @@ func (c *CNIConfig) AddNetworkList(list *NetworkConfigList, rt *RuntimeConf) (ty return prevResult, nil } +func getResultCacheFilePath(netName string, rt *RuntimeConf) string { + cacheDir := rt.CacheDir + if cacheDir == "" { + cacheDir = CacheDir + } + return filepath.Join(cacheDir, "results", fmt.Sprintf("%s-%s", netName, rt.ContainerID)) +} + +func setCachedResult(result types.Result, netName string, rt *RuntimeConf) error { + data, err := json.Marshal(result) + if err != nil { + return err + } + fname := getResultCacheFilePath(netName, rt) + if err := os.MkdirAll(filepath.Dir(fname), 0700); err != nil { + return err + } + return ioutil.WriteFile(fname, data, 0600) +} + +func delCachedResult(netName string, rt *RuntimeConf) error { + fname := getResultCacheFilePath(netName, rt) + return os.Remove(fname) +} + +func getCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result, error) { + fname := getResultCacheFilePath(netName, rt) + data, err := ioutil.ReadFile(fname) + if err != nil { + // Ignore read errors; the cached result may not exist on-disk + return nil, nil + } + + // Read the version of the cached result + decoder := version.ConfigDecoder{} + resultCniVersion, err := decoder.Decode(data) + if err != 
nil { + return nil, err + } + + // Ensure we can understand the result + result, err := version.NewResult(resultCniVersion, data) + if err != nil { + return nil, err + } + + // Convert to the config version to ensure plugins get prevResult + // in the same version as the config. The cached result version + // should match the config version unless the config was changed + // while the container was running. + result, err = result.GetAsVersion(cniVersion) + if err != nil && resultCniVersion != cniVersion { + return nil, fmt.Errorf("failed to convert cached result version %q to config version %q: %v", resultCniVersion, cniVersion, err) + } + return result, err +} + +// AddNetworkList executes a sequence of plugins with the ADD command +func (c *CNIConfig) AddNetworkList(list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) { + result, err := c.addOrGetNetworkList("ADD", nil, list, rt) + if err != nil { + return nil, err + } + + if err = setCachedResult(result, list.Name, rt); err != nil { + return nil, fmt.Errorf("failed to set network '%s' cached result: %v", list.Name, err) + } + + return result, nil +} + +// GetNetworkList executes a sequence of plugins with the GET command +func (c *CNIConfig) GetNetworkList(list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) { + // GET was added in CNI spec version 0.4.0 and higher + if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil { + return nil, err + } else if !gtet { + return nil, fmt.Errorf("configuration version %q does not support the GET command", list.CNIVersion) + } + + cachedResult, err := getCachedResult(list.Name, list.CNIVersion, rt) + if err != nil { + return nil, fmt.Errorf("failed to get network '%s' cached result: %v", list.Name, err) + } + return c.addOrGetNetworkList("GET", cachedResult, list, rt) +} + +func (c *CNIConfig) delNetwork(name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) + if err != nil { + return err + } + + newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt) + if err != nil { + return err + } + + return invoke.ExecPluginWithoutResult(pluginPath, newConf.Bytes, c.args("DEL", rt), c.exec) +} + // DelNetworkList executes a sequence of plugins with the DEL command func (c *CNIConfig) DelNetworkList(list *NetworkConfigList, rt *RuntimeConf) error { - for i := len(list.Plugins) - 1; i >= 0; i-- { - net := list.Plugins[i] - - pluginPath, err := invoke.FindInPath(net.Network.Type, c.Path) - if err != nil { - return err - } + var cachedResult types.Result - newConf, err := buildOneConfig(list, net, nil, rt) + // Cached result on DEL was added in CNI spec version 0.4.0 and higher + if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil { + return err + } else if gtet { + cachedResult, err = getCachedResult(list.Name, list.CNIVersion, rt) if err != nil { - return err + return fmt.Errorf("failed to get network '%s' cached result: %v", list.Name, err) } + } - if err := invoke.ExecPluginWithoutResult(pluginPath, newConf.Bytes, c.args("DEL", rt)); err != nil { + for i := len(list.Plugins) - 1; i >= 0; i-- { + net := list.Plugins[i] + if err := c.delNetwork(list.Name, list.CNIVersion, net, cachedResult, rt); err != nil { return err } } + _ = delCachedResult(list.Name, rt) return nil } // AddNetwork executes the plugin with the ADD command func (c *CNIConfig) AddNetwork(net *NetworkConfig, rt *RuntimeConf) 
(types.Result, error) { - pluginPath, err := invoke.FindInPath(net.Network.Type, c.Path) + result, err := c.addOrGetNetwork("ADD", net.Network.Name, net.Network.CNIVersion, net, nil, rt) if err != nil { return nil, err } - net, err = injectRuntimeConfig(net, rt) - if err != nil { + if err = setCachedResult(result, net.Network.Name, rt); err != nil { + return nil, fmt.Errorf("failed to set network '%s' cached result: %v", net.Network.Name, err) + } + + return result, nil +} + +// GetNetwork executes the plugin with the GET command +func (c *CNIConfig) GetNetwork(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) { + // GET was added in CNI spec version 0.4.0 and higher + if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil { return nil, err + } else if !gtet { + return nil, fmt.Errorf("configuration version %q does not support the GET command", net.Network.CNIVersion) } - return invoke.ExecPluginWithResult(pluginPath, net.Bytes, c.args("ADD", rt)) + cachedResult, err := getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) + if err != nil { + return nil, fmt.Errorf("failed to get network '%s' cached result: %v", net.Network.Name, err) + } + return c.addOrGetNetwork("GET", net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt) } // DelNetwork executes the plugin with the DEL command func (c *CNIConfig) DelNetwork(net *NetworkConfig, rt *RuntimeConf) error { - pluginPath, err := invoke.FindInPath(net.Network.Type, c.Path) - if err != nil { + var cachedResult types.Result + + // Cached result on DEL was added in CNI spec version 0.4.0 and higher + if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil { return err + } else if gtet { + cachedResult, err = getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) + if err != nil { + return fmt.Errorf("failed to get network '%s' cached result: %v", net.Network.Name, err) + } } - net, err = injectRuntimeConfig(net, rt) - if err != nil { + if err := c.delNetwork(net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt); err != nil { return err } - - return invoke.ExecPluginWithoutResult(pluginPath, net.Bytes, c.args("DEL", rt)) + _ = delCachedResult(net.Network.Name, rt) + return nil } // GetVersionInfo reports which versions of the CNI spec are supported by // the given plugin. 
func (c *CNIConfig) GetVersionInfo(pluginType string) (version.PluginInfo, error) { - pluginPath, err := invoke.FindInPath(pluginType, c.Path) + c.ensureExec() + pluginPath, err := c.exec.FindInPath(pluginType, c.Path) if err != nil { return nil, err } - return invoke.GetVersionInfo(pluginPath) + return invoke.GetVersionInfo(pluginPath, c.exec) } // ===== diff --git a/vendor/github.com/containernetworking/cni/libcni/conf.go b/vendor/github.com/containernetworking/cni/libcni/conf.go index c7738c66..9834d715 100644 --- a/vendor/github.com/containernetworking/cni/libcni/conf.go +++ b/vendor/github.com/containernetworking/cni/libcni/conf.go @@ -45,6 +45,9 @@ func ConfFromBytes(bytes []byte) (*NetworkConfig, error) { if err := json.Unmarshal(bytes, &conf.Network); err != nil { return nil, fmt.Errorf("error parsing configuration: %s", err) } + if conf.Network.Type == "" { + return nil, fmt.Errorf("error parsing configuration: missing 'type'") + } return conf, nil } diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go b/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go index c78a69ee..21efdf80 100644 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go @@ -22,32 +22,54 @@ import ( "github.com/containernetworking/cni/pkg/types" ) -func DelegateAdd(delegatePlugin string, netconf []byte) (types.Result, error) { - if os.Getenv("CNI_COMMAND") != "ADD" { - return nil, fmt.Errorf("CNI_COMMAND is not ADD") +func delegateAddOrGet(command, delegatePlugin string, netconf []byte, exec Exec) (types.Result, error) { + if exec == nil { + exec = defaultExec } paths := filepath.SplitList(os.Getenv("CNI_PATH")) - - pluginPath, err := FindInPath(delegatePlugin, paths) + pluginPath, err := exec.FindInPath(delegatePlugin, paths) if err != nil { return nil, err } - return ExecPluginWithResult(pluginPath, netconf, ArgsFromEnv()) + return ExecPluginWithResult(pluginPath, netconf, ArgsFromEnv(), exec) +} + +// DelegateAdd calls the given delegate plugin with the CNI ADD action and +// JSON configuration +func DelegateAdd(delegatePlugin string, netconf []byte, exec Exec) (types.Result, error) { + if os.Getenv("CNI_COMMAND") != "ADD" { + return nil, fmt.Errorf("CNI_COMMAND is not ADD") + } + return delegateAddOrGet("ADD", delegatePlugin, netconf, exec) +} + +// DelegateGet calls the given delegate plugin with the CNI GET action and +// JSON configuration +func DelegateGet(delegatePlugin string, netconf []byte, exec Exec) (types.Result, error) { + if os.Getenv("CNI_COMMAND") != "GET" { + return nil, fmt.Errorf("CNI_COMMAND is not GET") + } + return delegateAddOrGet("GET", delegatePlugin, netconf, exec) } -func DelegateDel(delegatePlugin string, netconf []byte) error { +// DelegateDel calls the given delegate plugin with the CNI DEL action and +// JSON configuration +func DelegateDel(delegatePlugin string, netconf []byte, exec Exec) error { + if exec == nil { + exec = defaultExec + } + if os.Getenv("CNI_COMMAND") != "DEL" { return fmt.Errorf("CNI_COMMAND is not DEL") } paths := filepath.SplitList(os.Getenv("CNI_PATH")) - - pluginPath, err := FindInPath(delegatePlugin, paths) + pluginPath, err := exec.FindInPath(delegatePlugin, paths) if err != nil { return err } - return ExecPluginWithoutResult(pluginPath, netconf, ArgsFromEnv()) + return ExecPluginWithoutResult(pluginPath, netconf, ArgsFromEnv(), exec) } diff --git 
a/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go index fc47e7c8..cf019d3a 100644 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go @@ -22,34 +22,62 @@ import ( "github.com/containernetworking/cni/pkg/version" ) -func ExecPluginWithResult(pluginPath string, netconf []byte, args CNIArgs) (types.Result, error) { - return defaultPluginExec.WithResult(pluginPath, netconf, args) +// Exec is an interface encapsulates all operations that deal with finding +// and executing a CNI plugin. Tests may provide a fake implementation +// to avoid writing fake plugins to temporary directories during the test. +type Exec interface { + ExecPlugin(pluginPath string, stdinData []byte, environ []string) ([]byte, error) + FindInPath(plugin string, paths []string) (string, error) + Decode(jsonBytes []byte) (version.PluginInfo, error) } -func ExecPluginWithoutResult(pluginPath string, netconf []byte, args CNIArgs) error { - return defaultPluginExec.WithoutResult(pluginPath, netconf, args) -} - -func GetVersionInfo(pluginPath string) (version.PluginInfo, error) { - return defaultPluginExec.GetVersionInfo(pluginPath) -} - -var defaultPluginExec = &PluginExec{ - RawExec: &RawExec{Stderr: os.Stderr}, - VersionDecoder: &version.PluginDecoder{}, -} +// For example, a testcase could pass an instance of the following fakeExec +// object to ExecPluginWithResult() to verify the incoming stdin and environment +// and provide a tailored response: +// +//import ( +// "encoding/json" +// "path" +// "strings" +//) +// +//type fakeExec struct { +// version.PluginDecoder +//} +// +//func (f *fakeExec) ExecPlugin(pluginPath string, stdinData []byte, environ []string) ([]byte, error) { +// net := &types.NetConf{} +// err := json.Unmarshal(stdinData, net) +// if err != nil { +// return nil, fmt.Errorf("failed to unmarshal configuration: %v", err) +// } +// pluginName := path.Base(pluginPath) +// if pluginName != net.Type { +// return nil, fmt.Errorf("plugin name %q did not match config type %q", pluginName, net.Type) +// } +// for _, e := range environ { +// // Check environment for forced failure request +// parts := strings.Split(e, "=") +// if len(parts) > 0 && parts[0] == "FAIL" { +// return nil, fmt.Errorf("failed to execute plugin %s", pluginName) +// } +// } +// return []byte("{\"CNIVersion\":\"0.4.0\"}"), nil +//} +// +//func (f *fakeExec) FindInPath(plugin string, paths []string) (string, error) { +// if len(paths) > 0 { +// return path.Join(paths[0], plugin), nil +// } +// return "", fmt.Errorf("failed to find plugin %s in paths %v", plugin, paths) +//} -type PluginExec struct { - RawExec interface { - ExecPlugin(pluginPath string, stdinData []byte, environ []string) ([]byte, error) +func ExecPluginWithResult(pluginPath string, netconf []byte, args CNIArgs, exec Exec) (types.Result, error) { + if exec == nil { + exec = defaultExec } - VersionDecoder interface { - Decode(jsonBytes []byte) (version.PluginInfo, error) - } -} -func (e *PluginExec) WithResult(pluginPath string, netconf []byte, args CNIArgs) (types.Result, error) { - stdoutBytes, err := e.RawExec.ExecPlugin(pluginPath, netconf, args.AsEnv()) + stdoutBytes, err := exec.ExecPlugin(pluginPath, netconf, args.AsEnv()) if err != nil { return nil, err } @@ -64,8 +92,11 @@ func (e *PluginExec) WithResult(pluginPath string, netconf []byte, args CNIArgs) return 
version.NewResult(confVersion, stdoutBytes) } -func (e *PluginExec) WithoutResult(pluginPath string, netconf []byte, args CNIArgs) error { - _, err := e.RawExec.ExecPlugin(pluginPath, netconf, args.AsEnv()) +func ExecPluginWithoutResult(pluginPath string, netconf []byte, args CNIArgs, exec Exec) error { + if exec == nil { + exec = defaultExec + } + _, err := exec.ExecPlugin(pluginPath, netconf, args.AsEnv()) return err } @@ -73,7 +104,10 @@ func (e *PluginExec) WithoutResult(pluginPath string, netconf []byte, args CNIAr // For recent-enough plugins, it uses the information returned by the VERSION // command. For older plugins which do not recognize that command, it reports // version 0.1.0 -func (e *PluginExec) GetVersionInfo(pluginPath string) (version.PluginInfo, error) { +func GetVersionInfo(pluginPath string, exec Exec) (version.PluginInfo, error) { + if exec == nil { + exec = defaultExec + } args := &Args{ Command: "VERSION", @@ -83,7 +117,7 @@ func (e *PluginExec) GetVersionInfo(pluginPath string) (version.PluginInfo, erro Path: "dummy", } stdin := []byte(fmt.Sprintf(`{"cniVersion":%q}`, version.Current())) - stdoutBytes, err := e.RawExec.ExecPlugin(pluginPath, stdin, args.AsEnv()) + stdoutBytes, err := exec.ExecPlugin(pluginPath, stdin, args.AsEnv()) if err != nil { if err.Error() == "unknown CNI_COMMAND: VERSION" { return version.PluginSupports("0.1.0"), nil @@ -91,5 +125,19 @@ func (e *PluginExec) GetVersionInfo(pluginPath string) (version.PluginInfo, erro return nil, err } - return e.VersionDecoder.Decode(stdoutBytes) + return exec.Decode(stdoutBytes) +} + +// DefaultExec is an object that implements the Exec interface which looks +// for and executes plugins from disk. +type DefaultExec struct { + *RawExec + version.PluginDecoder +} + +// DefaultExec implements the Exec interface +var _ Exec = &DefaultExec{} + +var defaultExec = &DefaultExec{ + RawExec: &RawExec{Stderr: os.Stderr}, } diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go b/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go index 93f1e75d..a598f09c 100644 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go @@ -57,3 +57,7 @@ func pluginErr(err error, output []byte) error { return err } + +func (e *RawExec) FindInPath(plugin string, paths []string) (string, error) { + return FindInPath(plugin, paths) +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/current/types.go b/vendor/github.com/containernetworking/cni/pkg/types/current/types.go index caac92ba..92980c1a 100644 --- a/vendor/github.com/containernetworking/cni/pkg/types/current/types.go +++ b/vendor/github.com/containernetworking/cni/pkg/types/current/types.go @@ -24,9 +24,9 @@ import ( "github.com/containernetworking/cni/pkg/types/020" ) -const ImplementedSpecVersion string = "0.3.1" +const ImplementedSpecVersion string = "0.4.0" -var SupportedVersions = []string{"0.3.0", ImplementedSpecVersion} +var SupportedVersions = []string{"0.3.0", "0.3.1", ImplementedSpecVersion} func NewResult(data []byte) (types.Result, error) { result := &Result{} @@ -196,7 +196,7 @@ func (r *Result) Version() string { func (r *Result) GetAsVersion(version string) (types.Result, error) { switch version { - case "0.3.0", ImplementedSpecVersion: + case "0.3.0", "0.3.1", ImplementedSpecVersion: r.CNIVersion = version return r, nil case types020.SupportedVersions[0], 
types020.SupportedVersions[1], types020.SupportedVersions[2]: diff --git a/vendor/github.com/containernetworking/cni/pkg/types/types.go b/vendor/github.com/containernetworking/cni/pkg/types/types.go index 64127560..4684a320 100644 --- a/vendor/github.com/containernetworking/cni/pkg/types/types.go +++ b/vendor/github.com/containernetworking/cni/pkg/types/types.go @@ -63,10 +63,12 @@ type NetConf struct { Name string `json:"name,omitempty"` Type string `json:"type,omitempty"` Capabilities map[string]bool `json:"capabilities,omitempty"` - IPAM struct { - Type string `json:"type,omitempty"` - } `json:"ipam,omitempty"` - DNS DNS `json:"dns"` + IPAM IPAM `json:"ipam,omitempty"` + DNS DNS `json:"dns"` +} + +type IPAM struct { + Type string `json:"type,omitempty"` } // NetConfList describes an ordered list of networks. @@ -167,7 +169,7 @@ func (r *Route) UnmarshalJSON(data []byte) error { return nil } -func (r *Route) MarshalJSON() ([]byte, error) { +func (r Route) MarshalJSON() ([]byte, error) { rt := route{ Dst: IPNet(r.Dst), GW: r.GW, diff --git a/vendor/github.com/containernetworking/cni/pkg/version/plugin.go b/vendor/github.com/containernetworking/cni/pkg/version/plugin.go index 8a467281..612335a8 100644 --- a/vendor/github.com/containernetworking/cni/pkg/version/plugin.go +++ b/vendor/github.com/containernetworking/cni/pkg/version/plugin.go @@ -18,6 +18,8 @@ import ( "encoding/json" "fmt" "io" + "strconv" + "strings" ) // PluginInfo reports information about CNI versioning @@ -79,3 +81,60 @@ func (*PluginDecoder) Decode(jsonBytes []byte) (PluginInfo, error) { } return &info, nil } + +// ParseVersion parses a version string like "3.0.1" or "0.4.5" into major, +// minor, and micro numbers or returns an error +func ParseVersion(version string) (int, int, int, error) { + var major, minor, micro int + parts := strings.Split(version, ".") + if len(parts) == 0 || len(parts) >= 4 { + return -1, -1, -1, fmt.Errorf("invalid version %q: too many or too few parts", version) + } + + major, err := strconv.Atoi(parts[0]) + if err != nil { + return -1, -1, -1, fmt.Errorf("failed to convert major version part %q: %v", parts[0], err) + } + + if len(parts) >= 2 { + minor, err = strconv.Atoi(parts[1]) + if err != nil { + return -1, -1, -1, fmt.Errorf("failed to convert minor version part %q: %v", parts[1], err) + } + } + + if len(parts) >= 3 { + micro, err = strconv.Atoi(parts[2]) + if err != nil { + return -1, -1, -1, fmt.Errorf("failed to convert micro version part %q: %v", parts[2], err) + } + } + + return major, minor, micro, nil +} + +// GreaterThanOrEqualTo takes two string versions, parses them into major/minor/micro +// nubmers, and compares them to determine whether the first version is greater +// than or equal to the second +func GreaterThanOrEqualTo(version, otherVersion string) (bool, error) { + firstMajor, firstMinor, firstMicro, err := ParseVersion(version) + if err != nil { + return false, err + } + + secondMajor, secondMinor, secondMicro, err := ParseVersion(otherVersion) + if err != nil { + return false, err + } + + if firstMajor > secondMajor { + return true, nil + } else if firstMajor == secondMajor { + if firstMinor > secondMinor { + return true, nil + } else if firstMinor == secondMinor && firstMicro >= secondMicro { + return true, nil + } + } + return false, nil +} diff --git a/vendor/github.com/containernetworking/cni/pkg/version/version.go b/vendor/github.com/containernetworking/cni/pkg/version/version.go 
index efe8ea87..c8e46d55 100644 --- a/vendor/github.com/containernetworking/cni/pkg/version/version.go +++ b/vendor/github.com/containernetworking/cni/pkg/version/version.go @@ -24,7 +24,7 @@ import ( // Current reports the version of the CNI spec implemented by this library func Current() string { - return "0.3.1" + return "0.4.0" } // Legacy PluginInfo describes a plugin that is backwards compatible with the @@ -35,7 +35,7 @@ func Current() string { // Any future CNI spec versions which meet this definition should be added to // this list. var Legacy = PluginSupports("0.1.0", "0.2.0") -var All = PluginSupports("0.1.0", "0.2.0", "0.3.0", "0.3.1") +var All = PluginSupports("0.1.0", "0.2.0", "0.3.0", "0.3.1", "0.4.0") var resultFactories = []struct { supportedVersions []string diff --git a/vendor/github.com/onsi/ginkgo/.gitignore b/vendor/github.com/onsi/ginkgo/.gitignore new file mode 100644 index 00000000..922b4f7f --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/.gitignore @@ -0,0 +1,4 @@ +.DS_Store +TODO +tmp/**/* +*.coverprofile \ No newline at end of file diff --git a/vendor/github.com/onsi/ginkgo/.travis.yml b/vendor/github.com/onsi/ginkgo/.travis.yml new file mode 100644 index 00000000..f0e67b84 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/.travis.yml @@ -0,0 +1,15 @@ +language: go +go: + - 1.3 + - 1.4 + - 1.5 + - tip + +install: + - go get -v -t ./... + - go get golang.org/x/tools/cmd/cover + - go get github.com/onsi/gomega + - go install github.com/onsi/ginkgo/ginkgo + - export PATH=$PATH:$HOME/gopath/bin + +script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace diff --git a/vendor/github.com/onsi/ginkgo/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/CHANGELOG.md new file mode 100644 index 00000000..438c8c1e --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/CHANGELOG.md @@ -0,0 +1,136 @@ +## HEAD + +Improvements: + +- `Skip(message)` can be used to skip the current test. +- Added `extensions/table` - a Ginkgo DSL for [Table Driven Tests](http://onsi.github.io/ginkgo/#table-driven-tests) + +Bug Fixes: + +- Ginkgo tests now fail when you `panic(nil)` (#167) + +## 1.2.0 5/31/2015 + +Improvements + +- `ginkgo -coverpkg` calls down to `go test -coverpkg` (#160) +- `ginkgo -afterSuiteHook COMMAND` invokes the passed-in `COMMAND` after a test suite completes (#152) +- Relaxed requirement for Go 1.4+. `ginkgo` now works with Go v1.3+ (#166) + +## 1.2.0-beta + +Ginkgo now requires Go 1.4+ + +Improvements: + +- Call reporters in reverse order when announcing spec completion -- allows custom reporters to emit output before the default reporter does. +- Improved focus behavior. Now, this: + + ```golang + FDescribe("Some describe", func() { + It("A", func() {}) + + FIt("B", func() {}) + }) + ``` + + will run `B` but *not* `A`. This tends to be a common usage pattern when in the thick of writing and debugging tests. +- When `SIGINT` is received, Ginkgo will emit the contents of the `GinkgoWriter` before running the `AfterSuite`. Useful for debugging stuck tests. +- When `--progress` is set, Ginkgo will write test progress (in particular, Ginkgo will say when it is about to run a BeforeEach, AfterEach, It, etc...) to the `GinkgoWriter`. This is useful for debugging stuck tests and tests that generate many logs. +- Improved output when an error occurs in a setup or teardown block. 
+- When `--dryRun` is set, Ginkgo will walk the spec tree and emit to its reporter *without* actually running anything. Best paired with `-v` to understand which specs will run in which order. +- Add `By` to help document long `It`s. `By` simply writes to the `GinkgoWriter`. +- Add support for precompiled tests: + - `ginkgo build ` will now compile the package, producing a file named `package.test` + - The compiled `package.test` file can be run directly. This runs the tests in series. + - To run precompiled tests in parallel, you can run: `ginkgo -p package.test` +- Support `bootstrap`ping and `generate`ing [Agouti](http://agouti.org) specs. +- `ginkgo generate` and `ginkgo bootstrap` now honor the package name already defined in a given directory +- The `ginkgo` CLI ignores `SIGQUIT`. Prevents its stack dump from interlacing with the underlying test suite's stack dump. +- The `ginkgo` CLI now compiles tests into a temporary directory instead of the package directory. This necessitates upgrading to Go v1.4+. +- `ginkgo -notify` now works on Linux + +Bug Fixes: + +- If --skipPackages is used and all packages are skipped, Ginkgo should exit 0. +- Fix tempfile leak when running in parallel +- Fix incorrect failure message when a panic occurs during a parallel test run +- Fixed an issue where a pending test within a focused context (or a focused test within a pending context) would skip all other tests. +- Be more consistent about handling SIGTERM as well as SIGINT +- When interupted while concurrently compiling test suites in the background, Ginkgo now cleans up the compiled artifacts. +- Fixed a long standing bug where `ginkgo -p` would hang if a process spawned by one of the Ginkgo parallel nodes does not exit. (Hooray!) + +## 1.1.0 (8/2/2014) + +No changes, just dropping the beta. + +## 1.1.0-beta (7/22/2014) +New Features: + +- `ginkgo watch` now monitors packages *and their dependencies* for changes. The depth of the dependency tree can be modified with the `-depth` flag. +- Test suites with a programmatic focus (`FIt`, `FDescribe`, etc...) exit with non-zero status code, evne when they pass. This allows CI systems to detect accidental commits of focused test suites. +- `ginkgo -p` runs the testsuite in parallel with an auto-detected number of nodes. +- `ginkgo -tags=TAG_LIST` passes a list of tags down to the `go build` command. +- `ginkgo --failFast` aborts the test suite after the first failure. +- `ginkgo generate file_1 file_2` can take multiple file arguments. +- Ginkgo now summarizes any spec failures that occured at the end of the test run. +- `ginkgo --randomizeSuites` will run tests *suites* in random order using the generated/passed-in seed. + +Improvements: + +- `ginkgo -skipPackage` now takes a comma-separated list of strings. If the *relative path* to a package matches one of the entries in the comma-separated list, that package is skipped. +- `ginkgo --untilItFails` no longer recompiles between attempts. +- Ginkgo now panics when a runnable node (`It`, `BeforeEach`, `JustBeforeEach`, `AfterEach`, `Measure`) is nested within another runnable node. This is always a mistake. Any test suites that panic because of this change should be fixed. + +Bug Fixes: + +- `ginkgo boostrap` and `ginkgo generate` no longer fail when dealing with `hyphen-separated-packages`. 
+- parallel specs are now better distributed across nodes - fixed a crashing bug where (for example) distributing 11 tests across 7 nodes would panic + +## 1.0.0 (5/24/2014) +New Features: + +- Add `GinkgoParallelNode()` - shorthand for `config.GinkgoConfig.ParallelNode` + +Improvements: + +- When compilation fails, the compilation output is rewritten to present a correct *relative* path. Allows ⌘-clicking in iTerm open the file in your text editor. +- `--untilItFails` and `ginkgo watch` now generate new random seeds between test runs, unless a particular random seed is specified. + +Bug Fixes: + +- `-cover` now generates a correctly combined coverprofile when running with in parallel with multiple `-node`s. +- Print out the contents of the `GinkgoWriter` when `BeforeSuite` or `AfterSuite` fail. +- Fix all remaining race conditions in Ginkgo's test suite. + +## 1.0.0-beta (4/14/2014) +Breaking changes: + +- `thirdparty/gomocktestreporter` is gone. Use `GinkgoT()` instead +- Modified the Reporter interface +- `watch` is now a subcommand, not a flag. + +DSL changes: + +- `BeforeSuite` and `AfterSuite` for setting up and tearing down test suites. +- `AfterSuite` is triggered on interrupt (`^C`) as well as exit. +- `SynchronizedBeforeSuite` and `SynchronizedAfterSuite` for setting up and tearing down singleton resources across parallel nodes. + +CLI changes: + +- `watch` is now a subcommand, not a flag +- `--nodot` flag can be passed to `ginkgo generate` and `ginkgo bootstrap` to avoid dot imports. This explicitly imports all exported identifiers in Ginkgo and Gomega. Refreshing this list can be done by running `ginkgo nodot` +- Additional arguments can be passed to specs. Pass them after the `--` separator +- `--skipPackage` flag takes a regexp and ignores any packages with package names passing said regexp. +- `--trace` flag prints out full stack traces when errors occur, not just the line at which the error occurs. + +Misc: + +- Start using semantic versioning +- Start maintaining changelog + +Major refactor: + +- Pull out Ginkgo's internal to `internal` +- Rename `example` everywhere to `spec` +- Much more! diff --git a/vendor/github.com/onsi/ginkgo/LICENSE b/vendor/github.com/onsi/ginkgo/LICENSE new file mode 100644 index 00000000..9415ee72 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2013-2014 Onsi Fakhouri + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
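
Illustrative aside, not part of the patch: the libcni hunks earlier in this diff version-gate the new GET command behind CNI spec 0.4.0 and cache ADD results for later GET/DEL calls. A minimal caller-side sketch of that flow is below, assuming the `CNIConfig.AddNetwork`/`GetNetwork`/`DelNetwork` signatures shown in this diff and hypothetical plugin/config paths.

```go
package main

import (
	"fmt"

	"github.com/containernetworking/cni/libcni"
	"github.com/containernetworking/cni/pkg/version"
)

func main() {
	// Plugin search path and network config file are assumptions for this sketch.
	cni := &libcni.CNIConfig{Path: []string{"/opt/cni/bin"}}

	netconf, err := libcni.ConfFromFile("/etc/cni/net.d/10-mynet.conf")
	if err != nil {
		panic(err)
	}

	rt := &libcni.RuntimeConf{
		ContainerID: "example",
		NetNS:       "/var/run/netns/example",
		IfName:      "eth0",
	}

	// ADD caches the result so a later GET or DEL can reuse it.
	result, err := cni.AddNetwork(netconf, rt)
	if err != nil {
		panic(err)
	}
	fmt.Println("ADD result:", result)

	// GET is only valid for configurations declaring CNI spec 0.4.0 or newer,
	// mirroring the GreaterThanOrEqualTo check in the api.go hunk above.
	if ok, _ := version.GreaterThanOrEqualTo(netconf.Network.CNIVersion, "0.4.0"); ok {
		if got, err := cni.GetNetwork(netconf, rt); err == nil {
			fmt.Println("GET result:", got)
		}
	}

	// DEL consumes and then clears the cached result when it succeeds.
	if err := cni.DelNetwork(netconf, rt); err != nil {
		panic(err)
	}
}
```
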
diff --git a/vendor/github.com/onsi/ginkgo/README.md b/vendor/github.com/onsi/ginkgo/README.md new file mode 100644 index 00000000..b8b77b57 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/README.md @@ -0,0 +1,115 @@ +![Ginkgo: A Golang BDD Testing Framework](http://onsi.github.io/ginkgo/images/ginkgo.png) + +[![Build Status](https://travis-ci.org/onsi/ginkgo.png)](https://travis-ci.org/onsi/ginkgo) + +Jump to the [docs](http://onsi.github.io/ginkgo/) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)! + +To discuss Ginkgo and get updates, join the [google group](https://groups.google.com/d/forum/ginkgo-and-gomega). + +## Feature List + +- Ginkgo uses Go's `testing` package and can live alongside your existing `testing` tests. It's easy to [bootstrap](http://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first tests](http://onsi.github.io/ginkgo/#adding-specs-to-a-suite) + +- Structure your BDD-style tests expressively: + - Nestable [`Describe` and `Context` container blocks](http://onsi.github.io/ginkgo/#organizing-specs-with-containers-describe-and-context) + - [`BeforeEach` and `AfterEach` blocks](http://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and teardown + - [`It` blocks](http://onsi.github.io/ginkgo/#individual-specs-) that hold your assertions + - [`JustBeforeEach` blocks](http://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach) that separate creation from configuration (also known as the subject action pattern). + - [`BeforeSuite` and `AfterSuite` blocks](http://onsi.github.io/ginkgo/#global-setup-and-teardown-beforesuite-and-aftersuite) to prep for and cleanup after a suite. + +- A comprehensive test runner that lets you: + - Mark specs as [pending](http://onsi.github.io/ginkgo/#pending-specs) + - [Focus](http://onsi.github.io/ginkgo/#focused-specs) individual specs, and groups of specs, either programmatically or on the command line + - Run your tests in [random order](http://onsi.github.io/ginkgo/#spec-permutation), and then reuse random seeds to replicate the same order. + - Break up your test suite into parallel processes for straightforward [test parallelization](http://onsi.github.io/ginkgo/#parallel-specs) + +- `ginkgo`: a command line interface with plenty of handy command line arguments for [running your tests](http://onsi.github.io/ginkgo/#running-tests) and [generating](http://onsi.github.io/ginkgo/#generators) test files. Here are a few choice examples: + - `ginkgo -nodes=N` runs your tests in `N` parallel processes and print out coherent output in realtime + - `ginkgo -cover` runs your tests using Golang's code coverage tool + - `ginkgo convert` converts an XUnit-style `testing` package to a Ginkgo-style package + - `ginkgo -focus="REGEXP"` and `ginkgo -skip="REGEXP"` allow you to specify a subset of tests to run via regular expression + - `ginkgo -r` runs all tests suites under the current directory + - `ginkgo -v` prints out identifying information for each tests just before it runs + + And much more: run `ginkgo help` for details! + + The `ginkgo` CLI is convenient, but purely optional -- Ginkgo works just fine with `go test` + +- `ginkgo watch` [watches](https://onsi.github.io/ginkgo/#watching-for-changes) packages *and their dependencies* for changes, then reruns tests. Run tests immediately as you develop! 
+ +- Built-in support for testing [asynchronicity](http://onsi.github.io/ginkgo/#asynchronous-tests) + +- Built-in support for [benchmarking](http://onsi.github.io/ginkgo/#benchmark-tests) your code. Control the number of benchmark samples as you gather runtimes and other, arbitrary, bits of numerical information about your code. + +- [Completions for Sublime Text](https://github.com/onsi/ginkgo-sublime-completions): just use [Package Control](https://sublime.wbond.net/) to install `Ginkgo Completions`. + +- Straightforward support for third-party testing libraries such as [Gomock](https://code.google.com/p/gomock/) and [Testify](https://github.com/stretchr/testify). Check out the [docs](http://onsi.github.io/ginkgo/#third-party-integrations) for details. + +- A modular architecture that lets you easily: + - Write [custom reporters](http://onsi.github.io/ginkgo/#writing-custom-reporters) (for example, Ginkgo comes with a [JUnit XML reporter](http://onsi.github.io/ginkgo/#generating-junit-xml-output) and a TeamCity reporter). + - [Adapt an existing matcher library (or write your own!)](http://onsi.github.io/ginkgo/#using-other-matcher-libraries) to work with Ginkgo + +## [Gomega](http://github.com/onsi/gomega): Ginkgo's Preferred Matcher Library + +Ginkgo is best paired with Gomega. Learn more about Gomega [here](http://onsi.github.io/gomega/) + +## [Agouti](http://github.com/sclevine/agouti): A Golang Acceptance Testing Framework + +Agouti allows you run WebDriver integration tests. Learn more about Agouti [here](http://agouti.org) + +## Set Me Up! + +You'll need Golang v1.3+ (Ubuntu users: you probably have Golang v1.0 -- you'll need to upgrade!) + +```bash + +go get github.com/onsi/ginkgo/ginkgo # installs the ginkgo CLI +go get github.com/onsi/gomega # fetches the matcher library + +cd path/to/package/you/want/to/test + +ginkgo bootstrap # set up a new ginkgo suite +ginkgo generate # will create a sample test file. edit this file and add your tests then... + +go test # to run your tests + +ginkgo # also runs your tests + +``` + +## I'm new to Go: What are my testing options? + +Of course, I heartily recommend [Ginkgo](https://github.com/onsi/ginkgo) and [Gomega](https://github.com/onsi/gomega). Both packages are seeing heavy, daily, production use on a number of projects and boast a mature and comprehensive feature-set. + +With that said, it's great to know what your options are :) + +### What Golang gives you out of the box + +Testing is a first class citizen in Golang, however Go's built-in testing primitives are somewhat limited: The [testing](http://golang.org/pkg/testing) package provides basic XUnit style tests and no assertion library. + +### Matcher libraries for Golang's XUnit style tests + +A number of matcher libraries have been written to augment Go's built-in XUnit style tests. Here are two that have gained traction: + +- [testify](https://github.com/stretchr/testify) +- [gocheck](http://labix.org/gocheck) + +You can also use Ginkgo's matcher library [Gomega](https://github.com/onsi/gomega) in [XUnit style tests](http://onsi.github.io/gomega/#using-gomega-with-golangs-xunitstyle-tests) + +### BDD style testing frameworks + +There are a handful of BDD-style testing frameworks written for Golang. 
Here are a few: + +- [Ginkgo](https://github.com/onsi/ginkgo) ;) +- [GoConvey](https://github.com/smartystreets/goconvey) +- [Goblin](https://github.com/franela/goblin) +- [Mao](https://github.com/azer/mao) +- [Zen](https://github.com/pranavraja/zen) + +Finally, @shageman has [put together](https://github.com/shageman/gotestit) a comprehensive comparison of golang testing libraries. + +Go explore! + +## License + +Ginkgo is MIT-Licensed diff --git a/vendor/github.com/onsi/ginkgo/config/config.go b/vendor/github.com/onsi/ginkgo/config/config.go new file mode 100644 index 00000000..46ce16aa --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/config/config.go @@ -0,0 +1,170 @@ +/* +Ginkgo accepts a number of configuration options. + +These are documented [here](http://onsi.github.io/ginkgo/#the_ginkgo_cli) + +You can also learn more via + + ginkgo help + +or (I kid you not): + + go test -asdf +*/ +package config + +import ( + "flag" + "time" + + "fmt" +) + +const VERSION = "1.2.0" + +type GinkgoConfigType struct { + RandomSeed int64 + RandomizeAllSpecs bool + FocusString string + SkipString string + SkipMeasurements bool + FailOnPending bool + FailFast bool + EmitSpecProgress bool + DryRun bool + + ParallelNode int + ParallelTotal int + SyncHost string + StreamHost string +} + +var GinkgoConfig = GinkgoConfigType{} + +type DefaultReporterConfigType struct { + NoColor bool + SlowSpecThreshold float64 + NoisyPendings bool + Succinct bool + Verbose bool + FullTrace bool +} + +var DefaultReporterConfig = DefaultReporterConfigType{} + +func processPrefix(prefix string) string { + if prefix != "" { + prefix = prefix + "." + } + return prefix +} + +func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) { + prefix = processPrefix(prefix) + flagSet.Int64Var(&(GinkgoConfig.RandomSeed), prefix+"seed", time.Now().Unix(), "The seed used to randomize the spec suite.") + flagSet.BoolVar(&(GinkgoConfig.RandomizeAllSpecs), prefix+"randomizeAllSpecs", false, "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe/Context groups.") + flagSet.BoolVar(&(GinkgoConfig.SkipMeasurements), prefix+"skipMeasurements", false, "If set, ginkgo will skip any measurement specs.") + flagSet.BoolVar(&(GinkgoConfig.FailOnPending), prefix+"failOnPending", false, "If set, ginkgo will mark the test suite as failed if any specs are pending.") + flagSet.BoolVar(&(GinkgoConfig.FailFast), prefix+"failFast", false, "If set, ginkgo will stop running a test suite after a failure occurs.") + flagSet.BoolVar(&(GinkgoConfig.DryRun), prefix+"dryRun", false, "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v.") + flagSet.StringVar(&(GinkgoConfig.FocusString), prefix+"focus", "", "If set, ginkgo will only run specs that match this regular expression.") + flagSet.StringVar(&(GinkgoConfig.SkipString), prefix+"skip", "", "If set, ginkgo will only run specs that do not match this regular expression.") + flagSet.BoolVar(&(GinkgoConfig.EmitSpecProgress), prefix+"progress", false, "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter.") + + if includeParallelFlags { + flagSet.IntVar(&(GinkgoConfig.ParallelNode), prefix+"parallel.node", 1, "This worker node's (one-indexed) node number. For running specs in parallel.") + flagSet.IntVar(&(GinkgoConfig.ParallelTotal), prefix+"parallel.total", 1, "The total number of worker nodes. 
For running specs in parallel.") + flagSet.StringVar(&(GinkgoConfig.SyncHost), prefix+"parallel.synchost", "", "The address for the server that will synchronize the running nodes.") + flagSet.StringVar(&(GinkgoConfig.StreamHost), prefix+"parallel.streamhost", "", "The address for the server that the running nodes should stream data to.") + } + + flagSet.BoolVar(&(DefaultReporterConfig.NoColor), prefix+"noColor", false, "If set, suppress color output in default reporter.") + flagSet.Float64Var(&(DefaultReporterConfig.SlowSpecThreshold), prefix+"slowSpecThreshold", 5.0, "(in seconds) Specs that take longer to run than this threshold are flagged as slow by the default reporter (default: 5 seconds).") + flagSet.BoolVar(&(DefaultReporterConfig.NoisyPendings), prefix+"noisyPendings", true, "If set, default reporter will shout about pending tests.") + flagSet.BoolVar(&(DefaultReporterConfig.Verbose), prefix+"v", false, "If set, default reporter print out all specs as they begin.") + flagSet.BoolVar(&(DefaultReporterConfig.Succinct), prefix+"succinct", false, "If set, default reporter prints out a very succinct report") + flagSet.BoolVar(&(DefaultReporterConfig.FullTrace), prefix+"trace", false, "If set, default reporter prints out the full stack trace when a failure occurs") +} + +func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultReporterConfigType) []string { + prefix = processPrefix(prefix) + result := make([]string, 0) + + if ginkgo.RandomSeed > 0 { + result = append(result, fmt.Sprintf("--%sseed=%d", prefix, ginkgo.RandomSeed)) + } + + if ginkgo.RandomizeAllSpecs { + result = append(result, fmt.Sprintf("--%srandomizeAllSpecs", prefix)) + } + + if ginkgo.SkipMeasurements { + result = append(result, fmt.Sprintf("--%sskipMeasurements", prefix)) + } + + if ginkgo.FailOnPending { + result = append(result, fmt.Sprintf("--%sfailOnPending", prefix)) + } + + if ginkgo.FailFast { + result = append(result, fmt.Sprintf("--%sfailFast", prefix)) + } + + if ginkgo.DryRun { + result = append(result, fmt.Sprintf("--%sdryRun", prefix)) + } + + if ginkgo.FocusString != "" { + result = append(result, fmt.Sprintf("--%sfocus=%s", prefix, ginkgo.FocusString)) + } + + if ginkgo.SkipString != "" { + result = append(result, fmt.Sprintf("--%sskip=%s", prefix, ginkgo.SkipString)) + } + + if ginkgo.EmitSpecProgress { + result = append(result, fmt.Sprintf("--%sprogress", prefix)) + } + + if ginkgo.ParallelNode != 0 { + result = append(result, fmt.Sprintf("--%sparallel.node=%d", prefix, ginkgo.ParallelNode)) + } + + if ginkgo.ParallelTotal != 0 { + result = append(result, fmt.Sprintf("--%sparallel.total=%d", prefix, ginkgo.ParallelTotal)) + } + + if ginkgo.StreamHost != "" { + result = append(result, fmt.Sprintf("--%sparallel.streamhost=%s", prefix, ginkgo.StreamHost)) + } + + if ginkgo.SyncHost != "" { + result = append(result, fmt.Sprintf("--%sparallel.synchost=%s", prefix, ginkgo.SyncHost)) + } + + if reporter.NoColor { + result = append(result, fmt.Sprintf("--%snoColor", prefix)) + } + + if reporter.SlowSpecThreshold > 0 { + result = append(result, fmt.Sprintf("--%sslowSpecThreshold=%.5f", prefix, reporter.SlowSpecThreshold)) + } + + if !reporter.NoisyPendings { + result = append(result, fmt.Sprintf("--%snoisyPendings=false", prefix)) + } + + if reporter.Verbose { + result = append(result, fmt.Sprintf("--%sv", prefix)) + } + + if reporter.Succinct { + result = append(result, fmt.Sprintf("--%ssuccinct", prefix)) + } + + if reporter.FullTrace { + result = append(result, fmt.Sprintf("--%strace", 
prefix)) + } + + return result +} diff --git a/vendor/github.com/onsi/ginkgo/extensions/table/table.go b/vendor/github.com/onsi/ginkgo/extensions/table/table.go new file mode 100644 index 00000000..ae8ab7d2 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/extensions/table/table.go @@ -0,0 +1,98 @@ +/* + +Table provides a simple DSL for Ginkgo-native Table-Driven Tests + +The godoc documentation describes Table's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/ginkgo#table-driven-tests + +*/ + +package table + +import ( + "fmt" + "reflect" + + "github.com/onsi/ginkgo" +) + +/* +DescribeTable describes a table-driven test. + +For example: + + DescribeTable("a simple table", + func(x int, y int, expected bool) { + Ω(x > y).Should(Equal(expected)) + }, + Entry("x > y", 1, 0, true), + Entry("x == y", 0, 0, false), + Entry("x < y", 0, 1, false), + ) + +The first argument to `DescribeTable` is a string description. +The second argument is a function that will be run for each table entry. Your assertions go here - the function is equivalent to a Ginkgo It. +The subsequent arguments must be of type `TableEntry`. We recommend using the `Entry` convenience constructors. + +The `Entry` constructor takes a string description followed by an arbitrary set of parameters. These parameters are passed into your function. + +Under the hood, `DescribeTable` simply generates a new Ginkgo `Describe`. Each `Entry` is turned into an `It` within the `Describe`. + +It's important to understand that the `Describe`s and `It`s are generated at evaluation time (i.e. when Ginkgo constructs the tree of tests and before the tests run). + +Individual Entries can be focused (with FEntry) or marked pending (with PEntry or XEntry). In addition, the entire table can be focused or marked pending with FDescribeTable and PDescribeTable/XDescribeTable. +*/ +func DescribeTable(description string, itBody interface{}, entries ...TableEntry) bool { + describeTable(description, itBody, entries, false, false) + return true +} + +/* +You can focus a table with `FDescribeTable`. This is equivalent to `FDescribe`. +*/ +func FDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool { + describeTable(description, itBody, entries, false, true) + return true +} + +/* +You can mark a table as pending with `PDescribeTable`. This is equivalent to `PDescribe`. +*/ +func PDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool { + describeTable(description, itBody, entries, true, false) + return true +} + +/* +You can mark a table as pending with `XDescribeTable`. This is equivalent to `XDescribe`. 
+*/ +func XDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool { + describeTable(description, itBody, entries, true, false) + return true +} + +func describeTable(description string, itBody interface{}, entries []TableEntry, pending bool, focused bool) { + itBodyValue := reflect.ValueOf(itBody) + if itBodyValue.Kind() != reflect.Func { + panic(fmt.Sprintf("DescribeTable expects a function, got %#v", itBody)) + } + + if pending { + ginkgo.PDescribe(description, func() { + for _, entry := range entries { + entry.generateIt(itBodyValue) + } + }) + } else if focused { + ginkgo.FDescribe(description, func() { + for _, entry := range entries { + entry.generateIt(itBodyValue) + } + }) + } else { + ginkgo.Describe(description, func() { + for _, entry := range entries { + entry.generateIt(itBodyValue) + } + }) + } +} diff --git a/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go b/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go new file mode 100644 index 00000000..a6a9e3cc --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go @@ -0,0 +1,72 @@ +package table + +import ( + "reflect" + + "github.com/onsi/ginkgo" +) + +/* +TableEntry represents an entry in a table test. You generally use the `Entry` constructor. +*/ +type TableEntry struct { + Description string + Parameters []interface{} + Pending bool + Focused bool +} + +func (t TableEntry) generateIt(itBody reflect.Value) { + if t.Pending { + ginkgo.PIt(t.Description) + return + } + + values := []reflect.Value{} + for _, param := range t.Parameters { + values = append(values, reflect.ValueOf(param)) + } + + body := func() { + itBody.Call(values) + } + + if t.Focused { + ginkgo.FIt(t.Description, body) + } else { + ginkgo.It(t.Description, body) + } +} + +/* +Entry constructs a TableEntry. + +The first argument is a required description (this becomes the content of the generated Ginkgo `It`). +Subsequent parameters are saved off and sent to the callback passed in to `DescribeTable`. + +Each Entry ends up generating an individual Ginkgo It. +*/ +func Entry(description string, parameters ...interface{}) TableEntry { + return TableEntry{description, parameters, false, false} +} + +/* +You can focus a particular entry with FEntry. This is equivalent to FIt. +*/ +func FEntry(description string, parameters ...interface{}) TableEntry { + return TableEntry{description, parameters, false, true} +} + +/* +You can mark a particular entry as pending with PEntry. This is equivalent to PIt. +*/ +func PEntry(description string, parameters ...interface{}) TableEntry { + return TableEntry{description, parameters, true, false} +} + +/* +You can mark a particular entry as pending with XEntry. This is equivalent to XIt. +*/ +func XEntry(description string, parameters ...interface{}) TableEntry { + return TableEntry{description, parameters, true, false} +} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go new file mode 100644 index 00000000..36f6d8e7 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go @@ -0,0 +1,536 @@ +/* +Ginkgo is a BDD-style testing framework for Golang + +The godoc documentation describes Ginkgo's API. More comprehensive documentation (with examples!) 
is available at http://onsi.github.io/ginkgo/ + +Ginkgo's preferred matcher library is [Gomega](http://github.com/onsi/gomega) + +Ginkgo on Github: http://github.com/onsi/ginkgo + +Ginkgo is MIT-Licensed +*/ +package ginkgo + +import ( + "flag" + "fmt" + "io" + "net/http" + "os" + "strings" + "time" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/internal/codelocation" + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/internal/remote" + "github.com/onsi/ginkgo/internal/suite" + "github.com/onsi/ginkgo/internal/testingtproxy" + "github.com/onsi/ginkgo/internal/writer" + "github.com/onsi/ginkgo/reporters" + "github.com/onsi/ginkgo/reporters/stenographer" + "github.com/onsi/ginkgo/types" +) + +const GINKGO_VERSION = config.VERSION +const GINKGO_PANIC = ` +Your test failed. +Ginkgo panics to prevent subsequent assertions from running. +Normally Ginkgo rescues this panic so you shouldn't see it. + +But, if you make an assertion in a goroutine, Ginkgo can't capture the panic. +To circumvent this, you should call + + defer GinkgoRecover() + +at the top of the goroutine that caused this panic. +` +const defaultTimeout = 1 + +var globalSuite *suite.Suite +var globalFailer *failer.Failer + +func init() { + config.Flags(flag.CommandLine, "ginkgo", true) + GinkgoWriter = writer.New(os.Stdout) + globalFailer = failer.New() + globalSuite = suite.New(globalFailer) +} + +//GinkgoWriter implements an io.Writer +//When running in verbose mode any writes to GinkgoWriter will be immediately printed +//to stdout. Otherwise, GinkgoWriter will buffer any writes produced during the current test and flush them to screen +//only if the current test fails. +var GinkgoWriter io.Writer + +//The interface by which Ginkgo receives *testing.T +type GinkgoTestingT interface { + Fail() +} + +//GinkgoParallelNode returns the parallel node number for the current ginkgo process +//The node number is 1-indexed +func GinkgoParallelNode() int { + return config.GinkgoConfig.ParallelNode +} + +//Some matcher libraries or legacy codebases require a *testing.T +//GinkgoT implements an interface analogous to *testing.T and can be used if +//the library in question accepts *testing.T through an interface +// +// For example, with testify: +// assert.Equal(GinkgoT(), 123, 123, "they should be equal") +// +// Or with gomock: +// gomock.NewController(GinkgoT()) +// +// GinkgoT() takes an optional offset argument that can be used to get the +// correct line number associated with the failure. +func GinkgoT(optionalOffset ...int) GinkgoTInterface { + offset := 3 + if len(optionalOffset) > 0 { + offset = optionalOffset[0] + } + return testingtproxy.New(GinkgoWriter, Fail, offset) +} + +//The interface returned by GinkgoT(). This covers most of the methods +//in the testing package's T. +type GinkgoTInterface interface { + Fail() + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + FailNow() + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Log(args ...interface{}) + Logf(format string, args ...interface{}) + Failed() bool + Parallel() + Skip(args ...interface{}) + Skipf(format string, args ...interface{}) + SkipNow() + Skipped() bool +} + +//Custom Ginkgo test reporters must implement the Reporter interface. 
+// +//The custom reporter is passed in a SuiteSummary when the suite begins and ends, +//and a SpecSummary just before a spec begins and just after a spec ends +type Reporter reporters.Reporter + +//Asynchronous specs are given a channel of the Done type. You must close or write to the channel +//to tell Ginkgo that your async test is done. +type Done chan<- interface{} + +//GinkgoTestDescription represents the information about the current running test returned by CurrentGinkgoTestDescription +// FullTestText: a concatenation of ComponentTexts and the TestText +// ComponentTexts: a list of all texts for the Describes & Contexts leading up to the current test +// TestText: the text in the actual It or Measure node +// IsMeasurement: true if the current test is a measurement +// FileName: the name of the file containing the current test +// LineNumber: the line number for the current test +// Failed: if the current test has failed, this will be true (useful in an AfterEach) +type GinkgoTestDescription struct { + FullTestText string + ComponentTexts []string + TestText string + + IsMeasurement bool + + FileName string + LineNumber int + + Failed bool +} + +//CurrentGinkgoTestDescripton returns information about the current running test. +func CurrentGinkgoTestDescription() GinkgoTestDescription { + summary, ok := globalSuite.CurrentRunningSpecSummary() + if !ok { + return GinkgoTestDescription{} + } + + subjectCodeLocation := summary.ComponentCodeLocations[len(summary.ComponentCodeLocations)-1] + + return GinkgoTestDescription{ + ComponentTexts: summary.ComponentTexts[1:], + FullTestText: strings.Join(summary.ComponentTexts[1:], " "), + TestText: summary.ComponentTexts[len(summary.ComponentTexts)-1], + IsMeasurement: summary.IsMeasurement, + FileName: subjectCodeLocation.FileName, + LineNumber: subjectCodeLocation.LineNumber, + Failed: summary.HasFailureState(), + } +} + +//Measurement tests receive a Benchmarker. +// +//You use the Time() function to time how long the passed in body function takes to run +//You use the RecordValue() function to track arbitrary numerical measurements. +//The optional info argument is passed to the test reporter and can be used to +// provide the measurement data to a custom reporter with context. +// +//See http://onsi.github.io/ginkgo/#benchmark_tests for more details +type Benchmarker interface { + Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration) + RecordValue(name string, value float64, info ...interface{}) +} + +//RunSpecs is the entry point for the Ginkgo test runner. +//You must call this within a Golang testing TestX(t *testing.T) function. +// +//To bootstrap a test suite you can use the Ginkgo CLI: +// +// ginkgo bootstrap +func RunSpecs(t GinkgoTestingT, description string) bool { + specReporters := []Reporter{buildDefaultReporter()} + return RunSpecsWithCustomReporters(t, description, specReporters) +} + +//To run your tests with Ginkgo's default reporter and your custom reporter(s), replace +//RunSpecs() with this method. +func RunSpecsWithDefaultAndCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool { + specReporters = append([]Reporter{buildDefaultReporter()}, specReporters...) + return RunSpecsWithCustomReporters(t, description, specReporters) +} + +//To run your tests with your custom reporter(s) (and *not* Ginkgo's default reporter), replace +//RunSpecs() with this method. 
Note that parallel tests will not work correctly without the default reporter +func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool { + writer := GinkgoWriter.(*writer.Writer) + writer.SetStream(config.DefaultReporterConfig.Verbose) + reporters := make([]reporters.Reporter, len(specReporters)) + for i, reporter := range specReporters { + reporters[i] = reporter + } + passed, hasFocusedTests := globalSuite.Run(t, description, reporters, writer, config.GinkgoConfig) + if passed && hasFocusedTests { + fmt.Println("PASS | FOCUSED") + os.Exit(types.GINKGO_FOCUS_EXIT_CODE) + } + return passed +} + +func buildDefaultReporter() Reporter { + remoteReportingServer := config.GinkgoConfig.StreamHost + if remoteReportingServer == "" { + stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor) + return reporters.NewDefaultReporter(config.DefaultReporterConfig, stenographer) + } else { + return remote.NewForwardingReporter(remoteReportingServer, &http.Client{}, remote.NewOutputInterceptor()) + } +} + +//Skip notifies Ginkgo that the current spec should be skipped. +func Skip(message string, callerSkip ...int) { + skip := 0 + if len(callerSkip) > 0 { + skip = callerSkip[0] + } + + globalFailer.Skip(message, codelocation.New(skip+1)) + panic(GINKGO_PANIC) +} + +//Fail notifies Ginkgo that the current spec has failed. (Gomega will call Fail for you automatically when an assertion fails.) +func Fail(message string, callerSkip ...int) { + skip := 0 + if len(callerSkip) > 0 { + skip = callerSkip[0] + } + + globalFailer.Fail(message, codelocation.New(skip+1)) + panic(GINKGO_PANIC) +} + +//GinkgoRecover should be deferred at the top of any spawned goroutine that (may) call `Fail` +//Since Gomega assertions call fail, you should throw a `defer GinkgoRecover()` at the top of any goroutine that +//calls out to Gomega +// +//Here's why: Ginkgo's `Fail` method records the failure and then panics to prevent +//further assertions from running. This panic must be recovered. Ginkgo does this for you +//if the panic originates in a Ginkgo node (an It, BeforeEach, etc...) +// +//Unfortunately, if a panic originates on a goroutine *launched* from one of these nodes there's no +//way for Ginkgo to rescue the panic. To do this, you must remember to `defer GinkgoRecover()` at the top of such a goroutine. +func GinkgoRecover() { + e := recover() + if e != nil { + globalFailer.Panic(codelocation.New(1), e) + } +} + +//Describe blocks allow you to organize your specs. A Describe block can contain any number of +//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks. +// +//In addition you can nest Describe and Context blocks. Describe and Context blocks are functionally +//equivalent. The difference is purely semantic -- you typical Describe the behavior of an object +//or method and, within that Describe, outline a number of Contexts. 
+func Describe(text string, body func()) bool { + globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1)) + return true +} + +//You can focus the tests within a describe block using FDescribe +func FDescribe(text string, body func()) bool { + globalSuite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1)) + return true +} + +//You can mark the tests within a describe block as pending using PDescribe +func PDescribe(text string, body func()) bool { + globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1)) + return true +} + +//You can mark the tests within a describe block as pending using XDescribe +func XDescribe(text string, body func()) bool { + globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1)) + return true +} + +//Context blocks allow you to organize your specs. A Context block can contain any number of +//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks. +// +//In addition you can nest Describe and Context blocks. Describe and Context blocks are functionally +//equivalent. The difference is purely semantic -- you typical Describe the behavior of an object +//or method and, within that Describe, outline a number of Contexts. +func Context(text string, body func()) bool { + globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1)) + return true +} + +//You can focus the tests within a describe block using FContext +func FContext(text string, body func()) bool { + globalSuite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1)) + return true +} + +//You can mark the tests within a describe block as pending using PContext +func PContext(text string, body func()) bool { + globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1)) + return true +} + +//You can mark the tests within a describe block as pending using XContext +func XContext(text string, body func()) bool { + globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1)) + return true +} + +//It blocks contain your test code and assertions. You cannot nest any other Ginkgo blocks +//within an It block. +// +//Ginkgo will normally run It blocks synchronously. To perform asynchronous tests, pass a +//function that accepts a Done channel. When you do this, you can also provide an optional timeout. +func It(text string, body interface{}, timeout ...float64) bool { + globalSuite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...)) + return true +} + +//You can focus individual Its using FIt +func FIt(text string, body interface{}, timeout ...float64) bool { + globalSuite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...)) + return true +} + +//You can mark Its as pending using PIt +func PIt(text string, _ ...interface{}) bool { + globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0) + return true +} + +//You can mark Its as pending using XIt +func XIt(text string, _ ...interface{}) bool { + globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0) + return true +} + +//By allows you to better document large Its. +// +//Generally you should try to keep your Its short and to the point. This is not always possible, however, +//especially in the context of integration tests that capture a particular workflow. +// +//By allows you to document such flows. 
By must be called within a runnable node (It, BeforeEach, Measure, etc...) +//By will simply log the passed in text to the GinkgoWriter. If By is handed a function it will immediately run the function. +func By(text string, callbacks ...func()) { + preamble := "\x1b[1mSTEP\x1b[0m" + if config.DefaultReporterConfig.NoColor { + preamble = "STEP" + } + fmt.Fprintln(GinkgoWriter, preamble+": "+text) + if len(callbacks) == 1 { + callbacks[0]() + } + if len(callbacks) > 1 { + panic("just one callback per By, please") + } +} + +//Measure blocks run the passed in body function repeatedly (determined by the samples argument) +//and accumulate metrics provided to the Benchmarker by the body function. +// +//The body function must have the signature: +// func(b Benchmarker) +func Measure(text string, body interface{}, samples int) bool { + globalSuite.PushMeasureNode(text, body, types.FlagTypeNone, codelocation.New(1), samples) + return true +} + +//You can focus individual Measures using FMeasure +func FMeasure(text string, body interface{}, samples int) bool { + globalSuite.PushMeasureNode(text, body, types.FlagTypeFocused, codelocation.New(1), samples) + return true +} + +//You can mark Maeasurements as pending using PMeasure +func PMeasure(text string, _ ...interface{}) bool { + globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0) + return true +} + +//You can mark Maeasurements as pending using XMeasure +func XMeasure(text string, _ ...interface{}) bool { + globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0) + return true +} + +//BeforeSuite blocks are run just once before any specs are run. When running in parallel, each +//parallel node process will call BeforeSuite. +// +//BeforeSuite blocks can be made asynchronous by providing a body function that accepts a Done channel +// +//You may only register *one* BeforeSuite handler per test suite. You typically do so in your bootstrap file at the top level. +func BeforeSuite(body interface{}, timeout ...float64) bool { + globalSuite.SetBeforeSuiteNode(body, codelocation.New(1), parseTimeout(timeout...)) + return true +} + +//AfterSuite blocks are *always* run after all the specs regardless of whether specs have passed or failed. +//Moreover, if Ginkgo receives an interrupt signal (^C) it will attempt to run the AfterSuite before exiting. +// +//When running in parallel, each parallel node process will call AfterSuite. +// +//AfterSuite blocks can be made asynchronous by providing a body function that accepts a Done channel +// +//You may only register *one* AfterSuite handler per test suite. You typically do so in your bootstrap file at the top level. +func AfterSuite(body interface{}, timeout ...float64) bool { + globalSuite.SetAfterSuiteNode(body, codelocation.New(1), parseTimeout(timeout...)) + return true +} + +//SynchronizedBeforeSuite blocks are primarily meant to solve the problem of setting up singleton external resources shared across +//nodes when running tests in parallel. For example, say you have a shared database that you can only start one instance of that +//must be used in your tests. When running in parallel, only one node should set up the database and all other nodes should wait +//until that node is done before running. +// +//SynchronizedBeforeSuite accomplishes this by taking *two* function arguments. The first is only run on parallel node #1. 
The second is +//run on all nodes, but *only* after the first function completes succesfully. Ginkgo also makes it possible to send data from the first function (on Node 1) +//to the second function (on all the other nodes). +// +//The functions have the following signatures. The first function (which only runs on node 1) has the signature: +// +// func() []byte +// +//or, to run asynchronously: +// +// func(done Done) []byte +// +//The byte array returned by the first function is then passed to the second function, which has the signature: +// +// func(data []byte) +// +//or, to run asynchronously: +// +// func(data []byte, done Done) +// +//Here's a simple pseudo-code example that starts a shared database on Node 1 and shares the database's address with the other nodes: +// +// var dbClient db.Client +// var dbRunner db.Runner +// +// var _ = SynchronizedBeforeSuite(func() []byte { +// dbRunner = db.NewRunner() +// err := dbRunner.Start() +// Ω(err).ShouldNot(HaveOccurred()) +// return []byte(dbRunner.URL) +// }, func(data []byte) { +// dbClient = db.NewClient() +// err := dbClient.Connect(string(data)) +// Ω(err).ShouldNot(HaveOccurred()) +// }) +func SynchronizedBeforeSuite(node1Body interface{}, allNodesBody interface{}, timeout ...float64) bool { + globalSuite.SetSynchronizedBeforeSuiteNode( + node1Body, + allNodesBody, + codelocation.New(1), + parseTimeout(timeout...), + ) + return true +} + +//SynchronizedAfterSuite blocks complement the SynchronizedBeforeSuite blocks in solving the problem of setting up +//external singleton resources shared across nodes when running tests in parallel. +// +//SynchronizedAfterSuite accomplishes this by taking *two* function arguments. The first runs on all nodes. The second runs only on parallel node #1 +//and *only* after all other nodes have finished and exited. This ensures that node 1, and any resources it is running, remain alive until +//all other nodes are finished. +// +//Both functions have the same signature: either func() or func(done Done) to run asynchronously. +// +//Here's a pseudo-code example that complements that given in SynchronizedBeforeSuite. Here, SynchronizedAfterSuite is used to tear down the shared database +//only after all nodes have finished: +// +// var _ = SynchronizedAfterSuite(func() { +// dbClient.Cleanup() +// }, func() { +// dbRunner.Stop() +// }) +func SynchronizedAfterSuite(allNodesBody interface{}, node1Body interface{}, timeout ...float64) bool { + globalSuite.SetSynchronizedAfterSuiteNode( + allNodesBody, + node1Body, + codelocation.New(1), + parseTimeout(timeout...), + ) + return true +} + +//BeforeEach blocks are run before It blocks. When multiple BeforeEach blocks are defined in nested +//Describe and Context blocks the outermost BeforeEach blocks are run first. +// +//Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts +//a Done channel +func BeforeEach(body interface{}, timeout ...float64) bool { + globalSuite.PushBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...)) + return true +} + +//JustBeforeEach blocks are run before It blocks but *after* all BeforeEach blocks. 
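A sketch of the separation this enables: outer BeforeEach blocks prepare configuration, nested Contexts adjust it, and a single JustBeforeEach performs the creation. The Config/Client types and the MaxRetries accessor below are invented for illustration:

    var _ = Describe("client", func() {
        var (
            cfg    Config  // hypothetical configuration type
            client *Client // hypothetical object under test
        )

        BeforeEach(func() {
            cfg = Config{Retries: 0} // outermost setup: defaults
        })

        JustBeforeEach(func() {
            client = NewClient(cfg) // creation happens once, after all BeforeEach blocks have run
        })

        Context("when retries are enabled", func() {
            BeforeEach(func() {
                cfg.Retries = 3 // deeper setup tweaks the configuration before creation
            })

            It("retries failed requests", func() {
                Expect(client.MaxRetries()).To(Equal(3)) // hypothetical accessor, Gomega matcher
            })
        })
    })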
For more details, +//read the [documentation](http://onsi.github.io/ginkgo/#separating_creation_and_configuration_) +// +//Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts +//a Done channel +func JustBeforeEach(body interface{}, timeout ...float64) bool { + globalSuite.PushJustBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...)) + return true +} + +//AfterEach blocks are run after It blocks. When multiple AfterEach blocks are defined in nested +//Describe and Context blocks the innermost AfterEach blocks are run first. +// +//Like It blocks, AfterEach blocks can be made asynchronous by providing a body function that accepts +//a Done channel +func AfterEach(body interface{}, timeout ...float64) bool { + globalSuite.PushAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...)) + return true +} + +func parseTimeout(timeout ...float64) time.Duration { + if len(timeout) == 0 { + return time.Duration(defaultTimeout * int64(time.Second)) + } else { + return time.Duration(timeout[0] * float64(time.Second)) + } +} diff --git a/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go b/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go new file mode 100644 index 00000000..fa2f0bf7 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go @@ -0,0 +1,32 @@ +package codelocation + +import ( + "regexp" + "runtime" + "runtime/debug" + "strings" + + "github.com/onsi/ginkgo/types" +) + +func New(skip int) types.CodeLocation { + _, file, line, _ := runtime.Caller(skip + 1) + stackTrace := PruneStack(string(debug.Stack()), skip) + return types.CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace} +} + +func PruneStack(fullStackTrace string, skip int) string { + stack := strings.Split(fullStackTrace, "\n") + if len(stack) > 2*(skip+1) { + stack = stack[2*(skip+1):] + } + prunedStack := []string{} + re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`) + for i := 0; i < len(stack)/2; i++ { + if !re.Match([]byte(stack[i*2])) { + prunedStack = append(prunedStack, stack[i*2]) + prunedStack = append(prunedStack, stack[i*2+1]) + } + } + return strings.Join(prunedStack, "\n") +} diff --git a/vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go b/vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go new file mode 100644 index 00000000..0737746d --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go @@ -0,0 +1,151 @@ +package containernode + +import ( + "math/rand" + "sort" + + "github.com/onsi/ginkgo/internal/leafnodes" + "github.com/onsi/ginkgo/types" +) + +type subjectOrContainerNode struct { + containerNode *ContainerNode + subjectNode leafnodes.SubjectNode +} + +func (n subjectOrContainerNode) text() string { + if n.containerNode != nil { + return n.containerNode.Text() + } else { + return n.subjectNode.Text() + } +} + +type CollatedNodes struct { + Containers []*ContainerNode + Subject leafnodes.SubjectNode +} + +type ContainerNode struct { + text string + flag types.FlagType + codeLocation types.CodeLocation + + setupNodes []leafnodes.BasicNode + subjectAndContainerNodes []subjectOrContainerNode +} + +func New(text string, flag types.FlagType, codeLocation types.CodeLocation) *ContainerNode { + return &ContainerNode{ + text: text, + flag: flag, + codeLocation: codeLocation, + } +} + +func 
(container *ContainerNode) Shuffle(r *rand.Rand) { + sort.Sort(container) + permutation := r.Perm(len(container.subjectAndContainerNodes)) + shuffledNodes := make([]subjectOrContainerNode, len(container.subjectAndContainerNodes)) + for i, j := range permutation { + shuffledNodes[i] = container.subjectAndContainerNodes[j] + } + container.subjectAndContainerNodes = shuffledNodes +} + +func (node *ContainerNode) BackPropagateProgrammaticFocus() bool { + if node.flag == types.FlagTypePending { + return false + } + + shouldUnfocus := false + for _, subjectOrContainerNode := range node.subjectAndContainerNodes { + if subjectOrContainerNode.containerNode != nil { + shouldUnfocus = subjectOrContainerNode.containerNode.BackPropagateProgrammaticFocus() || shouldUnfocus + } else { + shouldUnfocus = (subjectOrContainerNode.subjectNode.Flag() == types.FlagTypeFocused) || shouldUnfocus + } + } + + if shouldUnfocus { + if node.flag == types.FlagTypeFocused { + node.flag = types.FlagTypeNone + } + return true + } + + return node.flag == types.FlagTypeFocused +} + +func (node *ContainerNode) Collate() []CollatedNodes { + return node.collate([]*ContainerNode{}) +} + +func (node *ContainerNode) collate(enclosingContainers []*ContainerNode) []CollatedNodes { + collated := make([]CollatedNodes, 0) + + containers := make([]*ContainerNode, len(enclosingContainers)) + copy(containers, enclosingContainers) + containers = append(containers, node) + + for _, subjectOrContainer := range node.subjectAndContainerNodes { + if subjectOrContainer.containerNode != nil { + collated = append(collated, subjectOrContainer.containerNode.collate(containers)...) + } else { + collated = append(collated, CollatedNodes{ + Containers: containers, + Subject: subjectOrContainer.subjectNode, + }) + } + } + + return collated +} + +func (node *ContainerNode) PushContainerNode(container *ContainerNode) { + node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{containerNode: container}) +} + +func (node *ContainerNode) PushSubjectNode(subject leafnodes.SubjectNode) { + node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{subjectNode: subject}) +} + +func (node *ContainerNode) PushSetupNode(setupNode leafnodes.BasicNode) { + node.setupNodes = append(node.setupNodes, setupNode) +} + +func (node *ContainerNode) SetupNodesOfType(nodeType types.SpecComponentType) []leafnodes.BasicNode { + nodes := []leafnodes.BasicNode{} + for _, setupNode := range node.setupNodes { + if setupNode.Type() == nodeType { + nodes = append(nodes, setupNode) + } + } + return nodes +} + +func (node *ContainerNode) Text() string { + return node.text +} + +func (node *ContainerNode) CodeLocation() types.CodeLocation { + return node.codeLocation +} + +func (node *ContainerNode) Flag() types.FlagType { + return node.flag +} + +//sort.Interface + +func (node *ContainerNode) Len() int { + return len(node.subjectAndContainerNodes) +} + +func (node *ContainerNode) Less(i, j int) bool { + return node.subjectAndContainerNodes[i].text() < node.subjectAndContainerNodes[j].text() +} + +func (node *ContainerNode) Swap(i, j int) { + node.subjectAndContainerNodes[i], node.subjectAndContainerNodes[j] = node.subjectAndContainerNodes[j], node.subjectAndContainerNodes[i] +} diff --git a/vendor/github.com/onsi/ginkgo/internal/failer/failer.go b/vendor/github.com/onsi/ginkgo/internal/failer/failer.go new file mode 100644 index 00000000..678ea251 --- /dev/null +++ 
b/vendor/github.com/onsi/ginkgo/internal/failer/failer.go @@ -0,0 +1,92 @@ +package failer + +import ( + "fmt" + "sync" + + "github.com/onsi/ginkgo/types" +) + +type Failer struct { + lock *sync.Mutex + failure types.SpecFailure + state types.SpecState +} + +func New() *Failer { + return &Failer{ + lock: &sync.Mutex{}, + state: types.SpecStatePassed, + } +} + +func (f *Failer) Panic(location types.CodeLocation, forwardedPanic interface{}) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.state == types.SpecStatePassed { + f.state = types.SpecStatePanicked + f.failure = types.SpecFailure{ + Message: "Test Panicked", + Location: location, + ForwardedPanic: fmt.Sprintf("%v", forwardedPanic), + } + } +} + +func (f *Failer) Timeout(location types.CodeLocation) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.state == types.SpecStatePassed { + f.state = types.SpecStateTimedOut + f.failure = types.SpecFailure{ + Message: "Timed out", + Location: location, + } + } +} + +func (f *Failer) Fail(message string, location types.CodeLocation) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.state == types.SpecStatePassed { + f.state = types.SpecStateFailed + f.failure = types.SpecFailure{ + Message: message, + Location: location, + } + } +} + +func (f *Failer) Drain(componentType types.SpecComponentType, componentIndex int, componentCodeLocation types.CodeLocation) (types.SpecFailure, types.SpecState) { + f.lock.Lock() + defer f.lock.Unlock() + + failure := f.failure + outcome := f.state + if outcome != types.SpecStatePassed { + failure.ComponentType = componentType + failure.ComponentIndex = componentIndex + failure.ComponentCodeLocation = componentCodeLocation + } + + f.state = types.SpecStatePassed + f.failure = types.SpecFailure{} + + return failure, outcome +} + +func (f *Failer) Skip(message string, location types.CodeLocation) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.state == types.SpecStatePassed { + f.state = types.SpecStateSkipped + f.failure = types.SpecFailure{ + Message: message, + Location: location, + } + } +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go new file mode 100644 index 00000000..bc0dd1a6 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go @@ -0,0 +1,95 @@ +package leafnodes + +import ( + "math" + "time" + + "sync" + + "github.com/onsi/ginkgo/types" +) + +type benchmarker struct { + mu sync.Mutex + measurements map[string]*types.SpecMeasurement + orderCounter int +} + +func newBenchmarker() *benchmarker { + return &benchmarker{ + measurements: make(map[string]*types.SpecMeasurement, 0), + } +} + +func (b *benchmarker) Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration) { + t := time.Now() + body() + elapsedTime = time.Since(t) + + b.mu.Lock() + defer b.mu.Unlock() + measurement := b.getMeasurement(name, "Fastest Time", "Slowest Time", "Average Time", "s", info...) + measurement.Results = append(measurement.Results, elapsedTime.Seconds()) + + return +} + +func (b *benchmarker) RecordValue(name string, value float64, info ...interface{}) { + measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", "", info...) 
+ b.mu.Lock() + defer b.mu.Unlock() + measurement.Results = append(measurement.Results, value) +} + +func (b *benchmarker) getMeasurement(name string, smallestLabel string, largestLabel string, averageLabel string, units string, info ...interface{}) *types.SpecMeasurement { + measurement, ok := b.measurements[name] + if !ok { + var computedInfo interface{} + computedInfo = nil + if len(info) > 0 { + computedInfo = info[0] + } + measurement = &types.SpecMeasurement{ + Name: name, + Info: computedInfo, + Order: b.orderCounter, + SmallestLabel: smallestLabel, + LargestLabel: largestLabel, + AverageLabel: averageLabel, + Units: units, + Results: make([]float64, 0), + } + b.measurements[name] = measurement + b.orderCounter++ + } + + return measurement +} + +func (b *benchmarker) measurementsReport() map[string]*types.SpecMeasurement { + b.mu.Lock() + defer b.mu.Unlock() + for _, measurement := range b.measurements { + measurement.Smallest = math.MaxFloat64 + measurement.Largest = -math.MaxFloat64 + sum := float64(0) + sumOfSquares := float64(0) + + for _, result := range measurement.Results { + if result > measurement.Largest { + measurement.Largest = result + } + if result < measurement.Smallest { + measurement.Smallest = result + } + sum += result + sumOfSquares += result * result + } + + n := float64(len(measurement.Results)) + measurement.Average = sum / n + measurement.StdDeviation = math.Sqrt(sumOfSquares/n - (sum/n)*(sum/n)) + } + + return b.measurements +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go new file mode 100644 index 00000000..8c3902d6 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go @@ -0,0 +1,19 @@ +package leafnodes + +import ( + "github.com/onsi/ginkgo/types" +) + +type BasicNode interface { + Type() types.SpecComponentType + Run() (types.SpecState, types.SpecFailure) + CodeLocation() types.CodeLocation +} + +type SubjectNode interface { + BasicNode + + Text() string + Flag() types.FlagType + Samples() int +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go new file mode 100644 index 00000000..c76fe3a4 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go @@ -0,0 +1,46 @@ +package leafnodes + +import ( + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" + "time" +) + +type ItNode struct { + runner *runner + + flag types.FlagType + text string +} + +func NewItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *ItNode { + return &ItNode{ + runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeIt, componentIndex), + flag: flag, + text: text, + } +} + +func (node *ItNode) Run() (outcome types.SpecState, failure types.SpecFailure) { + return node.runner.run() +} + +func (node *ItNode) Type() types.SpecComponentType { + return types.SpecComponentTypeIt +} + +func (node *ItNode) Text() string { + return node.text +} + +func (node *ItNode) Flag() types.FlagType { + return node.flag +} + +func (node *ItNode) CodeLocation() types.CodeLocation { + return node.runner.codeLocation +} + +func (node *ItNode) Samples() int { + return 1 +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go 
b/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go new file mode 100644 index 00000000..efc3348c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go @@ -0,0 +1,61 @@ +package leafnodes + +import ( + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" + "reflect" +) + +type MeasureNode struct { + runner *runner + + text string + flag types.FlagType + samples int + benchmarker *benchmarker +} + +func NewMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int, failer *failer.Failer, componentIndex int) *MeasureNode { + benchmarker := newBenchmarker() + + wrappedBody := func() { + reflect.ValueOf(body).Call([]reflect.Value{reflect.ValueOf(benchmarker)}) + } + + return &MeasureNode{ + runner: newRunner(wrappedBody, codeLocation, 0, failer, types.SpecComponentTypeMeasure, componentIndex), + + text: text, + flag: flag, + samples: samples, + benchmarker: benchmarker, + } +} + +func (node *MeasureNode) Run() (outcome types.SpecState, failure types.SpecFailure) { + return node.runner.run() +} + +func (node *MeasureNode) MeasurementsReport() map[string]*types.SpecMeasurement { + return node.benchmarker.measurementsReport() +} + +func (node *MeasureNode) Type() types.SpecComponentType { + return types.SpecComponentTypeMeasure +} + +func (node *MeasureNode) Text() string { + return node.text +} + +func (node *MeasureNode) Flag() types.FlagType { + return node.flag +} + +func (node *MeasureNode) CodeLocation() types.CodeLocation { + return node.runner.codeLocation +} + +func (node *MeasureNode) Samples() int { + return node.samples +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go new file mode 100644 index 00000000..003f8516 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go @@ -0,0 +1,113 @@ +package leafnodes + +import ( + "fmt" + "github.com/onsi/ginkgo/internal/codelocation" + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" + "reflect" + "time" +) + +type runner struct { + isAsync bool + asyncFunc func(chan<- interface{}) + syncFunc func() + codeLocation types.CodeLocation + timeoutThreshold time.Duration + nodeType types.SpecComponentType + componentIndex int + failer *failer.Failer +} + +func newRunner(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, nodeType types.SpecComponentType, componentIndex int) *runner { + bodyType := reflect.TypeOf(body) + if bodyType.Kind() != reflect.Func { + panic(fmt.Sprintf("Expected a function but got something else at %v", codeLocation)) + } + + runner := &runner{ + codeLocation: codeLocation, + timeoutThreshold: timeout, + failer: failer, + nodeType: nodeType, + componentIndex: componentIndex, + } + + switch bodyType.NumIn() { + case 0: + runner.syncFunc = body.(func()) + return runner + case 1: + if !(bodyType.In(0).Kind() == reflect.Chan && bodyType.In(0).Elem().Kind() == reflect.Interface) { + panic(fmt.Sprintf("Must pass a Done channel to function at %v", codeLocation)) + } + + wrappedBody := func(done chan<- interface{}) { + bodyValue := reflect.ValueOf(body) + bodyValue.Call([]reflect.Value{reflect.ValueOf(done)}) + } + + runner.isAsync = true + runner.asyncFunc = wrappedBody + return runner + } + + panic(fmt.Sprintf("Too many arguments to function at %v", codeLocation)) 
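	// To recap the two body shapes accepted above (an editorial illustration, not upstream code):
	//
	//   It("synchronous", func() { ... })         // NumIn() == 0 -> stored as syncFunc
	//   It("asynchronous", func(done Done) {      // NumIn() == 1 with a chan<- interface{} -> asyncFunc
	//       go func() { doSlowWork(); close(done) }() // doSlowWork is hypothetical
	//   }, 5.0)                                   // optional timeout, in seconds
	//
	// Any other signature panics at construction time, as the checks above show.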
+} + +func (r *runner) run() (outcome types.SpecState, failure types.SpecFailure) { + if r.isAsync { + return r.runAsync() + } else { + return r.runSync() + } +} + +func (r *runner) runAsync() (outcome types.SpecState, failure types.SpecFailure) { + done := make(chan interface{}, 1) + + go func() { + finished := false + + defer func() { + if e := recover(); e != nil || !finished { + r.failer.Panic(codelocation.New(2), e) + select { + case <-done: + break + default: + close(done) + } + } + }() + + r.asyncFunc(done) + finished = true + }() + + select { + case <-done: + case <-time.After(r.timeoutThreshold): + r.failer.Timeout(r.codeLocation) + } + + failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation) + return +} +func (r *runner) runSync() (outcome types.SpecState, failure types.SpecFailure) { + finished := false + + defer func() { + if e := recover(); e != nil || !finished { + r.failer.Panic(codelocation.New(2), e) + } + + failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation) + }() + + r.syncFunc() + finished = true + + return +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go new file mode 100644 index 00000000..6b725a63 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go @@ -0,0 +1,41 @@ +package leafnodes + +import ( + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" + "time" +) + +type SetupNode struct { + runner *runner +} + +func (node *SetupNode) Run() (outcome types.SpecState, failure types.SpecFailure) { + return node.runner.run() +} + +func (node *SetupNode) Type() types.SpecComponentType { + return node.runner.nodeType +} + +func (node *SetupNode) CodeLocation() types.CodeLocation { + return node.runner.codeLocation +} + +func NewBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode { + return &SetupNode{ + runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeEach, componentIndex), + } +} + +func NewAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode { + return &SetupNode{ + runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterEach, componentIndex), + } +} + +func NewJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode { + return &SetupNode{ + runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustBeforeEach, componentIndex), + } +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go new file mode 100644 index 00000000..2ccc7dc0 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go @@ -0,0 +1,54 @@ +package leafnodes + +import ( + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" + "time" +) + +type SuiteNode interface { + Run(parallelNode int, parallelTotal int, syncHost string) bool + Passed() bool + Summary() *types.SetupSummary +} + +type simpleSuiteNode struct { + runner *runner + outcome types.SpecState + failure types.SpecFailure + runTime time.Duration +} + +func (node *simpleSuiteNode) 
Run(parallelNode int, parallelTotal int, syncHost string) bool { + t := time.Now() + node.outcome, node.failure = node.runner.run() + node.runTime = time.Since(t) + + return node.outcome == types.SpecStatePassed +} + +func (node *simpleSuiteNode) Passed() bool { + return node.outcome == types.SpecStatePassed +} + +func (node *simpleSuiteNode) Summary() *types.SetupSummary { + return &types.SetupSummary{ + ComponentType: node.runner.nodeType, + CodeLocation: node.runner.codeLocation, + State: node.outcome, + RunTime: node.runTime, + Failure: node.failure, + } +} + +func NewBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode { + return &simpleSuiteNode{ + runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0), + } +} + +func NewAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode { + return &simpleSuiteNode{ + runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0), + } +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go new file mode 100644 index 00000000..e7030d91 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go @@ -0,0 +1,89 @@ +package leafnodes + +import ( + "encoding/json" + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" + "io/ioutil" + "net/http" + "time" +) + +type synchronizedAfterSuiteNode struct { + runnerA *runner + runnerB *runner + + outcome types.SpecState + failure types.SpecFailure + runTime time.Duration +} + +func NewSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode { + return &synchronizedAfterSuiteNode{ + runnerA: newRunner(bodyA, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0), + runnerB: newRunner(bodyB, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0), + } +} + +func (node *synchronizedAfterSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool { + node.outcome, node.failure = node.runnerA.run() + + if parallelNode == 1 { + if parallelTotal > 1 { + node.waitUntilOtherNodesAreDone(syncHost) + } + + outcome, failure := node.runnerB.run() + + if node.outcome == types.SpecStatePassed { + node.outcome, node.failure = outcome, failure + } + } + + return node.outcome == types.SpecStatePassed +} + +func (node *synchronizedAfterSuiteNode) Passed() bool { + return node.outcome == types.SpecStatePassed +} + +func (node *synchronizedAfterSuiteNode) Summary() *types.SetupSummary { + return &types.SetupSummary{ + ComponentType: node.runnerA.nodeType, + CodeLocation: node.runnerA.codeLocation, + State: node.outcome, + RunTime: node.runTime, + Failure: node.failure, + } +} + +func (node *synchronizedAfterSuiteNode) waitUntilOtherNodesAreDone(syncHost string) { + for { + if node.canRun(syncHost) { + return + } + + time.Sleep(50 * time.Millisecond) + } +} + +func (node *synchronizedAfterSuiteNode) canRun(syncHost string) bool { + resp, err := http.Get(syncHost + "/RemoteAfterSuiteData") + if err != nil || resp.StatusCode != http.StatusOK { + return false + } + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return false + } + resp.Body.Close() + + 
afterSuiteData := types.RemoteAfterSuiteData{} + err = json.Unmarshal(body, &afterSuiteData) + if err != nil { + return false + } + + return afterSuiteData.CanRun +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go new file mode 100644 index 00000000..76a96798 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go @@ -0,0 +1,182 @@ +package leafnodes + +import ( + "bytes" + "encoding/json" + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" + "io/ioutil" + "net/http" + "reflect" + "time" +) + +type synchronizedBeforeSuiteNode struct { + runnerA *runner + runnerB *runner + + data []byte + + outcome types.SpecState + failure types.SpecFailure + runTime time.Duration +} + +func NewSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode { + node := &synchronizedBeforeSuiteNode{} + + node.runnerA = newRunner(node.wrapA(bodyA), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0) + node.runnerB = newRunner(node.wrapB(bodyB), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0) + + return node +} + +func (node *synchronizedBeforeSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool { + t := time.Now() + defer func() { + node.runTime = time.Since(t) + }() + + if parallelNode == 1 { + node.outcome, node.failure = node.runA(parallelTotal, syncHost) + } else { + node.outcome, node.failure = node.waitForA(syncHost) + } + + if node.outcome != types.SpecStatePassed { + return false + } + node.outcome, node.failure = node.runnerB.run() + + return node.outcome == types.SpecStatePassed +} + +func (node *synchronizedBeforeSuiteNode) runA(parallelTotal int, syncHost string) (types.SpecState, types.SpecFailure) { + outcome, failure := node.runnerA.run() + + if parallelTotal > 1 { + state := types.RemoteBeforeSuiteStatePassed + if outcome != types.SpecStatePassed { + state = types.RemoteBeforeSuiteStateFailed + } + json := (types.RemoteBeforeSuiteData{ + Data: node.data, + State: state, + }).ToJSON() + http.Post(syncHost+"/BeforeSuiteState", "application/json", bytes.NewBuffer(json)) + } + + return outcome, failure +} + +func (node *synchronizedBeforeSuiteNode) waitForA(syncHost string) (types.SpecState, types.SpecFailure) { + failure := func(message string) types.SpecFailure { + return types.SpecFailure{ + Message: message, + Location: node.runnerA.codeLocation, + ComponentType: node.runnerA.nodeType, + ComponentIndex: node.runnerA.componentIndex, + ComponentCodeLocation: node.runnerA.codeLocation, + } + } + for { + resp, err := http.Get(syncHost + "/BeforeSuiteState") + if err != nil || resp.StatusCode != http.StatusOK { + return types.SpecStateFailed, failure("Failed to fetch BeforeSuite state") + } + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return types.SpecStateFailed, failure("Failed to read BeforeSuite state") + } + resp.Body.Close() + + beforeSuiteData := types.RemoteBeforeSuiteData{} + err = json.Unmarshal(body, &beforeSuiteData) + if err != nil { + return types.SpecStateFailed, failure("Failed to decode BeforeSuite state") + } + + switch beforeSuiteData.State { + case types.RemoteBeforeSuiteStatePassed: + node.data = beforeSuiteData.Data + return types.SpecStatePassed, types.SpecFailure{} + 
case types.RemoteBeforeSuiteStateFailed: + return types.SpecStateFailed, failure("BeforeSuite on Node 1 failed") + case types.RemoteBeforeSuiteStateDisappeared: + return types.SpecStateFailed, failure("Node 1 disappeared before completing BeforeSuite") + } + + time.Sleep(50 * time.Millisecond) + } + + return types.SpecStateFailed, failure("Shouldn't get here!") +} + +func (node *synchronizedBeforeSuiteNode) Passed() bool { + return node.outcome == types.SpecStatePassed +} + +func (node *synchronizedBeforeSuiteNode) Summary() *types.SetupSummary { + return &types.SetupSummary{ + ComponentType: node.runnerA.nodeType, + CodeLocation: node.runnerA.codeLocation, + State: node.outcome, + RunTime: node.runTime, + Failure: node.failure, + } +} + +func (node *synchronizedBeforeSuiteNode) wrapA(bodyA interface{}) interface{} { + typeA := reflect.TypeOf(bodyA) + if typeA.Kind() != reflect.Func { + panic("SynchronizedBeforeSuite expects a function as its first argument") + } + + takesNothing := typeA.NumIn() == 0 + takesADoneChannel := typeA.NumIn() == 1 && typeA.In(0).Kind() == reflect.Chan && typeA.In(0).Elem().Kind() == reflect.Interface + returnsBytes := typeA.NumOut() == 1 && typeA.Out(0).Kind() == reflect.Slice && typeA.Out(0).Elem().Kind() == reflect.Uint8 + + if !((takesNothing || takesADoneChannel) && returnsBytes) { + panic("SynchronizedBeforeSuite's first argument should be a function that returns []byte and either takes no arguments or takes a Done channel.") + } + + if takesADoneChannel { + return func(done chan<- interface{}) { + out := reflect.ValueOf(bodyA).Call([]reflect.Value{reflect.ValueOf(done)}) + node.data = out[0].Interface().([]byte) + } + } + + return func() { + out := reflect.ValueOf(bodyA).Call([]reflect.Value{}) + node.data = out[0].Interface().([]byte) + } +} + +func (node *synchronizedBeforeSuiteNode) wrapB(bodyB interface{}) interface{} { + typeB := reflect.TypeOf(bodyB) + if typeB.Kind() != reflect.Func { + panic("SynchronizedBeforeSuite expects a function as its second argument") + } + + returnsNothing := typeB.NumOut() == 0 + takesBytesOnly := typeB.NumIn() == 1 && typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8 + takesBytesAndDone := typeB.NumIn() == 2 && + typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8 && + typeB.In(1).Kind() == reflect.Chan && typeB.In(1).Elem().Kind() == reflect.Interface + + if !((takesBytesOnly || takesBytesAndDone) && returnsNothing) { + panic("SynchronizedBeforeSuite's second argument should be a function that returns nothing and either takes []byte or ([]byte, Done)") + } + + if takesBytesAndDone { + return func(done chan<- interface{}) { + reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data), reflect.ValueOf(done)}) + } + } + + return func() { + reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data)}) + } +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go new file mode 100644 index 00000000..1e34dbf6 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go @@ -0,0 +1,250 @@ +/* + +Aggregator is a reporter used by the Ginkgo CLI to aggregate and present parallel test output +coherently as tests complete. You shouldn't need to use this in your code. To run tests in parallel: + + ginkgo -nodes=N + +where N is the number of nodes you desire. 
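As a rough, illustrative sketch of the wiring (not the actual CLI code), the aggregator is registered with the remote server in this package and reports the combined result on a channel:

	result := make(chan bool)
	aggregator := NewAggregator(nodeCount, result, reporterConfig, steno) // steno: some stenographer.Stenographer, reporterConfig: a config.DefaultReporterConfigType
	server.RegisterReporters(aggregator)                                  // the remote server forwards each node's events to it
	passed := <-result                                                    // delivered once every node's suite summary has arrived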
+*/ +package remote + +import ( + "time" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/reporters/stenographer" + "github.com/onsi/ginkgo/types" +) + +type configAndSuite struct { + config config.GinkgoConfigType + summary *types.SuiteSummary +} + +type Aggregator struct { + nodeCount int + config config.DefaultReporterConfigType + stenographer stenographer.Stenographer + result chan bool + + suiteBeginnings chan configAndSuite + aggregatedSuiteBeginnings []configAndSuite + + beforeSuites chan *types.SetupSummary + aggregatedBeforeSuites []*types.SetupSummary + + afterSuites chan *types.SetupSummary + aggregatedAfterSuites []*types.SetupSummary + + specCompletions chan *types.SpecSummary + completedSpecs []*types.SpecSummary + + suiteEndings chan *types.SuiteSummary + aggregatedSuiteEndings []*types.SuiteSummary + specs []*types.SpecSummary + + startTime time.Time +} + +func NewAggregator(nodeCount int, result chan bool, config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *Aggregator { + aggregator := &Aggregator{ + nodeCount: nodeCount, + result: result, + config: config, + stenographer: stenographer, + + suiteBeginnings: make(chan configAndSuite, 0), + beforeSuites: make(chan *types.SetupSummary, 0), + afterSuites: make(chan *types.SetupSummary, 0), + specCompletions: make(chan *types.SpecSummary, 0), + suiteEndings: make(chan *types.SuiteSummary, 0), + } + + go aggregator.mux() + + return aggregator +} + +func (aggregator *Aggregator) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { + aggregator.suiteBeginnings <- configAndSuite{config, summary} +} + +func (aggregator *Aggregator) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { + aggregator.beforeSuites <- setupSummary +} + +func (aggregator *Aggregator) AfterSuiteDidRun(setupSummary *types.SetupSummary) { + aggregator.afterSuites <- setupSummary +} + +func (aggregator *Aggregator) SpecWillRun(specSummary *types.SpecSummary) { + //noop +} + +func (aggregator *Aggregator) SpecDidComplete(specSummary *types.SpecSummary) { + aggregator.specCompletions <- specSummary +} + +func (aggregator *Aggregator) SpecSuiteDidEnd(summary *types.SuiteSummary) { + aggregator.suiteEndings <- summary +} + +func (aggregator *Aggregator) mux() { +loop: + for { + select { + case configAndSuite := <-aggregator.suiteBeginnings: + aggregator.registerSuiteBeginning(configAndSuite) + case setupSummary := <-aggregator.beforeSuites: + aggregator.registerBeforeSuite(setupSummary) + case setupSummary := <-aggregator.afterSuites: + aggregator.registerAfterSuite(setupSummary) + case specSummary := <-aggregator.specCompletions: + aggregator.registerSpecCompletion(specSummary) + case suite := <-aggregator.suiteEndings: + finished, passed := aggregator.registerSuiteEnding(suite) + if finished { + aggregator.result <- passed + break loop + } + } + } +} + +func (aggregator *Aggregator) registerSuiteBeginning(configAndSuite configAndSuite) { + aggregator.aggregatedSuiteBeginnings = append(aggregator.aggregatedSuiteBeginnings, configAndSuite) + + if len(aggregator.aggregatedSuiteBeginnings) == 1 { + aggregator.startTime = time.Now() + } + + if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount { + return + } + + aggregator.stenographer.AnnounceSuite(configAndSuite.summary.SuiteDescription, configAndSuite.config.RandomSeed, configAndSuite.config.RandomizeAllSpecs, aggregator.config.Succinct) + + numberOfSpecsToRun := 0 + totalNumberOfSpecs := 0 + for _, 
configAndSuite := range aggregator.aggregatedSuiteBeginnings { + numberOfSpecsToRun += configAndSuite.summary.NumberOfSpecsThatWillBeRun + totalNumberOfSpecs += configAndSuite.summary.NumberOfTotalSpecs + } + + aggregator.stenographer.AnnounceNumberOfSpecs(numberOfSpecsToRun, totalNumberOfSpecs, aggregator.config.Succinct) + aggregator.stenographer.AnnounceAggregatedParallelRun(aggregator.nodeCount, aggregator.config.Succinct) + aggregator.flushCompletedSpecs() +} + +func (aggregator *Aggregator) registerBeforeSuite(setupSummary *types.SetupSummary) { + aggregator.aggregatedBeforeSuites = append(aggregator.aggregatedBeforeSuites, setupSummary) + aggregator.flushCompletedSpecs() +} + +func (aggregator *Aggregator) registerAfterSuite(setupSummary *types.SetupSummary) { + aggregator.aggregatedAfterSuites = append(aggregator.aggregatedAfterSuites, setupSummary) + aggregator.flushCompletedSpecs() +} + +func (aggregator *Aggregator) registerSpecCompletion(specSummary *types.SpecSummary) { + aggregator.completedSpecs = append(aggregator.completedSpecs, specSummary) + aggregator.specs = append(aggregator.specs, specSummary) + aggregator.flushCompletedSpecs() +} + +func (aggregator *Aggregator) flushCompletedSpecs() { + if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount { + return + } + + for _, setupSummary := range aggregator.aggregatedBeforeSuites { + aggregator.announceBeforeSuite(setupSummary) + } + + for _, specSummary := range aggregator.completedSpecs { + aggregator.announceSpec(specSummary) + } + + for _, setupSummary := range aggregator.aggregatedAfterSuites { + aggregator.announceAfterSuite(setupSummary) + } + + aggregator.aggregatedBeforeSuites = []*types.SetupSummary{} + aggregator.completedSpecs = []*types.SpecSummary{} + aggregator.aggregatedAfterSuites = []*types.SetupSummary{} +} + +func (aggregator *Aggregator) announceBeforeSuite(setupSummary *types.SetupSummary) { + aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput) + if setupSummary.State != types.SpecStatePassed { + aggregator.stenographer.AnnounceBeforeSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace) + } +} + +func (aggregator *Aggregator) announceAfterSuite(setupSummary *types.SetupSummary) { + aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput) + if setupSummary.State != types.SpecStatePassed { + aggregator.stenographer.AnnounceAfterSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace) + } +} + +func (aggregator *Aggregator) announceSpec(specSummary *types.SpecSummary) { + if aggregator.config.Verbose && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped { + aggregator.stenographer.AnnounceSpecWillRun(specSummary) + } + + aggregator.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput) + + switch specSummary.State { + case types.SpecStatePassed: + if specSummary.IsMeasurement { + aggregator.stenographer.AnnounceSuccesfulMeasurement(specSummary, aggregator.config.Succinct) + } else if specSummary.RunTime.Seconds() >= aggregator.config.SlowSpecThreshold { + aggregator.stenographer.AnnounceSuccesfulSlowSpec(specSummary, aggregator.config.Succinct) + } else { + aggregator.stenographer.AnnounceSuccesfulSpec(specSummary) + } + + case types.SpecStatePending: + aggregator.stenographer.AnnouncePendingSpec(specSummary, aggregator.config.NoisyPendings && !aggregator.config.Succinct) + case types.SpecStateSkipped: + 
aggregator.stenographer.AnnounceSkippedSpec(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace) + case types.SpecStateTimedOut: + aggregator.stenographer.AnnounceSpecTimedOut(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace) + case types.SpecStatePanicked: + aggregator.stenographer.AnnounceSpecPanicked(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace) + case types.SpecStateFailed: + aggregator.stenographer.AnnounceSpecFailed(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace) + } +} + +func (aggregator *Aggregator) registerSuiteEnding(suite *types.SuiteSummary) (finished bool, passed bool) { + aggregator.aggregatedSuiteEndings = append(aggregator.aggregatedSuiteEndings, suite) + if len(aggregator.aggregatedSuiteEndings) < aggregator.nodeCount { + return false, false + } + + aggregatedSuiteSummary := &types.SuiteSummary{} + aggregatedSuiteSummary.SuiteSucceeded = true + + for _, suiteSummary := range aggregator.aggregatedSuiteEndings { + if suiteSummary.SuiteSucceeded == false { + aggregatedSuiteSummary.SuiteSucceeded = false + } + + aggregatedSuiteSummary.NumberOfSpecsThatWillBeRun += suiteSummary.NumberOfSpecsThatWillBeRun + aggregatedSuiteSummary.NumberOfTotalSpecs += suiteSummary.NumberOfTotalSpecs + aggregatedSuiteSummary.NumberOfPassedSpecs += suiteSummary.NumberOfPassedSpecs + aggregatedSuiteSummary.NumberOfFailedSpecs += suiteSummary.NumberOfFailedSpecs + aggregatedSuiteSummary.NumberOfPendingSpecs += suiteSummary.NumberOfPendingSpecs + aggregatedSuiteSummary.NumberOfSkippedSpecs += suiteSummary.NumberOfSkippedSpecs + } + + aggregatedSuiteSummary.RunTime = time.Since(aggregator.startTime) + + aggregator.stenographer.SummarizeFailures(aggregator.specs) + aggregator.stenographer.AnnounceSpecRunCompletion(aggregatedSuiteSummary, aggregator.config.Succinct) + + return true, aggregatedSuiteSummary.SuiteSucceeded +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go b/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go new file mode 100644 index 00000000..025eb506 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go @@ -0,0 +1,90 @@ +package remote + +import ( + "bytes" + "encoding/json" + "io" + "net/http" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/types" +) + +//An interface to net/http's client to allow the injection of fakes under test +type Poster interface { + Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error) +} + +/* +The ForwardingReporter is a Ginkgo reporter that forwards information to +a Ginkgo remote server. + +When streaming parallel test output, this repoter is automatically installed by Ginkgo. + +This is accomplished by passing in the GINKGO_REMOTE_REPORTING_SERVER environment variable to `go test`, the Ginkgo test runner +detects this environment variable (which should contain the host of the server) and automatically installs a ForwardingReporter +in place of Ginkgo's DefaultReporter. 
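Constructed by hand it would look roughly like this (illustrative only; the server address would normally come from the environment variable described above):

	reporter := NewForwardingReporter(
		"http://127.0.0.1:8765", // host of the remote server
		&http.Client{},          // any Poster; *http.Client satisfies the interface
		NewOutputInterceptor(),  // captures output between reporter callbacks
	)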
+*/ + +type ForwardingReporter struct { + serverHost string + poster Poster + outputInterceptor OutputInterceptor +} + +func NewForwardingReporter(serverHost string, poster Poster, outputInterceptor OutputInterceptor) *ForwardingReporter { + return &ForwardingReporter{ + serverHost: serverHost, + poster: poster, + outputInterceptor: outputInterceptor, + } +} + +func (reporter *ForwardingReporter) post(path string, data interface{}) { + encoded, _ := json.Marshal(data) + buffer := bytes.NewBuffer(encoded) + reporter.poster.Post(reporter.serverHost+path, "application/json", buffer) +} + +func (reporter *ForwardingReporter) SpecSuiteWillBegin(conf config.GinkgoConfigType, summary *types.SuiteSummary) { + data := struct { + Config config.GinkgoConfigType `json:"config"` + Summary *types.SuiteSummary `json:"suite-summary"` + }{ + conf, + summary, + } + + reporter.outputInterceptor.StartInterceptingOutput() + reporter.post("/SpecSuiteWillBegin", data) +} + +func (reporter *ForwardingReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { + output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput() + reporter.outputInterceptor.StartInterceptingOutput() + setupSummary.CapturedOutput = output + reporter.post("/BeforeSuiteDidRun", setupSummary) +} + +func (reporter *ForwardingReporter) SpecWillRun(specSummary *types.SpecSummary) { + reporter.post("/SpecWillRun", specSummary) +} + +func (reporter *ForwardingReporter) SpecDidComplete(specSummary *types.SpecSummary) { + output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput() + reporter.outputInterceptor.StartInterceptingOutput() + specSummary.CapturedOutput = output + reporter.post("/SpecDidComplete", specSummary) +} + +func (reporter *ForwardingReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { + output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput() + reporter.outputInterceptor.StartInterceptingOutput() + setupSummary.CapturedOutput = output + reporter.post("/AfterSuiteDidRun", setupSummary) +} + +func (reporter *ForwardingReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { + reporter.outputInterceptor.StopInterceptingAndReturnOutput() + reporter.post("/SpecSuiteDidEnd", summary) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go new file mode 100644 index 00000000..093f4513 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go @@ -0,0 +1,10 @@ +package remote + +/* +The OutputInterceptor is used by the ForwardingReporter to +intercept and capture all stdin and stderr output during a test run. 
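Concretely, the unix implementation further below redirects file descriptors 1 and 2 (stdout and stderr) into a temporary file and hands the captured text back when interception stops, while the Windows implementation is currently a no-op. A hypothetical caller would use it like so:

	interceptor := NewOutputInterceptor()
	interceptor.StartInterceptingOutput()
	fmt.Println("this goes to the temp file, not the terminal")
	output, err := interceptor.StopInterceptingAndReturnOutput() // output holds the captured text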
+*/ +type OutputInterceptor interface { + StartInterceptingOutput() error + StopInterceptingAndReturnOutput() (string, error) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go new file mode 100644 index 00000000..181b227e --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go @@ -0,0 +1,52 @@ +// +build freebsd openbsd netbsd dragonfly darwin linux + +package remote + +import ( + "errors" + "io/ioutil" + "os" + "syscall" +) + +func NewOutputInterceptor() OutputInterceptor { + return &outputInterceptor{} +} + +type outputInterceptor struct { + redirectFile *os.File + intercepting bool +} + +func (interceptor *outputInterceptor) StartInterceptingOutput() error { + if interceptor.intercepting { + return errors.New("Already intercepting output!") + } + interceptor.intercepting = true + + var err error + + interceptor.redirectFile, err = ioutil.TempFile("", "ginkgo-output") + if err != nil { + return err + } + + syscall.Dup2(int(interceptor.redirectFile.Fd()), 1) + syscall.Dup2(int(interceptor.redirectFile.Fd()), 2) + + return nil +} + +func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) { + if !interceptor.intercepting { + return "", errors.New("Not intercepting output!") + } + + interceptor.redirectFile.Close() + output, err := ioutil.ReadFile(interceptor.redirectFile.Name()) + os.Remove(interceptor.redirectFile.Name()) + + interceptor.intercepting = false + + return string(output), err +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go new file mode 100644 index 00000000..c8f97d97 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go @@ -0,0 +1,33 @@ +// +build windows + +package remote + +import ( + "errors" +) + +func NewOutputInterceptor() OutputInterceptor { + return &outputInterceptor{} +} + +type outputInterceptor struct { + intercepting bool +} + +func (interceptor *outputInterceptor) StartInterceptingOutput() error { + if interceptor.intercepting { + return errors.New("Already intercepting output!") + } + interceptor.intercepting = true + + // not working on windows... + + return nil +} + +func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) { + // not working on windows... + interceptor.intercepting = false + + return "", nil +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/server.go b/vendor/github.com/onsi/ginkgo/internal/remote/server.go new file mode 100644 index 00000000..b55c681b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/server.go @@ -0,0 +1,204 @@ +/* + +The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners. +This is used, primarily, to enable streaming parallel test output but has, in principal, broader applications (e.g. streaming test output to a browser). + +*/ + +package remote + +import ( + "encoding/json" + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/reporters" + "github.com/onsi/ginkgo/types" + "io/ioutil" + "net" + "net/http" + "sync" +) + +/* +Server spins up on an automatically selected port and listens for communication from the forwarding reporter. +It then forwards that communication to attached reporters. 
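A minimal usage sketch (illustrative; error handling elided):

	srv, err := NewServer(parallelTotal)
	if err != nil {
		// handle the listen error
	}
	srv.RegisterReporters(aggregator) // e.g. the Aggregator from this package
	srv.Start()
	defer srv.Close()
	// srv.Address() is the URL each node's ForwardingReporter posts back to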
+*/ +type Server struct { + listener net.Listener + reporters []reporters.Reporter + alives []func() bool + lock *sync.Mutex + beforeSuiteData types.RemoteBeforeSuiteData + parallelTotal int +} + +//Create a new server, automatically selecting a port +func NewServer(parallelTotal int) (*Server, error) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return nil, err + } + return &Server{ + listener: listener, + lock: &sync.Mutex{}, + alives: make([]func() bool, parallelTotal), + beforeSuiteData: types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStatePending}, + parallelTotal: parallelTotal, + }, nil +} + +//Start the server. You don't need to `go s.Start()`, just `s.Start()` +func (server *Server) Start() { + httpServer := &http.Server{} + mux := http.NewServeMux() + httpServer.Handler = mux + + //streaming endpoints + mux.HandleFunc("/SpecSuiteWillBegin", server.specSuiteWillBegin) + mux.HandleFunc("/BeforeSuiteDidRun", server.beforeSuiteDidRun) + mux.HandleFunc("/AfterSuiteDidRun", server.afterSuiteDidRun) + mux.HandleFunc("/SpecWillRun", server.specWillRun) + mux.HandleFunc("/SpecDidComplete", server.specDidComplete) + mux.HandleFunc("/SpecSuiteDidEnd", server.specSuiteDidEnd) + + //synchronization endpoints + mux.HandleFunc("/BeforeSuiteState", server.handleBeforeSuiteState) + mux.HandleFunc("/RemoteAfterSuiteData", server.handleRemoteAfterSuiteData) + + go httpServer.Serve(server.listener) +} + +//Stop the server +func (server *Server) Close() { + server.listener.Close() +} + +//The address the server can be reached it. Pass this into the `ForwardingReporter`. +func (server *Server) Address() string { + return "http://" + server.listener.Addr().String() +} + +// +// Streaming Endpoints +// + +//The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters` +func (server *Server) readAll(request *http.Request) []byte { + defer request.Body.Close() + body, _ := ioutil.ReadAll(request.Body) + return body +} + +func (server *Server) RegisterReporters(reporters ...reporters.Reporter) { + server.reporters = reporters +} + +func (server *Server) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) { + body := server.readAll(request) + + var data struct { + Config config.GinkgoConfigType `json:"config"` + Summary *types.SuiteSummary `json:"suite-summary"` + } + + json.Unmarshal(body, &data) + + for _, reporter := range server.reporters { + reporter.SpecSuiteWillBegin(data.Config, data.Summary) + } +} + +func (server *Server) beforeSuiteDidRun(writer http.ResponseWriter, request *http.Request) { + body := server.readAll(request) + var setupSummary *types.SetupSummary + json.Unmarshal(body, &setupSummary) + + for _, reporter := range server.reporters { + reporter.BeforeSuiteDidRun(setupSummary) + } +} + +func (server *Server) afterSuiteDidRun(writer http.ResponseWriter, request *http.Request) { + body := server.readAll(request) + var setupSummary *types.SetupSummary + json.Unmarshal(body, &setupSummary) + + for _, reporter := range server.reporters { + reporter.AfterSuiteDidRun(setupSummary) + } +} + +func (server *Server) specWillRun(writer http.ResponseWriter, request *http.Request) { + body := server.readAll(request) + var specSummary *types.SpecSummary + json.Unmarshal(body, &specSummary) + + for _, reporter := range server.reporters { + reporter.SpecWillRun(specSummary) + } +} + +func (server *Server) specDidComplete(writer http.ResponseWriter, request *http.Request) { + body := server.readAll(request) + 
var specSummary *types.SpecSummary + json.Unmarshal(body, &specSummary) + + for _, reporter := range server.reporters { + reporter.SpecDidComplete(specSummary) + } +} + +func (server *Server) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) { + body := server.readAll(request) + var suiteSummary *types.SuiteSummary + json.Unmarshal(body, &suiteSummary) + + for _, reporter := range server.reporters { + reporter.SpecSuiteDidEnd(suiteSummary) + } +} + +// +// Synchronization Endpoints +// + +func (server *Server) RegisterAlive(node int, alive func() bool) { + server.lock.Lock() + defer server.lock.Unlock() + server.alives[node-1] = alive +} + +func (server *Server) nodeIsAlive(node int) bool { + server.lock.Lock() + defer server.lock.Unlock() + alive := server.alives[node-1] + if alive == nil { + return true + } + return alive() +} + +func (server *Server) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) { + if request.Method == "POST" { + dec := json.NewDecoder(request.Body) + dec.Decode(&(server.beforeSuiteData)) + } else { + beforeSuiteData := server.beforeSuiteData + if beforeSuiteData.State == types.RemoteBeforeSuiteStatePending && !server.nodeIsAlive(1) { + beforeSuiteData.State = types.RemoteBeforeSuiteStateDisappeared + } + enc := json.NewEncoder(writer) + enc.Encode(beforeSuiteData) + } +} + +func (server *Server) handleRemoteAfterSuiteData(writer http.ResponseWriter, request *http.Request) { + afterSuiteData := types.RemoteAfterSuiteData{ + CanRun: true, + } + for i := 2; i <= server.parallelTotal; i++ { + afterSuiteData.CanRun = afterSuiteData.CanRun && !server.nodeIsAlive(i) + } + + enc := json.NewEncoder(writer) + enc.Encode(afterSuiteData) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/index_computer.go b/vendor/github.com/onsi/ginkgo/internal/spec/index_computer.go new file mode 100644 index 00000000..5a67fc7b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec/index_computer.go @@ -0,0 +1,55 @@ +package spec + +func ParallelizedIndexRange(length int, parallelTotal int, parallelNode int) (startIndex int, count int) { + if length == 0 { + return 0, 0 + } + + // We have more nodes than tests. Trivial case. 
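	// For example (illustrative numbers): with length = 3 and parallelTotal = 5,
	// nodes 1-3 each run a single spec (start indices 0, 1, 2) and nodes 4 and 5 run none.
	// In the general case handled below, specs are split as evenly as possible: with
	// length = 10 and parallelTotal = 4, nodes 1 and 2 run three specs each (starting at
	// indices 0 and 3) and nodes 3 and 4 run two each (starting at 6 and 8).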
+ if parallelTotal >= length { + if parallelNode > length { + return 0, 0 + } else { + return parallelNode - 1, 1 + } + } + + // This is the minimum amount of tests that a node will be required to run + minTestsPerNode := length / parallelTotal + + // This is the maximum amount of tests that a node will be required to run + // The algorithm guarantees that this would be equal to at least the minimum amount + // and at most one more + maxTestsPerNode := minTestsPerNode + if length%parallelTotal != 0 { + maxTestsPerNode++ + } + + // Number of nodes that will have to run the maximum amount of tests per node + numMaxLoadNodes := length % parallelTotal + + // Number of nodes that precede the current node and will have to run the maximum amount of tests per node + var numPrecedingMaxLoadNodes int + if parallelNode > numMaxLoadNodes { + numPrecedingMaxLoadNodes = numMaxLoadNodes + } else { + numPrecedingMaxLoadNodes = parallelNode - 1 + } + + // Number of nodes that precede the current node and will have to run the minimum amount of tests per node + var numPrecedingMinLoadNodes int + if parallelNode <= numMaxLoadNodes { + numPrecedingMinLoadNodes = 0 + } else { + numPrecedingMinLoadNodes = parallelNode - numMaxLoadNodes - 1 + } + + // Evaluate the test start index and number of tests to run + startIndex = numPrecedingMaxLoadNodes*maxTestsPerNode + numPrecedingMinLoadNodes*minTestsPerNode + if parallelNode > numMaxLoadNodes { + count = minTestsPerNode + } else { + count = maxTestsPerNode + } + return +} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/spec.go b/vendor/github.com/onsi/ginkgo/internal/spec/spec.go new file mode 100644 index 00000000..ef788b76 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec/spec.go @@ -0,0 +1,197 @@ +package spec + +import ( + "fmt" + "io" + "time" + + "github.com/onsi/ginkgo/internal/containernode" + "github.com/onsi/ginkgo/internal/leafnodes" + "github.com/onsi/ginkgo/types" +) + +type Spec struct { + subject leafnodes.SubjectNode + focused bool + announceProgress bool + + containers []*containernode.ContainerNode + + state types.SpecState + runTime time.Duration + failure types.SpecFailure +} + +func New(subject leafnodes.SubjectNode, containers []*containernode.ContainerNode, announceProgress bool) *Spec { + spec := &Spec{ + subject: subject, + containers: containers, + focused: subject.Flag() == types.FlagTypeFocused, + announceProgress: announceProgress, + } + + spec.processFlag(subject.Flag()) + for i := len(containers) - 1; i >= 0; i-- { + spec.processFlag(containers[i].Flag()) + } + + return spec +} + +func (spec *Spec) processFlag(flag types.FlagType) { + if flag == types.FlagTypeFocused { + spec.focused = true + } else if flag == types.FlagTypePending { + spec.state = types.SpecStatePending + } +} + +func (spec *Spec) Skip() { + spec.state = types.SpecStateSkipped +} + +func (spec *Spec) Failed() bool { + return spec.state == types.SpecStateFailed || spec.state == types.SpecStatePanicked || spec.state == types.SpecStateTimedOut +} + +func (spec *Spec) Passed() bool { + return spec.state == types.SpecStatePassed +} + +func (spec *Spec) Pending() bool { + return spec.state == types.SpecStatePending +} + +func (spec *Spec) Skipped() bool { + return spec.state == types.SpecStateSkipped +} + +func (spec *Spec) Focused() bool { + return spec.focused +} + +func (spec *Spec) IsMeasurement() bool { + return spec.subject.Type() == types.SpecComponentTypeMeasure +} + +func (spec *Spec) 
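The index computer above hands each parallel node either ⌊length/total⌋ or ⌈length/total⌉ consecutive specs, with the first length%total nodes taking the larger share. A short worked example that re-derives the same arithmetic (10 specs across 3 nodes yields counts of 4, 3 and 3 starting at indices 0, 4 and 7):

package main

import "fmt"

// indexRange re-derives the arithmetic used by ParallelizedIndexRange above.
func indexRange(length, total, node int) (start, count int) {
	if length == 0 {
		return 0, 0
	}
	if total >= length {
		// More nodes than specs: one spec per node, extra nodes get nothing.
		if node > length {
			return 0, 0
		}
		return node - 1, 1
	}
	minPer := length / total // every node runs at least this many specs
	maxPer := minPer
	if length%total != 0 {
		maxPer++ // some nodes carry one extra spec
	}
	numMaxLoadNodes := length % total
	if node <= numMaxLoadNodes {
		return (node - 1) * maxPer, maxPer
	}
	return numMaxLoadNodes*maxPer + (node-numMaxLoadNodes-1)*minPer, minPer
}

func main() {
	// 10 specs over 3 nodes: node 1 runs specs [0,4), node 2 runs [4,7), node 3 runs [7,10).
	for node := 1; node <= 3; node++ {
		start, count := indexRange(10, 3, node)
		fmt.Printf("node %d: start=%d count=%d\n", node, start, count)
	}
}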
Summary(suiteID string) *types.SpecSummary { + componentTexts := make([]string, len(spec.containers)+1) + componentCodeLocations := make([]types.CodeLocation, len(spec.containers)+1) + + for i, container := range spec.containers { + componentTexts[i] = container.Text() + componentCodeLocations[i] = container.CodeLocation() + } + + componentTexts[len(spec.containers)] = spec.subject.Text() + componentCodeLocations[len(spec.containers)] = spec.subject.CodeLocation() + + return &types.SpecSummary{ + IsMeasurement: spec.IsMeasurement(), + NumberOfSamples: spec.subject.Samples(), + ComponentTexts: componentTexts, + ComponentCodeLocations: componentCodeLocations, + State: spec.state, + RunTime: spec.runTime, + Failure: spec.failure, + Measurements: spec.measurementsReport(), + SuiteID: suiteID, + } +} + +func (spec *Spec) ConcatenatedString() string { + s := "" + for _, container := range spec.containers { + s += container.Text() + " " + } + + return s + spec.subject.Text() +} + +func (spec *Spec) Run(writer io.Writer) { + startTime := time.Now() + defer func() { + spec.runTime = time.Since(startTime) + }() + + for sample := 0; sample < spec.subject.Samples(); sample++ { + spec.runSample(sample, writer) + + if spec.state != types.SpecStatePassed { + return + } + } +} + +func (spec *Spec) runSample(sample int, writer io.Writer) { + spec.state = types.SpecStatePassed + spec.failure = types.SpecFailure{} + innerMostContainerIndexToUnwind := -1 + + defer func() { + for i := innerMostContainerIndexToUnwind; i >= 0; i-- { + container := spec.containers[i] + for _, afterEach := range container.SetupNodesOfType(types.SpecComponentTypeAfterEach) { + spec.announceSetupNode(writer, "AfterEach", container, afterEach) + afterEachState, afterEachFailure := afterEach.Run() + if afterEachState != types.SpecStatePassed && spec.state == types.SpecStatePassed { + spec.state = afterEachState + spec.failure = afterEachFailure + } + } + } + }() + + for i, container := range spec.containers { + innerMostContainerIndexToUnwind = i + for _, beforeEach := range container.SetupNodesOfType(types.SpecComponentTypeBeforeEach) { + spec.announceSetupNode(writer, "BeforeEach", container, beforeEach) + spec.state, spec.failure = beforeEach.Run() + if spec.state != types.SpecStatePassed { + return + } + } + } + + for _, container := range spec.containers { + for _, justBeforeEach := range container.SetupNodesOfType(types.SpecComponentTypeJustBeforeEach) { + spec.announceSetupNode(writer, "JustBeforeEach", container, justBeforeEach) + spec.state, spec.failure = justBeforeEach.Run() + if spec.state != types.SpecStatePassed { + return + } + } + } + + spec.announceSubject(writer, spec.subject) + spec.state, spec.failure = spec.subject.Run() +} + +func (spec *Spec) announceSetupNode(writer io.Writer, nodeType string, container *containernode.ContainerNode, setupNode leafnodes.BasicNode) { + if spec.announceProgress { + s := fmt.Sprintf("[%s] %s\n %s\n", nodeType, container.Text(), setupNode.CodeLocation().String()) + writer.Write([]byte(s)) + } +} + +func (spec *Spec) announceSubject(writer io.Writer, subject leafnodes.SubjectNode) { + if spec.announceProgress { + nodeType := "" + switch subject.Type() { + case types.SpecComponentTypeIt: + nodeType = "It" + case types.SpecComponentTypeMeasure: + nodeType = "Measure" + } + s := fmt.Sprintf("[%s] %s\n %s\n", nodeType, subject.Text(), subject.CodeLocation().String()) + writer.Write([]byte(s)) + } +} + +func (spec *Spec) measurementsReport() map[string]*types.SpecMeasurement { + if 
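runSample above records how deep into the container stack it got (innerMostContainerIndexToUnwind) and, in a deferred block, tears down in reverse order only the containers whose BeforeEach was attempted. A minimal stand-alone sketch of that unwind pattern with plain setup/teardown closures:

package main

import "fmt"

type container struct {
	name     string
	setup    func() error
	teardown func()
}

func runSample(containers []container) (err error) {
	innerMost := -1
	defer func() {
		// Unwind teardowns for every container whose setup was attempted,
		// innermost (last entered) first.
		for i := innerMost; i >= 0; i-- {
			containers[i].teardown()
		}
	}()

	for i, c := range containers {
		innerMost = i
		if err = c.setup(); err != nil {
			return err // teardowns for containers 0..i still run via the defer
		}
	}
	return nil
}

func main() {
	cs := []container{
		{"outer", func() error { fmt.Println("BeforeEach outer"); return nil },
			func() { fmt.Println("AfterEach outer") }},
		{"inner", func() error { return fmt.Errorf("inner BeforeEach failed") },
			func() { fmt.Println("AfterEach inner") }},
	}
	fmt.Println("err:", runSample(cs))
}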
!spec.IsMeasurement() || spec.Failed() { + return map[string]*types.SpecMeasurement{} + } + + return spec.subject.(*leafnodes.MeasureNode).MeasurementsReport() +} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/specs.go b/vendor/github.com/onsi/ginkgo/internal/spec/specs.go new file mode 100644 index 00000000..9c671e39 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec/specs.go @@ -0,0 +1,122 @@ +package spec + +import ( + "math/rand" + "regexp" + "sort" +) + +type Specs struct { + specs []*Spec + numberOfOriginalSpecs int + hasProgrammaticFocus bool +} + +func NewSpecs(specs []*Spec) *Specs { + return &Specs{ + specs: specs, + numberOfOriginalSpecs: len(specs), + } +} + +func (e *Specs) Specs() []*Spec { + return e.specs +} + +func (e *Specs) NumberOfOriginalSpecs() int { + return e.numberOfOriginalSpecs +} + +func (e *Specs) HasProgrammaticFocus() bool { + return e.hasProgrammaticFocus +} + +func (e *Specs) Shuffle(r *rand.Rand) { + sort.Sort(e) + permutation := r.Perm(len(e.specs)) + shuffledSpecs := make([]*Spec, len(e.specs)) + for i, j := range permutation { + shuffledSpecs[i] = e.specs[j] + } + e.specs = shuffledSpecs +} + +func (e *Specs) ApplyFocus(description string, focusString string, skipString string) { + if focusString == "" && skipString == "" { + e.applyProgrammaticFocus() + } else { + e.applyRegExpFocus(description, focusString, skipString) + } +} + +func (e *Specs) applyProgrammaticFocus() { + e.hasProgrammaticFocus = false + for _, spec := range e.specs { + if spec.Focused() && !spec.Pending() { + e.hasProgrammaticFocus = true + break + } + } + + if e.hasProgrammaticFocus { + for _, spec := range e.specs { + if !spec.Focused() { + spec.Skip() + } + } + } +} + +func (e *Specs) applyRegExpFocus(description string, focusString string, skipString string) { + for _, spec := range e.specs { + matchesFocus := true + matchesSkip := false + + toMatch := []byte(description + " " + spec.ConcatenatedString()) + + if focusString != "" { + focusFilter := regexp.MustCompile(focusString) + matchesFocus = focusFilter.Match([]byte(toMatch)) + } + + if skipString != "" { + skipFilter := regexp.MustCompile(skipString) + matchesSkip = skipFilter.Match([]byte(toMatch)) + } + + if !matchesFocus || matchesSkip { + spec.Skip() + } + } +} + +func (e *Specs) SkipMeasurements() { + for _, spec := range e.specs { + if spec.IsMeasurement() { + spec.Skip() + } + } +} + +func (e *Specs) TrimForParallelization(total int, node int) { + startIndex, count := ParallelizedIndexRange(len(e.specs), total, node) + if count == 0 { + e.specs = make([]*Spec, 0) + } else { + e.specs = e.specs[startIndex : startIndex+count] + } +} + +//sort.Interface + +func (e *Specs) Len() int { + return len(e.specs) +} + +func (e *Specs) Less(i, j int) bool { + return e.specs[i].ConcatenatedString() < e.specs[j].ConcatenatedString() +} + +func (e *Specs) Swap(i, j int) { + e.specs[i], e.specs[j] = e.specs[j], e.specs[i] +} diff --git a/vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go b/vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go new file mode 100644 index 00000000..a0b8b62d --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go @@ -0,0 +1,15 @@ +package specrunner + +import ( + "crypto/rand" + "fmt" +) + +func randomID() string { + b := make([]byte, 8) + _, err := rand.Read(b) + if err != nil { + return "" + } + return fmt.Sprintf("%x-%x-%x-%x", b[0:2], b[2:4], b[4:6], b[6:8]) +} diff --git 
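ApplyFocus above runs a spec only when its concatenated description matches the focus pattern (if given) and does not match the skip pattern (if given). A small sketch of that filter against plain strings; the spec descriptions are invented for illustration:

package main

import (
	"fmt"
	"regexp"
)

// shouldRun mirrors applyRegExpFocus: run only if the description matches the
// focus pattern (when set) and does not match the skip pattern (when set).
func shouldRun(description, focus, skip string) bool {
	matchesFocus := true
	matchesSkip := false
	if focus != "" {
		matchesFocus = regexp.MustCompile(focus).MatchString(description)
	}
	if skip != "" {
		matchesSkip = regexp.MustCompile(skip).MatchString(description)
	}
	return matchesFocus && !matchesSkip
}

func main() {
	specs := []string{
		"Server forwards SpecDidComplete events",
		"Server handles slow networks",
		"Writer buffers output until failure",
	}
	for _, s := range specs {
		fmt.Printf("%-45s run=%v\n", s, shouldRun(s, "Server", "slow"))
	}
}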
a/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go b/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go new file mode 100644 index 00000000..7ca7740b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go @@ -0,0 +1,324 @@ +package specrunner + +import ( + "fmt" + "os" + "os/signal" + "sync" + "syscall" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/internal/leafnodes" + "github.com/onsi/ginkgo/internal/spec" + Writer "github.com/onsi/ginkgo/internal/writer" + "github.com/onsi/ginkgo/reporters" + "github.com/onsi/ginkgo/types" + + "time" +) + +type SpecRunner struct { + description string + beforeSuiteNode leafnodes.SuiteNode + specs *spec.Specs + afterSuiteNode leafnodes.SuiteNode + reporters []reporters.Reporter + startTime time.Time + suiteID string + runningSpec *spec.Spec + writer Writer.WriterInterface + config config.GinkgoConfigType + interrupted bool + lock *sync.Mutex +} + +func New(description string, beforeSuiteNode leafnodes.SuiteNode, specs *spec.Specs, afterSuiteNode leafnodes.SuiteNode, reporters []reporters.Reporter, writer Writer.WriterInterface, config config.GinkgoConfigType) *SpecRunner { + return &SpecRunner{ + description: description, + beforeSuiteNode: beforeSuiteNode, + specs: specs, + afterSuiteNode: afterSuiteNode, + reporters: reporters, + writer: writer, + config: config, + suiteID: randomID(), + lock: &sync.Mutex{}, + } +} + +func (runner *SpecRunner) Run() bool { + if runner.config.DryRun { + runner.performDryRun() + return true + } + + runner.reportSuiteWillBegin() + go runner.registerForInterrupts() + + suitePassed := runner.runBeforeSuite() + + if suitePassed { + suitePassed = runner.runSpecs() + } + + runner.blockForeverIfInterrupted() + + suitePassed = runner.runAfterSuite() && suitePassed + + runner.reportSuiteDidEnd(suitePassed) + + return suitePassed +} + +func (runner *SpecRunner) performDryRun() { + runner.reportSuiteWillBegin() + + if runner.beforeSuiteNode != nil { + summary := runner.beforeSuiteNode.Summary() + summary.State = types.SpecStatePassed + runner.reportBeforeSuite(summary) + } + + for _, spec := range runner.specs.Specs() { + summary := spec.Summary(runner.suiteID) + runner.reportSpecWillRun(summary) + if summary.State == types.SpecStateInvalid { + summary.State = types.SpecStatePassed + } + runner.reportSpecDidComplete(summary, false) + } + + if runner.afterSuiteNode != nil { + summary := runner.afterSuiteNode.Summary() + summary.State = types.SpecStatePassed + runner.reportAfterSuite(summary) + } + + runner.reportSuiteDidEnd(true) +} + +func (runner *SpecRunner) runBeforeSuite() bool { + if runner.beforeSuiteNode == nil || runner.wasInterrupted() { + return true + } + + runner.writer.Truncate() + conf := runner.config + passed := runner.beforeSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost) + if !passed { + runner.writer.DumpOut() + } + runner.reportBeforeSuite(runner.beforeSuiteNode.Summary()) + return passed +} + +func (runner *SpecRunner) runAfterSuite() bool { + if runner.afterSuiteNode == nil { + return true + } + + runner.writer.Truncate() + conf := runner.config + passed := runner.afterSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost) + if !passed { + runner.writer.DumpOut() + } + runner.reportAfterSuite(runner.afterSuiteNode.Summary()) + return passed +} + +func (runner *SpecRunner) runSpecs() bool { + suiteFailed := false + skipRemainingSpecs 
:= false + for _, spec := range runner.specs.Specs() { + if runner.wasInterrupted() { + return suiteFailed + } + if skipRemainingSpecs { + spec.Skip() + } + runner.reportSpecWillRun(spec.Summary(runner.suiteID)) + + if !spec.Skipped() && !spec.Pending() { + runner.runningSpec = spec + spec.Run(runner.writer) + runner.runningSpec = nil + if spec.Failed() { + suiteFailed = true + } + } else if spec.Pending() && runner.config.FailOnPending { + suiteFailed = true + } + + runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed()) + + if spec.Failed() && runner.config.FailFast { + skipRemainingSpecs = true + } + } + + return !suiteFailed +} + +func (runner *SpecRunner) CurrentSpecSummary() (*types.SpecSummary, bool) { + if runner.runningSpec == nil { + return nil, false + } + + return runner.runningSpec.Summary(runner.suiteID), true +} + +func (runner *SpecRunner) registerForInterrupts() { + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + + <-c + signal.Stop(c) + runner.markInterrupted() + go runner.registerForHardInterrupts() + runner.writer.DumpOutWithHeader(` +Received interrupt. Emitting contents of GinkgoWriter... +--------------------------------------------------------- +`) + if runner.afterSuiteNode != nil { + fmt.Fprint(os.Stderr, ` +--------------------------------------------------------- +Received interrupt. Running AfterSuite... +^C again to terminate immediately +`) + runner.runAfterSuite() + } + runner.reportSuiteDidEnd(false) + os.Exit(1) +} + +func (runner *SpecRunner) registerForHardInterrupts() { + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + + <-c + fmt.Fprintln(os.Stderr, "\nReceived second interrupt. Shutting down.") + os.Exit(1) +} + +func (runner *SpecRunner) blockForeverIfInterrupted() { + runner.lock.Lock() + interrupted := runner.interrupted + runner.lock.Unlock() + + if interrupted { + select {} + } +} + +func (runner *SpecRunner) markInterrupted() { + runner.lock.Lock() + defer runner.lock.Unlock() + runner.interrupted = true +} + +func (runner *SpecRunner) wasInterrupted() bool { + runner.lock.Lock() + defer runner.lock.Unlock() + return runner.interrupted +} + +func (runner *SpecRunner) reportSuiteWillBegin() { + runner.startTime = time.Now() + summary := runner.summary(true) + for _, reporter := range runner.reporters { + reporter.SpecSuiteWillBegin(runner.config, summary) + } +} + +func (runner *SpecRunner) reportBeforeSuite(summary *types.SetupSummary) { + for _, reporter := range runner.reporters { + reporter.BeforeSuiteDidRun(summary) + } +} + +func (runner *SpecRunner) reportAfterSuite(summary *types.SetupSummary) { + for _, reporter := range runner.reporters { + reporter.AfterSuiteDidRun(summary) + } +} + +func (runner *SpecRunner) reportSpecWillRun(summary *types.SpecSummary) { + runner.writer.Truncate() + + for _, reporter := range runner.reporters { + reporter.SpecWillRun(summary) + } +} + +func (runner *SpecRunner) reportSpecDidComplete(summary *types.SpecSummary, failed bool) { + for i := len(runner.reporters) - 1; i >= 1; i-- { + runner.reporters[i].SpecDidComplete(summary) + } + + if failed { + runner.writer.DumpOut() + } + + runner.reporters[0].SpecDidComplete(summary) +} + +func (runner *SpecRunner) reportSuiteDidEnd(success bool) { + summary := runner.summary(success) + summary.RunTime = time.Since(runner.startTime) + for _, reporter := range runner.reporters { + reporter.SpecSuiteDidEnd(summary) + } +} + +func (runner *SpecRunner) countSpecsSatisfying(filter 
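registerForInterrupts above treats the first SIGINT/SIGTERM as a request for a graceful stop (dump the GinkgoWriter, run AfterSuite, report, exit) and only a second interrupt as a hard kill. A minimal standalone sketch of that two-stage signal handling; the sleep stands in for AfterSuite:

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
	"time"
)

func main() {
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)

	go func() {
		<-c // first ^C: start a graceful shutdown
		fmt.Println("interrupt received: running cleanup (press ^C again to force quit)")
		go func() {
			<-c // second ^C: give up immediately
			fmt.Fprintln(os.Stderr, "second interrupt: exiting now")
			os.Exit(1)
		}()
		time.Sleep(2 * time.Second) // stands in for AfterSuite
		fmt.Println("cleanup finished")
		os.Exit(1)
	}()

	fmt.Println("working... (send SIGINT to interrupt)")
	select {} // block forever, like blockForeverIfInterrupted
}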
func(ex *spec.Spec) bool) (count int) { + count = 0 + + for _, spec := range runner.specs.Specs() { + if filter(spec) { + count++ + } + } + + return count +} + +func (runner *SpecRunner) summary(success bool) *types.SuiteSummary { + numberOfSpecsThatWillBeRun := runner.countSpecsSatisfying(func(ex *spec.Spec) bool { + return !ex.Skipped() && !ex.Pending() + }) + + numberOfPendingSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool { + return ex.Pending() + }) + + numberOfSkippedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool { + return ex.Skipped() + }) + + numberOfPassedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool { + return ex.Passed() + }) + + numberOfFailedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool { + return ex.Failed() + }) + + if runner.beforeSuiteNode != nil && !runner.beforeSuiteNode.Passed() && !runner.config.DryRun { + numberOfFailedSpecs = numberOfSpecsThatWillBeRun + } + + return &types.SuiteSummary{ + SuiteDescription: runner.description, + SuiteSucceeded: success, + SuiteID: runner.suiteID, + + NumberOfSpecsBeforeParallelization: runner.specs.NumberOfOriginalSpecs(), + NumberOfTotalSpecs: len(runner.specs.Specs()), + NumberOfSpecsThatWillBeRun: numberOfSpecsThatWillBeRun, + NumberOfPendingSpecs: numberOfPendingSpecs, + NumberOfSkippedSpecs: numberOfSkippedSpecs, + NumberOfPassedSpecs: numberOfPassedSpecs, + NumberOfFailedSpecs: numberOfFailedSpecs, + } +} diff --git a/vendor/github.com/onsi/ginkgo/internal/suite/suite.go b/vendor/github.com/onsi/ginkgo/internal/suite/suite.go new file mode 100644 index 00000000..a054602f --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/suite/suite.go @@ -0,0 +1,171 @@ +package suite + +import ( + "math/rand" + "time" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/internal/containernode" + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/internal/leafnodes" + "github.com/onsi/ginkgo/internal/spec" + "github.com/onsi/ginkgo/internal/specrunner" + "github.com/onsi/ginkgo/internal/writer" + "github.com/onsi/ginkgo/reporters" + "github.com/onsi/ginkgo/types" +) + +type ginkgoTestingT interface { + Fail() +} + +type Suite struct { + topLevelContainer *containernode.ContainerNode + currentContainer *containernode.ContainerNode + containerIndex int + beforeSuiteNode leafnodes.SuiteNode + afterSuiteNode leafnodes.SuiteNode + runner *specrunner.SpecRunner + failer *failer.Failer + running bool +} + +func New(failer *failer.Failer) *Suite { + topLevelContainer := containernode.New("[Top Level]", types.FlagTypeNone, types.CodeLocation{}) + + return &Suite{ + topLevelContainer: topLevelContainer, + currentContainer: topLevelContainer, + failer: failer, + containerIndex: 1, + } +} + +func (suite *Suite) Run(t ginkgoTestingT, description string, reporters []reporters.Reporter, writer writer.WriterInterface, config config.GinkgoConfigType) (bool, bool) { + if config.ParallelTotal < 1 { + panic("ginkgo.parallel.total must be >= 1") + } + + if config.ParallelNode > config.ParallelTotal || config.ParallelNode < 1 { + panic("ginkgo.parallel.node is one-indexed and must be <= ginkgo.parallel.total") + } + + r := rand.New(rand.NewSource(config.RandomSeed)) + suite.topLevelContainer.Shuffle(r) + specs := suite.generateSpecs(description, config) + suite.runner = specrunner.New(description, suite.beforeSuiteNode, specs, suite.afterSuiteNode, reporters, writer, config) + + 
suite.running = true + success := suite.runner.Run() + if !success { + t.Fail() + } + return success, specs.HasProgrammaticFocus() +} + +func (suite *Suite) generateSpecs(description string, config config.GinkgoConfigType) *spec.Specs { + specsSlice := []*spec.Spec{} + suite.topLevelContainer.BackPropagateProgrammaticFocus() + for _, collatedNodes := range suite.topLevelContainer.Collate() { + specsSlice = append(specsSlice, spec.New(collatedNodes.Subject, collatedNodes.Containers, config.EmitSpecProgress)) + } + + specs := spec.NewSpecs(specsSlice) + + if config.RandomizeAllSpecs { + specs.Shuffle(rand.New(rand.NewSource(config.RandomSeed))) + } + + specs.ApplyFocus(description, config.FocusString, config.SkipString) + + if config.SkipMeasurements { + specs.SkipMeasurements() + } + + if config.ParallelTotal > 1 { + specs.TrimForParallelization(config.ParallelTotal, config.ParallelNode) + } + + return specs +} + +func (suite *Suite) CurrentRunningSpecSummary() (*types.SpecSummary, bool) { + return suite.runner.CurrentSpecSummary() +} + +func (suite *Suite) SetBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.beforeSuiteNode != nil { + panic("You may only call BeforeSuite once!") + } + suite.beforeSuiteNode = leafnodes.NewBeforeSuiteNode(body, codeLocation, timeout, suite.failer) +} + +func (suite *Suite) SetAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.afterSuiteNode != nil { + panic("You may only call AfterSuite once!") + } + suite.afterSuiteNode = leafnodes.NewAfterSuiteNode(body, codeLocation, timeout, suite.failer) +} + +func (suite *Suite) SetSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.beforeSuiteNode != nil { + panic("You may only call BeforeSuite once!") + } + suite.beforeSuiteNode = leafnodes.NewSynchronizedBeforeSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer) +} + +func (suite *Suite) SetSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.afterSuiteNode != nil { + panic("You may only call AfterSuite once!") + } + suite.afterSuiteNode = leafnodes.NewSynchronizedAfterSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer) +} + +func (suite *Suite) PushContainerNode(text string, body func(), flag types.FlagType, codeLocation types.CodeLocation) { + container := containernode.New(text, flag, codeLocation) + suite.currentContainer.PushContainerNode(container) + + previousContainer := suite.currentContainer + suite.currentContainer = container + suite.containerIndex++ + + body() + + suite.containerIndex-- + suite.currentContainer = previousContainer +} + +func (suite *Suite) PushItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.running { + suite.failer.Fail("You may only call It from within a Describe or Context", codeLocation) + } + suite.currentContainer.PushSubjectNode(leafnodes.NewItNode(text, body, flag, codeLocation, timeout, suite.failer, suite.containerIndex)) +} + +func (suite *Suite) PushMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int) { + if suite.running { + suite.failer.Fail("You may only call Measure from within a Describe or Context", codeLocation) + } + suite.currentContainer.PushSubjectNode(leafnodes.NewMeasureNode(text, body, flag, 
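PushContainerNode above builds the spec tree by pointing currentContainer at the new child for the duration of the Describe body, then restoring the parent; PushItNode and the setup-node pushers simply append to whatever currentContainer is at the time. A tiny sketch of that pattern with a hypothetical closure-based DSL rather than Ginkgo's real API:

package main

import "fmt"

type node struct {
	text     string
	children []*node
}

type builder struct {
	current *node
}

// describe descends into a child node for the duration of body, then restores the parent.
func (b *builder) describe(text string, body func()) {
	child := &node{text: text}
	b.current.children = append(b.current.children, child)

	previous := b.current
	b.current = child
	body()
	b.current = previous
}

func (b *builder) it(text string) {
	b.current.children = append(b.current.children, &node{text: text})
}

func printTree(n *node, depth int) {
	for _, c := range n.children {
		fmt.Printf("%*s%s\n", depth*2, "", c.text)
		printTree(c, depth+1)
	}
}

func main() {
	root := &node{text: "[Top Level]"}
	b := &builder{current: root}

	b.describe("Server", func() {
		b.describe("streaming endpoints", func() {
			b.it("forwards SpecDidComplete")
		})
		b.it("closes its listener on Close")
	})

	printTree(root, 0)
}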
codeLocation, samples, suite.failer, suite.containerIndex)) +} + +func (suite *Suite) PushBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.running { + suite.failer.Fail("You may only call BeforeEach from within a Describe or Context", codeLocation) + } + suite.currentContainer.PushSetupNode(leafnodes.NewBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex)) +} + +func (suite *Suite) PushJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.running { + suite.failer.Fail("You may only call JustBeforeEach from within a Describe or Context", codeLocation) + } + suite.currentContainer.PushSetupNode(leafnodes.NewJustBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex)) +} + +func (suite *Suite) PushAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.running { + suite.failer.Fail("You may only call AfterEach from within a Describe or Context", codeLocation) + } + suite.currentContainer.PushSetupNode(leafnodes.NewAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex)) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go b/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go new file mode 100644 index 00000000..a2b9af80 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go @@ -0,0 +1,76 @@ +package testingtproxy + +import ( + "fmt" + "io" +) + +type failFunc func(message string, callerSkip ...int) + +func New(writer io.Writer, fail failFunc, offset int) *ginkgoTestingTProxy { + return &ginkgoTestingTProxy{ + fail: fail, + offset: offset, + writer: writer, + } +} + +type ginkgoTestingTProxy struct { + fail failFunc + offset int + writer io.Writer +} + +func (t *ginkgoTestingTProxy) Error(args ...interface{}) { + t.fail(fmt.Sprintln(args...), t.offset) +} + +func (t *ginkgoTestingTProxy) Errorf(format string, args ...interface{}) { + t.fail(fmt.Sprintf(format, args...), t.offset) +} + +func (t *ginkgoTestingTProxy) Fail() { + t.fail("failed", t.offset) +} + +func (t *ginkgoTestingTProxy) FailNow() { + t.fail("failed", t.offset) +} + +func (t *ginkgoTestingTProxy) Fatal(args ...interface{}) { + t.fail(fmt.Sprintln(args...), t.offset) +} + +func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) { + t.fail(fmt.Sprintf(format, args...), t.offset) +} + +func (t *ginkgoTestingTProxy) Log(args ...interface{}) { + fmt.Fprintln(t.writer, args...) +} + +func (t *ginkgoTestingTProxy) Logf(format string, args ...interface{}) { + fmt.Fprintf(t.writer, format, args...) +} + +func (t *ginkgoTestingTProxy) Failed() bool { + return false +} + +func (t *ginkgoTestingTProxy) Parallel() { +} + +func (t *ginkgoTestingTProxy) Skip(args ...interface{}) { + fmt.Println(args...) +} + +func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) { + fmt.Printf(format, args...) 
+} + +func (t *ginkgoTestingTProxy) SkipNow() { +} + +func (t *ginkgoTestingTProxy) Skipped() bool { + return false +} diff --git a/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go b/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go new file mode 100644 index 00000000..ac6540f0 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go @@ -0,0 +1,31 @@ +package writer + +type FakeGinkgoWriter struct { + EventStream []string +} + +func NewFake() *FakeGinkgoWriter { + return &FakeGinkgoWriter{ + EventStream: []string{}, + } +} + +func (writer *FakeGinkgoWriter) AddEvent(event string) { + writer.EventStream = append(writer.EventStream, event) +} + +func (writer *FakeGinkgoWriter) Truncate() { + writer.EventStream = append(writer.EventStream, "TRUNCATE") +} + +func (writer *FakeGinkgoWriter) DumpOut() { + writer.EventStream = append(writer.EventStream, "DUMP") +} + +func (writer *FakeGinkgoWriter) DumpOutWithHeader(header string) { + writer.EventStream = append(writer.EventStream, "DUMP_WITH_HEADER: "+header) +} + +func (writer *FakeGinkgoWriter) Write(data []byte) (n int, err error) { + return 0, nil +} diff --git a/vendor/github.com/onsi/ginkgo/internal/writer/writer.go b/vendor/github.com/onsi/ginkgo/internal/writer/writer.go new file mode 100644 index 00000000..7678fc1d --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/writer/writer.go @@ -0,0 +1,71 @@ +package writer + +import ( + "bytes" + "io" + "sync" +) + +type WriterInterface interface { + io.Writer + + Truncate() + DumpOut() + DumpOutWithHeader(header string) +} + +type Writer struct { + buffer *bytes.Buffer + outWriter io.Writer + lock *sync.Mutex + stream bool +} + +func New(outWriter io.Writer) *Writer { + return &Writer{ + buffer: &bytes.Buffer{}, + lock: &sync.Mutex{}, + outWriter: outWriter, + stream: true, + } +} + +func (w *Writer) SetStream(stream bool) { + w.lock.Lock() + defer w.lock.Unlock() + w.stream = stream +} + +func (w *Writer) Write(b []byte) (n int, err error) { + w.lock.Lock() + defer w.lock.Unlock() + + if w.stream { + return w.outWriter.Write(b) + } else { + return w.buffer.Write(b) + } +} + +func (w *Writer) Truncate() { + w.lock.Lock() + defer w.lock.Unlock() + w.buffer.Reset() +} + +func (w *Writer) DumpOut() { + w.lock.Lock() + defer w.lock.Unlock() + if !w.stream { + w.buffer.WriteTo(w.outWriter) + } +} + +func (w *Writer) DumpOutWithHeader(header string) { + w.lock.Lock() + defer w.lock.Unlock() + if !w.stream && w.buffer.Len() > 0 { + w.outWriter.Write([]byte(header)) + w.buffer.WriteTo(w.outWriter) + } +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go new file mode 100644 index 00000000..044d2dfd --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go @@ -0,0 +1,83 @@ +/* +Ginkgo's Default Reporter + +A number of command line flags are available to tweak Ginkgo's default output. 
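The Writer above either streams bytes straight through or, with streaming off, buffers them so that Truncate can discard a passing spec's output and DumpOut can replay a failing spec's output. A compact sketch of that buffer-or-stream behaviour using only the standard library:

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"sync"
)

type bufferedWriter struct {
	mu     sync.Mutex
	buf    bytes.Buffer
	out    io.Writer
	stream bool
}

func (w *bufferedWriter) Write(p []byte) (int, error) {
	w.mu.Lock()
	defer w.mu.Unlock()
	if w.stream {
		return w.out.Write(p)
	}
	return w.buf.Write(p)
}

// Truncate drops buffered output; called before each spec runs.
func (w *bufferedWriter) Truncate() {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.buf.Reset()
}

// DumpOut flushes the buffer; called only when a spec fails.
func (w *bufferedWriter) DumpOut() {
	w.mu.Lock()
	defer w.mu.Unlock()
	if !w.stream {
		w.buf.WriteTo(w.out)
	}
}

func main() {
	w := &bufferedWriter{out: os.Stdout}

	w.Truncate()
	fmt.Fprintln(w, "debug output from a passing spec") // never shown

	w.Truncate()
	fmt.Fprintln(w, "debug output from a failing spec")
	w.DumpOut() // shown because the spec failed
}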
+ +These are documented [here](http://onsi.github.io/ginkgo/#running_tests) +*/ +package reporters + +import ( + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/reporters/stenographer" + "github.com/onsi/ginkgo/types" +) + +type DefaultReporter struct { + config config.DefaultReporterConfigType + stenographer stenographer.Stenographer + specSummaries []*types.SpecSummary +} + +func NewDefaultReporter(config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *DefaultReporter { + return &DefaultReporter{ + config: config, + stenographer: stenographer, + } +} + +func (reporter *DefaultReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { + reporter.stenographer.AnnounceSuite(summary.SuiteDescription, config.RandomSeed, config.RandomizeAllSpecs, reporter.config.Succinct) + if config.ParallelTotal > 1 { + reporter.stenographer.AnnounceParallelRun(config.ParallelNode, config.ParallelTotal, summary.NumberOfTotalSpecs, summary.NumberOfSpecsBeforeParallelization, reporter.config.Succinct) + } + reporter.stenographer.AnnounceNumberOfSpecs(summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, reporter.config.Succinct) +} + +func (reporter *DefaultReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { + if setupSummary.State != types.SpecStatePassed { + reporter.stenographer.AnnounceBeforeSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace) + } +} + +func (reporter *DefaultReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { + if setupSummary.State != types.SpecStatePassed { + reporter.stenographer.AnnounceAfterSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace) + } +} + +func (reporter *DefaultReporter) SpecWillRun(specSummary *types.SpecSummary) { + if reporter.config.Verbose && !reporter.config.Succinct && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped { + reporter.stenographer.AnnounceSpecWillRun(specSummary) + } +} + +func (reporter *DefaultReporter) SpecDidComplete(specSummary *types.SpecSummary) { + switch specSummary.State { + case types.SpecStatePassed: + if specSummary.IsMeasurement { + reporter.stenographer.AnnounceSuccesfulMeasurement(specSummary, reporter.config.Succinct) + } else if specSummary.RunTime.Seconds() >= reporter.config.SlowSpecThreshold { + reporter.stenographer.AnnounceSuccesfulSlowSpec(specSummary, reporter.config.Succinct) + } else { + reporter.stenographer.AnnounceSuccesfulSpec(specSummary) + } + case types.SpecStatePending: + reporter.stenographer.AnnouncePendingSpec(specSummary, reporter.config.NoisyPendings && !reporter.config.Succinct) + case types.SpecStateSkipped: + reporter.stenographer.AnnounceSkippedSpec(specSummary, reporter.config.Succinct, reporter.config.FullTrace) + case types.SpecStateTimedOut: + reporter.stenographer.AnnounceSpecTimedOut(specSummary, reporter.config.Succinct, reporter.config.FullTrace) + case types.SpecStatePanicked: + reporter.stenographer.AnnounceSpecPanicked(specSummary, reporter.config.Succinct, reporter.config.FullTrace) + case types.SpecStateFailed: + reporter.stenographer.AnnounceSpecFailed(specSummary, reporter.config.Succinct, reporter.config.FullTrace) + } + + reporter.specSummaries = append(reporter.specSummaries, specSummary) +} + +func (reporter *DefaultReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { + reporter.stenographer.SummarizeFailures(reporter.specSummaries) + 
reporter.stenographer.AnnounceSpecRunCompletion(summary, reporter.config.Succinct) +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go new file mode 100644 index 00000000..27db4794 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go @@ -0,0 +1,59 @@ +package reporters + +import ( + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/types" +) + +//FakeReporter is useful for testing purposes +type FakeReporter struct { + Config config.GinkgoConfigType + + BeginSummary *types.SuiteSummary + BeforeSuiteSummary *types.SetupSummary + SpecWillRunSummaries []*types.SpecSummary + SpecSummaries []*types.SpecSummary + AfterSuiteSummary *types.SetupSummary + EndSummary *types.SuiteSummary + + SpecWillRunStub func(specSummary *types.SpecSummary) + SpecDidCompleteStub func(specSummary *types.SpecSummary) +} + +func NewFakeReporter() *FakeReporter { + return &FakeReporter{ + SpecWillRunSummaries: make([]*types.SpecSummary, 0), + SpecSummaries: make([]*types.SpecSummary, 0), + } +} + +func (fakeR *FakeReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { + fakeR.Config = config + fakeR.BeginSummary = summary +} + +func (fakeR *FakeReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { + fakeR.BeforeSuiteSummary = setupSummary +} + +func (fakeR *FakeReporter) SpecWillRun(specSummary *types.SpecSummary) { + if fakeR.SpecWillRunStub != nil { + fakeR.SpecWillRunStub(specSummary) + } + fakeR.SpecWillRunSummaries = append(fakeR.SpecWillRunSummaries, specSummary) +} + +func (fakeR *FakeReporter) SpecDidComplete(specSummary *types.SpecSummary) { + if fakeR.SpecDidCompleteStub != nil { + fakeR.SpecDidCompleteStub(specSummary) + } + fakeR.SpecSummaries = append(fakeR.SpecSummaries, specSummary) +} + +func (fakeR *FakeReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { + fakeR.AfterSuiteSummary = setupSummary +} + +func (fakeR *FakeReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { + fakeR.EndSummary = summary +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go new file mode 100644 index 00000000..278a88ed --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go @@ -0,0 +1,139 @@ +/* + +JUnit XML Reporter for Ginkgo + +For usage instructions: http://onsi.github.io/ginkgo/#generating_junit_xml_output + +*/ + +package reporters + +import ( + "encoding/xml" + "fmt" + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/types" + "os" + "strings" +) + +type JUnitTestSuite struct { + XMLName xml.Name `xml:"testsuite"` + TestCases []JUnitTestCase `xml:"testcase"` + Tests int `xml:"tests,attr"` + Failures int `xml:"failures,attr"` + Time float64 `xml:"time,attr"` +} + +type JUnitTestCase struct { + Name string `xml:"name,attr"` + ClassName string `xml:"classname,attr"` + FailureMessage *JUnitFailureMessage `xml:"failure,omitempty"` + Skipped *JUnitSkipped `xml:"skipped,omitempty"` + Time float64 `xml:"time,attr"` +} + +type JUnitFailureMessage struct { + Type string `xml:"type,attr"` + Message string `xml:",chardata"` +} + +type JUnitSkipped struct { + XMLName xml.Name `xml:"skipped"` +} + +type JUnitReporter struct { + suite JUnitTestSuite + filename string + testSuiteName string +} + +//NewJUnitReporter creates a new JUnit XML reporter. 
The XML will be stored in the passed in filename. +func NewJUnitReporter(filename string) *JUnitReporter { + return &JUnitReporter{ + filename: filename, + } +} + +func (reporter *JUnitReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { + reporter.suite = JUnitTestSuite{ + Tests: summary.NumberOfSpecsThatWillBeRun, + TestCases: []JUnitTestCase{}, + } + reporter.testSuiteName = summary.SuiteDescription +} + +func (reporter *JUnitReporter) SpecWillRun(specSummary *types.SpecSummary) { +} + +func (reporter *JUnitReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { + reporter.handleSetupSummary("BeforeSuite", setupSummary) +} + +func (reporter *JUnitReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { + reporter.handleSetupSummary("AfterSuite", setupSummary) +} + +func (reporter *JUnitReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) { + if setupSummary.State != types.SpecStatePassed { + testCase := JUnitTestCase{ + Name: name, + ClassName: reporter.testSuiteName, + } + + testCase.FailureMessage = &JUnitFailureMessage{ + Type: reporter.failureTypeForState(setupSummary.State), + Message: fmt.Sprintf("%s\n%s", setupSummary.Failure.ComponentCodeLocation.String(), setupSummary.Failure.Message), + } + testCase.Time = setupSummary.RunTime.Seconds() + reporter.suite.TestCases = append(reporter.suite.TestCases, testCase) + } +} + +func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) { + testCase := JUnitTestCase{ + Name: strings.Join(specSummary.ComponentTexts[1:], " "), + ClassName: reporter.testSuiteName, + } + if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked { + testCase.FailureMessage = &JUnitFailureMessage{ + Type: reporter.failureTypeForState(specSummary.State), + Message: fmt.Sprintf("%s\n%s", specSummary.Failure.ComponentCodeLocation.String(), specSummary.Failure.Message), + } + } + if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending { + testCase.Skipped = &JUnitSkipped{} + } + testCase.Time = specSummary.RunTime.Seconds() + reporter.suite.TestCases = append(reporter.suite.TestCases, testCase) +} + +func (reporter *JUnitReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { + reporter.suite.Time = summary.RunTime.Seconds() + reporter.suite.Failures = summary.NumberOfFailedSpecs + file, err := os.Create(reporter.filename) + if err != nil { + fmt.Printf("Failed to create JUnit report file: %s\n\t%s", reporter.filename, err.Error()) + } + defer file.Close() + file.WriteString(xml.Header) + encoder := xml.NewEncoder(file) + encoder.Indent(" ", " ") + err = encoder.Encode(reporter.suite) + if err != nil { + fmt.Printf("Failed to generate JUnit report\n\t%s", err.Error()) + } +} + +func (reporter *JUnitReporter) failureTypeForState(state types.SpecState) string { + switch state { + case types.SpecStateFailed: + return "Failure" + case types.SpecStateTimedOut: + return "Timeout" + case types.SpecStatePanicked: + return "Panic" + default: + return "" + } +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/reporter.go b/vendor/github.com/onsi/ginkgo/reporters/reporter.go new file mode 100644 index 00000000..348b9dfc --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/reporter.go @@ -0,0 +1,15 @@ +package reporters + +import ( + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/types" +) + +type 
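The JUnit reporter above only fills in JUnitTestSuite/JUnitTestCase structs and leaves the formatting to encoding/xml when the suite ends. A compact sketch of that marshaling with simplified structs (fields trimmed to the ones shown above; the spec names are invented):

package main

import (
	"encoding/xml"
	"fmt"
)

type testSuite struct {
	XMLName   xml.Name   `xml:"testsuite"`
	Tests     int        `xml:"tests,attr"`
	Failures  int        `xml:"failures,attr"`
	Time      float64    `xml:"time,attr"`
	TestCases []testCase `xml:"testcase"`
}

type testCase struct {
	Name      string   `xml:"name,attr"`
	ClassName string   `xml:"classname,attr"`
	Time      float64  `xml:"time,attr"`
	Failure   *failure `xml:"failure,omitempty"`
}

type failure struct {
	Type    string `xml:"type,attr"`
	Message string `xml:",chardata"`
}

func main() {
	suite := testSuite{
		Tests:    2,
		Failures: 1,
		Time:     0.42,
		TestCases: []testCase{
			{Name: "forwards events", ClassName: "Server Suite", Time: 0.01},
			{Name: "handles timeouts", ClassName: "Server Suite", Time: 0.41,
				Failure: &failure{Type: "Failure", Message: "expected 200, got 500"}},
		},
	}

	out, err := xml.MarshalIndent(suite, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(xml.Header + string(out))
}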
Reporter interface { + SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) + BeforeSuiteDidRun(setupSummary *types.SetupSummary) + SpecWillRun(specSummary *types.SpecSummary) + SpecDidComplete(specSummary *types.SpecSummary) + AfterSuiteDidRun(setupSummary *types.SetupSummary) + SpecSuiteDidEnd(summary *types.SuiteSummary) +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go new file mode 100644 index 00000000..ce5433af --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go @@ -0,0 +1,64 @@ +package stenographer + +import ( + "fmt" + "strings" +) + +func (s *consoleStenographer) colorize(colorCode string, format string, args ...interface{}) string { + var out string + + if len(args) > 0 { + out = fmt.Sprintf(format, args...) + } else { + out = format + } + + if s.color { + return fmt.Sprintf("%s%s%s", colorCode, out, defaultStyle) + } else { + return out + } +} + +func (s *consoleStenographer) printBanner(text string, bannerCharacter string) { + fmt.Println(text) + fmt.Println(strings.Repeat(bannerCharacter, len(text))) +} + +func (s *consoleStenographer) printNewLine() { + fmt.Println("") +} + +func (s *consoleStenographer) printDelimiter() { + fmt.Println(s.colorize(grayColor, "%s", strings.Repeat("-", 30))) +} + +func (s *consoleStenographer) print(indentation int, format string, args ...interface{}) { + fmt.Print(s.indent(indentation, format, args...)) +} + +func (s *consoleStenographer) println(indentation int, format string, args ...interface{}) { + fmt.Println(s.indent(indentation, format, args...)) +} + +func (s *consoleStenographer) indent(indentation int, format string, args ...interface{}) string { + var text string + + if len(args) > 0 { + text = fmt.Sprintf(format, args...) 
+ } else { + text = format + } + + stringArray := strings.Split(text, "\n") + padding := "" + if indentation >= 0 { + padding = strings.Repeat(" ", indentation) + } + for i, s := range stringArray { + stringArray[i] = fmt.Sprintf("%s%s", padding, s) + } + + return strings.Join(stringArray, "\n") +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go new file mode 100644 index 00000000..1ff6104c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go @@ -0,0 +1,138 @@ +package stenographer + +import ( + "sync" + + "github.com/onsi/ginkgo/types" +) + +func NewFakeStenographerCall(method string, args ...interface{}) FakeStenographerCall { + return FakeStenographerCall{ + Method: method, + Args: args, + } +} + +type FakeStenographer struct { + calls []FakeStenographerCall + lock *sync.Mutex +} + +type FakeStenographerCall struct { + Method string + Args []interface{} +} + +func NewFakeStenographer() *FakeStenographer { + stenographer := &FakeStenographer{ + lock: &sync.Mutex{}, + } + stenographer.Reset() + return stenographer +} + +func (stenographer *FakeStenographer) Calls() []FakeStenographerCall { + stenographer.lock.Lock() + defer stenographer.lock.Unlock() + + return stenographer.calls +} + +func (stenographer *FakeStenographer) Reset() { + stenographer.lock.Lock() + defer stenographer.lock.Unlock() + + stenographer.calls = make([]FakeStenographerCall, 0) +} + +func (stenographer *FakeStenographer) CallsTo(method string) []FakeStenographerCall { + stenographer.lock.Lock() + defer stenographer.lock.Unlock() + + results := make([]FakeStenographerCall, 0) + for _, call := range stenographer.calls { + if call.Method == method { + results = append(results, call) + } + } + + return results +} + +func (stenographer *FakeStenographer) registerCall(method string, args ...interface{}) { + stenographer.lock.Lock() + defer stenographer.lock.Unlock() + + stenographer.calls = append(stenographer.calls, NewFakeStenographerCall(method, args...)) +} + +func (stenographer *FakeStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) { + stenographer.registerCall("AnnounceSuite", description, randomSeed, randomizingAll, succinct) +} + +func (stenographer *FakeStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) { + stenographer.registerCall("AnnounceAggregatedParallelRun", nodes, succinct) +} + +func (stenographer *FakeStenographer) AnnounceParallelRun(node int, nodes int, specsToRun int, totalSpecs int, succinct bool) { + stenographer.registerCall("AnnounceParallelRun", node, nodes, specsToRun, totalSpecs, succinct) +} + +func (stenographer *FakeStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) { + stenographer.registerCall("AnnounceNumberOfSpecs", specsToRun, total, succinct) +} + +func (stenographer *FakeStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) { + stenographer.registerCall("AnnounceSpecRunCompletion", summary, succinct) +} + +func (stenographer *FakeStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) { + stenographer.registerCall("AnnounceSpecWillRun", spec) +} + +func (stenographer *FakeStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) { + stenographer.registerCall("AnnounceBeforeSuiteFailure", summary, succinct, fullTrace) +} + +func 
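FakeStenographer above is a test double: every Announce* method just records its name and arguments so reporter tests can assert on the calls later via Calls and CallsTo. A minimal version of that call-recording pattern:

package main

import (
	"fmt"
	"sync"
)

type call struct {
	Method string
	Args   []interface{}
}

type fakeSink struct {
	mu    sync.Mutex
	calls []call
}

func (f *fakeSink) record(method string, args ...interface{}) {
	f.mu.Lock()
	defer f.mu.Unlock()
	f.calls = append(f.calls, call{Method: method, Args: args})
}

// callsTo returns only the recorded calls to a given method, like CallsTo above.
func (f *fakeSink) callsTo(method string) []call {
	f.mu.Lock()
	defer f.mu.Unlock()
	var out []call
	for _, c := range f.calls {
		if c.Method == method {
			out = append(out, c)
		}
	}
	return out
}

func main() {
	sink := &fakeSink{}
	sink.record("AnnounceSuite", "Server Suite", int64(1234), false)
	sink.record("AnnounceNumberOfSpecs", 3, 5, false)
	sink.record("AnnounceSuite", "Other Suite", int64(99), true)

	for _, c := range sink.callsTo("AnnounceSuite") {
		fmt.Println(c.Method, c.Args)
	}
}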
(stenographer *FakeStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) { + stenographer.registerCall("AnnounceAfterSuiteFailure", summary, succinct, fullTrace) +} +func (stenographer *FakeStenographer) AnnounceCapturedOutput(output string) { + stenographer.registerCall("AnnounceCapturedOutput", output) +} + +func (stenographer *FakeStenographer) AnnounceSuccesfulSpec(spec *types.SpecSummary) { + stenographer.registerCall("AnnounceSuccesfulSpec", spec) +} + +func (stenographer *FakeStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) { + stenographer.registerCall("AnnounceSuccesfulSlowSpec", spec, succinct) +} + +func (stenographer *FakeStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) { + stenographer.registerCall("AnnounceSuccesfulMeasurement", spec, succinct) +} + +func (stenographer *FakeStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) { + stenographer.registerCall("AnnouncePendingSpec", spec, noisy) +} + +func (stenographer *FakeStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) { + stenographer.registerCall("AnnounceSkippedSpec", spec, succinct, fullTrace) +} + +func (stenographer *FakeStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) { + stenographer.registerCall("AnnounceSpecTimedOut", spec, succinct, fullTrace) +} + +func (stenographer *FakeStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) { + stenographer.registerCall("AnnounceSpecPanicked", spec, succinct, fullTrace) +} + +func (stenographer *FakeStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) { + stenographer.registerCall("AnnounceSpecFailed", spec, succinct, fullTrace) +} + +func (stenographer *FakeStenographer) SummarizeFailures(summaries []*types.SpecSummary) { + stenographer.registerCall("SummarizeFailures", summaries) +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go new file mode 100644 index 00000000..5b5d905d --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go @@ -0,0 +1,549 @@ +/* +The stenographer is used by Ginkgo's reporters to generate output. + +Move along, nothing to see here. 
+*/ + +package stenographer + +import ( + "fmt" + "runtime" + "strings" + + "github.com/onsi/ginkgo/types" +) + +const defaultStyle = "\x1b[0m" +const boldStyle = "\x1b[1m" +const redColor = "\x1b[91m" +const greenColor = "\x1b[32m" +const yellowColor = "\x1b[33m" +const cyanColor = "\x1b[36m" +const grayColor = "\x1b[90m" +const lightGrayColor = "\x1b[37m" + +type cursorStateType int + +const ( + cursorStateTop cursorStateType = iota + cursorStateStreaming + cursorStateMidBlock + cursorStateEndBlock +) + +type Stenographer interface { + AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) + AnnounceAggregatedParallelRun(nodes int, succinct bool) + AnnounceParallelRun(node int, nodes int, specsToRun int, totalSpecs int, succinct bool) + AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) + AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) + + AnnounceSpecWillRun(spec *types.SpecSummary) + AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) + AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) + + AnnounceCapturedOutput(output string) + + AnnounceSuccesfulSpec(spec *types.SpecSummary) + AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) + AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) + + AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) + AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) + + AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) + AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) + AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) + + SummarizeFailures(summaries []*types.SpecSummary) +} + +func New(color bool) Stenographer { + denoter := "•" + if runtime.GOOS == "windows" { + denoter = "+" + } + return &consoleStenographer{ + color: color, + denoter: denoter, + cursorState: cursorStateTop, + } +} + +type consoleStenographer struct { + color bool + denoter string + cursorState cursorStateType +} + +var alternatingColors = []string{defaultStyle, grayColor} + +func (s *consoleStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) { + if succinct { + s.print(0, "[%d] %s ", randomSeed, s.colorize(boldStyle, description)) + return + } + s.printBanner(fmt.Sprintf("Running Suite: %s", description), "=") + s.print(0, "Random Seed: %s", s.colorize(boldStyle, "%d", randomSeed)) + if randomizingAll { + s.print(0, " - Will randomize all specs") + } + s.printNewLine() +} + +func (s *consoleStenographer) AnnounceParallelRun(node int, nodes int, specsToRun int, totalSpecs int, succinct bool) { + if succinct { + s.print(0, "- node #%d ", node) + return + } + s.println(0, + "Parallel test node %s/%s. 
Assigned %s of %s specs.", + s.colorize(boldStyle, "%d", node), + s.colorize(boldStyle, "%d", nodes), + s.colorize(boldStyle, "%d", specsToRun), + s.colorize(boldStyle, "%d", totalSpecs), + ) + s.printNewLine() +} + +func (s *consoleStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) { + if succinct { + s.print(0, "- %d nodes ", nodes) + return + } + s.println(0, + "Running in parallel across %s nodes", + s.colorize(boldStyle, "%d", nodes), + ) + s.printNewLine() +} + +func (s *consoleStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) { + if succinct { + s.print(0, "- %d/%d specs ", specsToRun, total) + s.stream() + return + } + s.println(0, + "Will run %s of %s specs", + s.colorize(boldStyle, "%d", specsToRun), + s.colorize(boldStyle, "%d", total), + ) + + s.printNewLine() +} + +func (s *consoleStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) { + if succinct && summary.SuiteSucceeded { + s.print(0, " %s %s ", s.colorize(greenColor, "SUCCESS!"), summary.RunTime) + return + } + s.printNewLine() + color := greenColor + if !summary.SuiteSucceeded { + color = redColor + } + s.println(0, s.colorize(boldStyle+color, "Ran %d of %d Specs in %.3f seconds", summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, summary.RunTime.Seconds())) + + status := "" + if summary.SuiteSucceeded { + status = s.colorize(boldStyle+greenColor, "SUCCESS!") + } else { + status = s.colorize(boldStyle+redColor, "FAIL!") + } + + s.print(0, + "%s -- %s | %s | %s | %s ", + status, + s.colorize(greenColor+boldStyle, "%d Passed", summary.NumberOfPassedSpecs), + s.colorize(redColor+boldStyle, "%d Failed", summary.NumberOfFailedSpecs), + s.colorize(yellowColor+boldStyle, "%d Pending", summary.NumberOfPendingSpecs), + s.colorize(cyanColor+boldStyle, "%d Skipped", summary.NumberOfSkippedSpecs), + ) +} + +func (s *consoleStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) { + s.startBlock() + for i, text := range spec.ComponentTexts[1 : len(spec.ComponentTexts)-1] { + s.print(0, s.colorize(alternatingColors[i%2], text)+" ") + } + + indentation := 0 + if len(spec.ComponentTexts) > 2 { + indentation = 1 + s.printNewLine() + } + index := len(spec.ComponentTexts) - 1 + s.print(indentation, s.colorize(boldStyle, spec.ComponentTexts[index])) + s.printNewLine() + s.print(indentation, s.colorize(lightGrayColor, spec.ComponentCodeLocations[index].String())) + s.printNewLine() + s.midBlock() +} + +func (s *consoleStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) { + s.announceSetupFailure("BeforeSuite", summary, succinct, fullTrace) +} + +func (s *consoleStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) { + s.announceSetupFailure("AfterSuite", summary, succinct, fullTrace) +} + +func (s *consoleStenographer) announceSetupFailure(name string, summary *types.SetupSummary, succinct bool, fullTrace bool) { + s.startBlock() + var message string + switch summary.State { + case types.SpecStateFailed: + message = "Failure" + case types.SpecStatePanicked: + message = "Panic" + case types.SpecStateTimedOut: + message = "Timeout" + } + + s.println(0, s.colorize(redColor+boldStyle, "%s [%.3f seconds]", message, summary.RunTime.Seconds())) + + indentation := s.printCodeLocationBlock([]string{name}, []types.CodeLocation{summary.CodeLocation}, summary.ComponentType, 0, summary.State, true) + + s.printNewLine() + s.printFailure(indentation, summary.State, 
summary.Failure, fullTrace) + + s.endBlock() +} + +func (s *consoleStenographer) AnnounceCapturedOutput(output string) { + if output == "" { + return + } + + s.startBlock() + s.println(0, output) + s.midBlock() +} + +func (s *consoleStenographer) AnnounceSuccesfulSpec(spec *types.SpecSummary) { + s.print(0, s.colorize(greenColor, s.denoter)) + s.stream() +} + +func (s *consoleStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) { + s.printBlockWithMessage( + s.colorize(greenColor, "%s [SLOW TEST:%.3f seconds]", s.denoter, spec.RunTime.Seconds()), + "", + spec, + succinct, + ) +} + +func (s *consoleStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) { + s.printBlockWithMessage( + s.colorize(greenColor, "%s [MEASUREMENT]", s.denoter), + s.measurementReport(spec, succinct), + spec, + succinct, + ) +} + +func (s *consoleStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) { + if noisy { + s.printBlockWithMessage( + s.colorize(yellowColor, "P [PENDING]"), + "", + spec, + false, + ) + } else { + s.print(0, s.colorize(yellowColor, "P")) + s.stream() + } +} + +func (s *consoleStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) { + // Skips at runtime will have a non-empty spec.Failure. All others should be succinct. + if succinct || spec.Failure == (types.SpecFailure{}) { + s.print(0, s.colorize(cyanColor, "S")) + s.stream() + } else { + s.startBlock() + s.println(0, s.colorize(cyanColor+boldStyle, "S [SKIPPING]%s [%.3f seconds]", s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds())) + + indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct) + + s.printNewLine() + s.printSkip(indentation, spec.Failure) + s.endBlock() + } +} + +func (s *consoleStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) { + s.printSpecFailure(fmt.Sprintf("%s... Timeout", s.denoter), spec, succinct, fullTrace) +} + +func (s *consoleStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) { + s.printSpecFailure(fmt.Sprintf("%s! Panic", s.denoter), spec, succinct, fullTrace) +} + +func (s *consoleStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) { + s.printSpecFailure(fmt.Sprintf("%s Failure", s.denoter), spec, succinct, fullTrace) +} + +func (s *consoleStenographer) SummarizeFailures(summaries []*types.SpecSummary) { + failingSpecs := []*types.SpecSummary{} + + for _, summary := range summaries { + if summary.HasFailureState() { + failingSpecs = append(failingSpecs, summary) + } + } + + if len(failingSpecs) == 0 { + return + } + + s.printNewLine() + s.printNewLine() + plural := "s" + if len(failingSpecs) == 1 { + plural = "" + } + s.println(0, s.colorize(redColor+boldStyle, "Summarizing %d Failure%s:", len(failingSpecs), plural)) + for _, summary := range failingSpecs { + s.printNewLine() + if summary.HasFailureState() { + if summary.TimedOut() { + s.print(0, s.colorize(redColor+boldStyle, "[Timeout...] ")) + } else if summary.Panicked() { + s.print(0, s.colorize(redColor+boldStyle, "[Panic!] 
")) + } else if summary.Failed() { + s.print(0, s.colorize(redColor+boldStyle, "[Fail] ")) + } + s.printSpecContext(summary.ComponentTexts, summary.ComponentCodeLocations, summary.Failure.ComponentType, summary.Failure.ComponentIndex, summary.State, true) + s.printNewLine() + s.println(0, s.colorize(lightGrayColor, summary.Failure.Location.String())) + } + } +} + +func (s *consoleStenographer) startBlock() { + if s.cursorState == cursorStateStreaming { + s.printNewLine() + s.printDelimiter() + } else if s.cursorState == cursorStateMidBlock { + s.printNewLine() + } +} + +func (s *consoleStenographer) midBlock() { + s.cursorState = cursorStateMidBlock +} + +func (s *consoleStenographer) endBlock() { + s.printDelimiter() + s.cursorState = cursorStateEndBlock +} + +func (s *consoleStenographer) stream() { + s.cursorState = cursorStateStreaming +} + +func (s *consoleStenographer) printBlockWithMessage(header string, message string, spec *types.SpecSummary, succinct bool) { + s.startBlock() + s.println(0, header) + + indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, types.SpecComponentTypeInvalid, 0, spec.State, succinct) + + if message != "" { + s.printNewLine() + s.println(indentation, message) + } + + s.endBlock() +} + +func (s *consoleStenographer) printSpecFailure(message string, spec *types.SpecSummary, succinct bool, fullTrace bool) { + s.startBlock() + s.println(0, s.colorize(redColor+boldStyle, "%s%s [%.3f seconds]", message, s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds())) + + indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct) + + s.printNewLine() + s.printFailure(indentation, spec.State, spec.Failure, fullTrace) + s.endBlock() +} + +func (s *consoleStenographer) failureContext(failedComponentType types.SpecComponentType) string { + switch failedComponentType { + case types.SpecComponentTypeBeforeSuite: + return " in Suite Setup (BeforeSuite)" + case types.SpecComponentTypeAfterSuite: + return " in Suite Teardown (AfterSuite)" + case types.SpecComponentTypeBeforeEach: + return " in Spec Setup (BeforeEach)" + case types.SpecComponentTypeJustBeforeEach: + return " in Spec Setup (JustBeforeEach)" + case types.SpecComponentTypeAfterEach: + return " in Spec Teardown (AfterEach)" + } + + return "" +} + +func (s *consoleStenographer) printSkip(indentation int, spec types.SpecFailure) { + s.println(indentation, s.colorize(cyanColor, spec.Message)) + s.printNewLine() + s.println(indentation, spec.Location.String()) +} + +func (s *consoleStenographer) printFailure(indentation int, state types.SpecState, failure types.SpecFailure, fullTrace bool) { + if state == types.SpecStatePanicked { + s.println(indentation, s.colorize(redColor+boldStyle, failure.Message)) + s.println(indentation, s.colorize(redColor, failure.ForwardedPanic)) + s.println(indentation, failure.Location.String()) + s.printNewLine() + s.println(indentation, s.colorize(redColor, "Full Stack Trace")) + s.println(indentation, failure.Location.FullStackTrace) + } else { + s.println(indentation, s.colorize(redColor, failure.Message)) + s.printNewLine() + s.println(indentation, failure.Location.String()) + if fullTrace { + s.printNewLine() + s.println(indentation, s.colorize(redColor, "Full Stack Trace")) + s.println(indentation, failure.Location.FullStackTrace) + } + } +} + +func (s *consoleStenographer) printSpecContext(componentTexts []string, 
componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int { + startIndex := 1 + indentation := 0 + + if len(componentTexts) == 1 { + startIndex = 0 + } + + for i := startIndex; i < len(componentTexts); i++ { + if (state.IsFailure() || state == types.SpecStateSkipped) && i == failedComponentIndex { + color := redColor + if state == types.SpecStateSkipped { + color = cyanColor + } + blockType := "" + switch failedComponentType { + case types.SpecComponentTypeBeforeSuite: + blockType = "BeforeSuite" + case types.SpecComponentTypeAfterSuite: + blockType = "AfterSuite" + case types.SpecComponentTypeBeforeEach: + blockType = "BeforeEach" + case types.SpecComponentTypeJustBeforeEach: + blockType = "JustBeforeEach" + case types.SpecComponentTypeAfterEach: + blockType = "AfterEach" + case types.SpecComponentTypeIt: + blockType = "It" + case types.SpecComponentTypeMeasure: + blockType = "Measurement" + } + if succinct { + s.print(0, s.colorize(color+boldStyle, "[%s] %s ", blockType, componentTexts[i])) + } else { + s.println(indentation, s.colorize(color+boldStyle, "%s [%s]", componentTexts[i], blockType)) + s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i])) + } + } else { + if succinct { + s.print(0, s.colorize(alternatingColors[i%2], "%s ", componentTexts[i])) + } else { + s.println(indentation, componentTexts[i]) + s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i])) + } + } + indentation++ + } + + return indentation +} + +func (s *consoleStenographer) printCodeLocationBlock(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int { + indentation := s.printSpecContext(componentTexts, componentCodeLocations, failedComponentType, failedComponentIndex, state, succinct) + + if succinct { + if len(componentTexts) > 0 { + s.printNewLine() + s.print(0, s.colorize(lightGrayColor, "%s", componentCodeLocations[len(componentCodeLocations)-1])) + } + s.printNewLine() + indentation = 1 + } else { + indentation-- + } + + return indentation +} + +func (s *consoleStenographer) orderedMeasurementKeys(measurements map[string]*types.SpecMeasurement) []string { + orderedKeys := make([]string, len(measurements)) + for key, measurement := range measurements { + orderedKeys[measurement.Order] = key + } + return orderedKeys +} + +func (s *consoleStenographer) measurementReport(spec *types.SpecSummary, succinct bool) string { + if len(spec.Measurements) == 0 { + return "Found no measurements" + } + + message := []string{} + orderedKeys := s.orderedMeasurementKeys(spec.Measurements) + + if succinct { + message = append(message, fmt.Sprintf("%s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples))) + for _, key := range orderedKeys { + measurement := spec.Measurements[key] + message = append(message, fmt.Sprintf(" %s - %s: %s%s, %s: %s%s ± %s%s, %s: %s%s", + s.colorize(boldStyle, "%s", measurement.Name), + measurement.SmallestLabel, + s.colorize(greenColor, "%.3f", measurement.Smallest), + measurement.Units, + measurement.AverageLabel, + s.colorize(cyanColor, "%.3f", measurement.Average), + measurement.Units, + s.colorize(cyanColor, "%.3f", measurement.StdDeviation), + measurement.Units, + measurement.LargestLabel, + s.colorize(redColor, "%.3f", measurement.Largest), + measurement.Units, + )) + } + } else { + message = append(message, 
fmt.Sprintf("Ran %s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples))) + for _, key := range orderedKeys { + measurement := spec.Measurements[key] + info := "" + if measurement.Info != nil { + message = append(message, fmt.Sprintf("%v", measurement.Info)) + } + + message = append(message, fmt.Sprintf("%s:\n%s %s: %s%s\n %s: %s%s\n %s: %s%s ± %s%s", + s.colorize(boldStyle, "%s", measurement.Name), + info, + measurement.SmallestLabel, + s.colorize(greenColor, "%.3f", measurement.Smallest), + measurement.Units, + measurement.LargestLabel, + s.colorize(redColor, "%.3f", measurement.Largest), + measurement.Units, + measurement.AverageLabel, + s.colorize(cyanColor, "%.3f", measurement.Average), + measurement.Units, + s.colorize(cyanColor, "%.3f", measurement.StdDeviation), + measurement.Units, + )) + } + } + + return strings.Join(message, "\n") +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go new file mode 100644 index 00000000..657dfe72 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go @@ -0,0 +1,92 @@ +/* + +TeamCity Reporter for Ginkgo + +Makes use of TeamCity's support for Service Messages +http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity#BuildScriptInteractionwithTeamCity-ReportingTests +*/ + +package reporters + +import ( + "fmt" + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/types" + "io" + "strings" +) + +const ( + messageId = "##teamcity" +) + +type TeamCityReporter struct { + writer io.Writer + testSuiteName string +} + +func NewTeamCityReporter(writer io.Writer) *TeamCityReporter { + return &TeamCityReporter{ + writer: writer, + } +} + +func (reporter *TeamCityReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { + reporter.testSuiteName = escape(summary.SuiteDescription) + fmt.Fprintf(reporter.writer, "%s[testSuiteStarted name='%s']", messageId, reporter.testSuiteName) +} + +func (reporter *TeamCityReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { + reporter.handleSetupSummary("BeforeSuite", setupSummary) +} + +func (reporter *TeamCityReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { + reporter.handleSetupSummary("AfterSuite", setupSummary) +} + +func (reporter *TeamCityReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) { + if setupSummary.State != types.SpecStatePassed { + testName := escape(name) + fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']", messageId, testName) + message := escape(setupSummary.Failure.ComponentCodeLocation.String()) + details := escape(setupSummary.Failure.Message) + fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']", messageId, testName, message, details) + durationInMilliseconds := setupSummary.RunTime.Seconds() * 1000 + fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']", messageId, testName, durationInMilliseconds) + } +} + +func (reporter *TeamCityReporter) SpecWillRun(specSummary *types.SpecSummary) { + testName := escape(strings.Join(specSummary.ComponentTexts[1:], " ")) + fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']", messageId, testName) +} + +func (reporter *TeamCityReporter) SpecDidComplete(specSummary *types.SpecSummary) { + testName := escape(strings.Join(specSummary.ComponentTexts[1:], " ")) + + if specSummary.State == types.SpecStateFailed || specSummary.State == 
types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked { + message := escape(specSummary.Failure.ComponentCodeLocation.String()) + details := escape(specSummary.Failure.Message) + fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']", messageId, testName, message, details) + } + if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending { + fmt.Fprintf(reporter.writer, "%s[testIgnored name='%s']", messageId, testName) + } + + durationInMilliseconds := specSummary.RunTime.Seconds() * 1000 + fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']", messageId, testName, durationInMilliseconds) +} + +func (reporter *TeamCityReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { + fmt.Fprintf(reporter.writer, "%s[testSuiteFinished name='%s']", messageId, reporter.testSuiteName) +} + +func escape(output string) string { + output = strings.Replace(output, "|", "||", -1) + output = strings.Replace(output, "'", "|'", -1) + output = strings.Replace(output, "\n", "|n", -1) + output = strings.Replace(output, "\r", "|r", -1) + output = strings.Replace(output, "[", "|[", -1) + output = strings.Replace(output, "]", "|]", -1) + return output +} diff --git a/vendor/github.com/onsi/ginkgo/types/code_location.go b/vendor/github.com/onsi/ginkgo/types/code_location.go new file mode 100644 index 00000000..935a89e1 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/types/code_location.go @@ -0,0 +1,15 @@ +package types + +import ( + "fmt" +) + +type CodeLocation struct { + FileName string + LineNumber int + FullStackTrace string +} + +func (codeLocation CodeLocation) String() string { + return fmt.Sprintf("%s:%d", codeLocation.FileName, codeLocation.LineNumber) +} diff --git a/vendor/github.com/onsi/ginkgo/types/synchronization.go b/vendor/github.com/onsi/ginkgo/types/synchronization.go new file mode 100644 index 00000000..fdd6ed5b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/types/synchronization.go @@ -0,0 +1,30 @@ +package types + +import ( + "encoding/json" +) + +type RemoteBeforeSuiteState int + +const ( + RemoteBeforeSuiteStateInvalid RemoteBeforeSuiteState = iota + + RemoteBeforeSuiteStatePending + RemoteBeforeSuiteStatePassed + RemoteBeforeSuiteStateFailed + RemoteBeforeSuiteStateDisappeared +) + +type RemoteBeforeSuiteData struct { + Data []byte + State RemoteBeforeSuiteState +} + +func (r RemoteBeforeSuiteData) ToJSON() []byte { + data, _ := json.Marshal(r) + return data +} + +type RemoteAfterSuiteData struct { + CanRun bool +} diff --git a/vendor/github.com/onsi/ginkgo/types/types.go b/vendor/github.com/onsi/ginkgo/types/types.go new file mode 100644 index 00000000..889612e0 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/types/types.go @@ -0,0 +1,143 @@ +package types + +import "time" + +const GINKGO_FOCUS_EXIT_CODE = 197 + +type SuiteSummary struct { + SuiteDescription string + SuiteSucceeded bool + SuiteID string + + NumberOfSpecsBeforeParallelization int + NumberOfTotalSpecs int + NumberOfSpecsThatWillBeRun int + NumberOfPendingSpecs int + NumberOfSkippedSpecs int + NumberOfPassedSpecs int + NumberOfFailedSpecs int + RunTime time.Duration +} + +type SpecSummary struct { + ComponentTexts []string + ComponentCodeLocations []CodeLocation + + State SpecState + RunTime time.Duration + Failure SpecFailure + IsMeasurement bool + NumberOfSamples int + Measurements map[string]*SpecMeasurement + + CapturedOutput string + SuiteID string 
+} + +func (s SpecSummary) HasFailureState() bool { + return s.State.IsFailure() +} + +func (s SpecSummary) TimedOut() bool { + return s.State == SpecStateTimedOut +} + +func (s SpecSummary) Panicked() bool { + return s.State == SpecStatePanicked +} + +func (s SpecSummary) Failed() bool { + return s.State == SpecStateFailed +} + +func (s SpecSummary) Passed() bool { + return s.State == SpecStatePassed +} + +func (s SpecSummary) Skipped() bool { + return s.State == SpecStateSkipped +} + +func (s SpecSummary) Pending() bool { + return s.State == SpecStatePending +} + +type SetupSummary struct { + ComponentType SpecComponentType + CodeLocation CodeLocation + + State SpecState + RunTime time.Duration + Failure SpecFailure + + CapturedOutput string + SuiteID string +} + +type SpecFailure struct { + Message string + Location CodeLocation + ForwardedPanic string + + ComponentIndex int + ComponentType SpecComponentType + ComponentCodeLocation CodeLocation +} + +type SpecMeasurement struct { + Name string + Info interface{} + Order int + + Results []float64 + + Smallest float64 + Largest float64 + Average float64 + StdDeviation float64 + + SmallestLabel string + LargestLabel string + AverageLabel string + Units string +} + +type SpecState uint + +const ( + SpecStateInvalid SpecState = iota + + SpecStatePending + SpecStateSkipped + SpecStatePassed + SpecStateFailed + SpecStatePanicked + SpecStateTimedOut +) + +func (state SpecState) IsFailure() bool { + return state == SpecStateTimedOut || state == SpecStatePanicked || state == SpecStateFailed +} + +type SpecComponentType uint + +const ( + SpecComponentTypeInvalid SpecComponentType = iota + + SpecComponentTypeContainer + SpecComponentTypeBeforeSuite + SpecComponentTypeAfterSuite + SpecComponentTypeBeforeEach + SpecComponentTypeJustBeforeEach + SpecComponentTypeAfterEach + SpecComponentTypeIt + SpecComponentTypeMeasure +) + +type FlagType uint + +const ( + FlagTypeNone FlagType = iota + FlagTypeFocused + FlagTypePending +) diff --git a/vendor/github.com/onsi/gomega/.gitignore b/vendor/github.com/onsi/gomega/.gitignore new file mode 100644 index 00000000..55145320 --- /dev/null +++ b/vendor/github.com/onsi/gomega/.gitignore @@ -0,0 +1,3 @@ +.DS_Store +*.test +. diff --git a/vendor/github.com/onsi/gomega/.travis.yml b/vendor/github.com/onsi/gomega/.travis.yml new file mode 100644 index 00000000..79780ec5 --- /dev/null +++ b/vendor/github.com/onsi/gomega/.travis.yml @@ -0,0 +1,11 @@ +language: go +go: + - 1.4 + - 1.5 + +install: + - go get -v ./... + - go get github.com/onsi/ginkgo + - go install github.com/onsi/ginkgo/ginkgo + +script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --failOnPending --randomizeSuites --race diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md new file mode 100644 index 00000000..0c5ede5d --- /dev/null +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -0,0 +1,70 @@ +## HEAD + +Improvements: + +- Added `BeSent` which attempts to send a value down a channel and fails if the attempt blocks. Can be paired with `Eventually` to safely send a value down a channel with a timeout. +- `Ω`, `Expect`, `Eventually`, and `Consistently` now immediately `panic` if there is no registered fail handler. This is always a mistake that can hide failing tests. 
+- `Receive()` no longer errors when passed a closed channel, it's perfectly fine to attempt to read from a closed channel so Ω(c).Should(Receive()) always fails and Ω(c).ShoudlNot(Receive()) always passes with a closed channel. +- Added `HavePrefix` and `HaveSuffix` matchers. +- `ghttp` can now handle concurrent requests. +- Added `Succeed` which allows one to write `Ω(MyFunction()).Should(Succeed())`. +- Improved `ghttp`'s behavior around failing assertions and panics: + - If a registered handler makes a failing assertion `ghttp` will return `500`. + - If a registered handler panics, `ghttp` will return `500` *and* fail the test. This is new behavior that may cause existing code to break. This code is almost certainly incorrect and creating a false positive. +- `ghttp` servers can take an `io.Writer`. `ghttp` will write a line to the writer when each request arrives. +- Added `WithTransform` matcher to allow munging input data before feeding into the relevant matcher +- Added boolean `And`, `Or`, and `Not` matchers to allow creating composite matchers + +Bug Fixes: +- gexec: `session.Wait` now uses `EventuallyWithOffset` to get the right line number in the failure. +- `ContainElement` no longer bails if a passed-in matcher errors. + +## 1.0 (8/2/2014) + +No changes. Dropping "beta" from the version number. + +## 1.0.0-beta (7/8/2014) +Breaking Changes: + +- Changed OmegaMatcher interface. Instead of having `Match` return failure messages, two new methods `FailureMessage` and `NegatedFailureMessage` are called instead. +- Moved and renamed OmegaFailHandler to types.GomegaFailHandler and OmegaMatcher to types.GomegaMatcher. Any references to OmegaMatcher in any custom matchers will need to be changed to point to types.GomegaMatcher + +New Test-Support Features: + +- `ghttp`: supports testing http clients + - Provides a flexible fake http server + - Provides a collection of chainable http handlers that perform assertions. +- `gbytes`: supports making ordered assertions against streams of data + - Provides a `gbytes.Buffer` + - Provides a `Say` matcher to perform ordered assertions against output data +- `gexec`: supports testing external processes + - Provides support for building Go binaries + - Wraps and starts `exec.Cmd` commands + - Makes it easy to assert against stdout and stderr + - Makes it easy to send signals and wait for processes to exit + - Provides an `Exit` matcher to assert against exit code. + +DSL Changes: + +- `Eventually` and `Consistently` can accept `time.Duration` interval and polling inputs. +- The default timeouts for `Eventually` and `Consistently` are now configurable. + +New Matchers: + +- `ConsistOf`: order-independent assertion against the elements of an array/slice or keys of a map. +- `BeTemporally`: like `BeNumerically` but for `time.Time` +- `HaveKeyWithValue`: asserts a map has a given key with the given value. + +Updated Matchers: + +- `Receive` matcher can take a matcher as an argument and passes only if the channel under test receives an objet that satisfies the passed-in matcher. +- Matchers that implement `MatchMayChangeInTheFuture(actual interface{}) bool` can inform `Eventually` and/or `Consistently` when a match has no chance of changing status in the future. For example, `Receive` returns `false` when a channel is closed. 
+ +Misc: + +- Start using semantic versioning +- Start maintaining changelog + +Major refactor: + +- Pull out Gomega's internal to `internal` diff --git a/vendor/github.com/onsi/gomega/LICENSE b/vendor/github.com/onsi/gomega/LICENSE new file mode 100644 index 00000000..9415ee72 --- /dev/null +++ b/vendor/github.com/onsi/gomega/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2013-2014 Onsi Fakhouri + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/onsi/gomega/README.md b/vendor/github.com/onsi/gomega/README.md new file mode 100644 index 00000000..c8255919 --- /dev/null +++ b/vendor/github.com/onsi/gomega/README.md @@ -0,0 +1,17 @@ +![Gomega: Ginkgo's Preferred Matcher Library](http://onsi.github.io/gomega/images/gomega.png) + +[![Build Status](https://travis-ci.org/onsi/gomega.png)](https://travis-ci.org/onsi/gomega) + +Jump straight to the [docs](http://onsi.github.io/gomega/) to learn about Gomega, including a list of [all available matchers](http://onsi.github.io/gomega/#provided-matchers). + +To discuss Gomega and get updates, join the [google group](https://groups.google.com/d/forum/ginkgo-and-gomega). + +## [Ginkgo](http://github.com/onsi/ginkgo): a BDD Testing Framework for Golang + +Learn more about Ginkgo [here](http://onsi.github.io/ginkgo/) + +## License + +Gomega is MIT-Licensed + +The `ConsistOf` matcher uses [goraph](https://github.com/amitkgupta/goraph) which is embedded in the source to simplify distribution. goraph has an MIT license. diff --git a/vendor/github.com/onsi/gomega/format/format.go b/vendor/github.com/onsi/gomega/format/format.go new file mode 100644 index 00000000..ec9c91a4 --- /dev/null +++ b/vendor/github.com/onsi/gomega/format/format.go @@ -0,0 +1,276 @@ +/* +Gomega's format package pretty-prints objects. It explores input objects recursively and generates formatted, indented output with type information. +*/ +package format + +import ( + "fmt" + "reflect" + "strings" +) + +// Use MaxDepth to set the maximum recursion depth when printing deeply nested objects +var MaxDepth = uint(10) + +/* +By default, all objects (even those that implement fmt.Stringer and fmt.GoStringer) are recursively inspected to generate output. + +Set UseStringerRepresentation = true to use GoString (for fmt.GoStringers) or String (for fmt.Stringer) instead. + +Note that GoString and String don't always have all the information you need to understand why a test failed! 
+*/ +var UseStringerRepresentation = false + +//The default indentation string emitted by the format package +var Indent = " " + +var longFormThreshold = 20 + +/* +Generates a formatted matcher success/failure message of the form: + + Expected + + + + +If expected is omited, then the message looks like: + + Expected + + +*/ +func Message(actual interface{}, message string, expected ...interface{}) string { + if len(expected) == 0 { + return fmt.Sprintf("Expected\n%s\n%s", Object(actual, 1), message) + } else { + return fmt.Sprintf("Expected\n%s\n%s\n%s", Object(actual, 1), message, Object(expected[0], 1)) + } +} + +/* +Pretty prints the passed in object at the passed in indentation level. + +Object recurses into deeply nested objects emitting pretty-printed representations of their components. + +Modify format.MaxDepth to control how deep the recursion is allowed to go +Set format.UseStringerRepresentation to true to return object.GoString() or object.String() when available instead of +recursing into the object. +*/ +func Object(object interface{}, indentation uint) string { + indent := strings.Repeat(Indent, int(indentation)) + value := reflect.ValueOf(object) + return fmt.Sprintf("%s<%s>: %s", indent, formatType(object), formatValue(value, indentation)) +} + +/* +IndentString takes a string and indents each line by the specified amount. +*/ +func IndentString(s string, indentation uint) string { + components := strings.Split(s, "\n") + result := "" + indent := strings.Repeat(Indent, int(indentation)) + for i, component := range components { + result += indent + component + if i < len(components)-1 { + result += "\n" + } + } + + return result +} + +func formatType(object interface{}) string { + t := reflect.TypeOf(object) + if t == nil { + return "nil" + } + switch t.Kind() { + case reflect.Chan: + v := reflect.ValueOf(object) + return fmt.Sprintf("%T | len:%d, cap:%d", object, v.Len(), v.Cap()) + case reflect.Ptr: + return fmt.Sprintf("%T | %p", object, object) + case reflect.Slice: + v := reflect.ValueOf(object) + return fmt.Sprintf("%T | len:%d, cap:%d", object, v.Len(), v.Cap()) + case reflect.Map: + v := reflect.ValueOf(object) + return fmt.Sprintf("%T | len:%d", object, v.Len()) + default: + return fmt.Sprintf("%T", object) + } +} + +func formatValue(value reflect.Value, indentation uint) string { + if indentation > MaxDepth { + return "..." 
+ } + + if isNilValue(value) { + return "nil" + } + + if UseStringerRepresentation { + if value.CanInterface() { + obj := value.Interface() + switch x := obj.(type) { + case fmt.GoStringer: + return x.GoString() + case fmt.Stringer: + return x.String() + } + } + } + + switch value.Kind() { + case reflect.Bool: + return fmt.Sprintf("%v", value.Bool()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return fmt.Sprintf("%v", value.Int()) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return fmt.Sprintf("%v", value.Uint()) + case reflect.Uintptr: + return fmt.Sprintf("0x%x", value.Uint()) + case reflect.Float32, reflect.Float64: + return fmt.Sprintf("%v", value.Float()) + case reflect.Complex64, reflect.Complex128: + return fmt.Sprintf("%v", value.Complex()) + case reflect.Chan: + return fmt.Sprintf("0x%x", value.Pointer()) + case reflect.Func: + return fmt.Sprintf("0x%x", value.Pointer()) + case reflect.Ptr: + return formatValue(value.Elem(), indentation) + case reflect.Slice: + if value.Type().Elem().Kind() == reflect.Uint8 { + return formatString(value.Bytes(), indentation) + } + return formatSlice(value, indentation) + case reflect.String: + return formatString(value.String(), indentation) + case reflect.Array: + return formatSlice(value, indentation) + case reflect.Map: + return formatMap(value, indentation) + case reflect.Struct: + return formatStruct(value, indentation) + case reflect.Interface: + return formatValue(value.Elem(), indentation) + default: + if value.CanInterface() { + return fmt.Sprintf("%#v", value.Interface()) + } else { + return fmt.Sprintf("%#v", value) + } + } +} + +func formatString(object interface{}, indentation uint) string { + if indentation == 1 { + s := fmt.Sprintf("%s", object) + components := strings.Split(s, "\n") + result := "" + for i, component := range components { + if i == 0 { + result += component + } else { + result += Indent + component + } + if i < len(components)-1 { + result += "\n" + } + } + + return fmt.Sprintf("%s", result) + } else { + return fmt.Sprintf("%q", object) + } +} + +func formatSlice(v reflect.Value, indentation uint) string { + l := v.Len() + result := make([]string, l) + longest := 0 + for i := 0; i < l; i++ { + result[i] = formatValue(v.Index(i), indentation+1) + if len(result[i]) > longest { + longest = len(result[i]) + } + } + + if longest > longFormThreshold { + indenter := strings.Repeat(Indent, int(indentation)) + return fmt.Sprintf("[\n%s%s,\n%s]", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter) + } else { + return fmt.Sprintf("[%s]", strings.Join(result, ", ")) + } +} + +func formatMap(v reflect.Value, indentation uint) string { + l := v.Len() + result := make([]string, l) + + longest := 0 + for i, key := range v.MapKeys() { + value := v.MapIndex(key) + result[i] = fmt.Sprintf("%s: %s", formatValue(key, 0), formatValue(value, indentation+1)) + if len(result[i]) > longest { + longest = len(result[i]) + } + } + + if longest > longFormThreshold { + indenter := strings.Repeat(Indent, int(indentation)) + return fmt.Sprintf("{\n%s%s,\n%s}", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter) + } else { + return fmt.Sprintf("{%s}", strings.Join(result, ", ")) + } +} + +func formatStruct(v reflect.Value, indentation uint) string { + t := v.Type() + + l := v.NumField() + result := []string{} + longest := 0 + for i := 0; i < l; i++ { + structField := t.Field(i) + fieldEntry := v.Field(i) + representation := 
fmt.Sprintf("%s: %s", structField.Name, formatValue(fieldEntry, indentation+1)) + result = append(result, representation) + if len(representation) > longest { + longest = len(representation) + } + } + if longest > longFormThreshold { + indenter := strings.Repeat(Indent, int(indentation)) + return fmt.Sprintf("{\n%s%s,\n%s}", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter) + } else { + return fmt.Sprintf("{%s}", strings.Join(result, ", ")) + } +} + +func isNilValue(a reflect.Value) bool { + switch a.Kind() { + case reflect.Invalid: + return true + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return a.IsNil() + } + + return false +} + +func isNil(a interface{}) bool { + if a == nil { + return true + } + + switch reflect.TypeOf(a).Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return reflect.ValueOf(a).IsNil() + } + + return false +} diff --git a/vendor/github.com/onsi/gomega/gbytes/buffer.go b/vendor/github.com/onsi/gomega/gbytes/buffer.go new file mode 100644 index 00000000..8775b861 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gbytes/buffer.go @@ -0,0 +1,229 @@ +/* +Package gbytes provides a buffer that supports incrementally detecting input. + +You use gbytes.Buffer with the gbytes.Say matcher. When Say finds a match, it fastforwards the buffer's read cursor to the end of that match. + +Subsequent matches against the buffer will only operate against data that appears *after* the read cursor. + +The read cursor is an opaque implementation detail that you cannot access. You should use the Say matcher to sift through the buffer. You can always +access the entire buffer's contents with Contents(). + +*/ +package gbytes + +import ( + "errors" + "fmt" + "io" + "regexp" + "sync" + "time" +) + +/* +gbytes.Buffer implements an io.Writer and can be used with the gbytes.Say matcher. + +You should only use a gbytes.Buffer in test code. It stores all writes in an in-memory buffer - behavior that is inappropriate for production code! +*/ +type Buffer struct { + contents []byte + readCursor uint64 + lock *sync.Mutex + detectCloser chan interface{} + closed bool +} + +/* +NewBuffer returns a new gbytes.Buffer +*/ +func NewBuffer() *Buffer { + return &Buffer{ + lock: &sync.Mutex{}, + } +} + +/* +BufferWithBytes returns a new gbytes.Buffer seeded with the passed in bytes +*/ +func BufferWithBytes(bytes []byte) *Buffer { + return &Buffer{ + lock: &sync.Mutex{}, + contents: bytes, + } +} + +/* +Write implements the io.Writer interface +*/ +func (b *Buffer) Write(p []byte) (n int, err error) { + b.lock.Lock() + defer b.lock.Unlock() + + if b.closed { + return 0, errors.New("attempt to write to closed buffer") + } + + b.contents = append(b.contents, p...) + return len(p), nil +} + +/* +Read implements the io.Reader interface. It advances the +cursor as it reads. + +Returns an error if called after Close. 
+*/ +func (b *Buffer) Read(d []byte) (int, error) { + b.lock.Lock() + defer b.lock.Unlock() + + if b.closed { + return 0, errors.New("attempt to read from closed buffer") + } + + if uint64(len(b.contents)) <= b.readCursor { + return 0, io.EOF + } + + n := copy(d, b.contents[b.readCursor:]) + b.readCursor += uint64(n) + + return n, nil +} + +/* +Close signifies that the buffer will no longer be written to +*/ +func (b *Buffer) Close() error { + b.lock.Lock() + defer b.lock.Unlock() + + b.closed = true + + return nil +} + +/* +Closed returns true if the buffer has been closed +*/ +func (b *Buffer) Closed() bool { + b.lock.Lock() + defer b.lock.Unlock() + + return b.closed +} + +/* +Contents returns all data ever written to the buffer. +*/ +func (b *Buffer) Contents() []byte { + b.lock.Lock() + defer b.lock.Unlock() + + contents := make([]byte, len(b.contents)) + copy(contents, b.contents) + return contents +} + +/* +Detect takes a regular expression and returns a channel. + +The channel will receive true the first time data matching the regular expression is written to the buffer. +The channel is subsequently closed and the buffer's read-cursor is fast-forwarded to just after the matching region. + +You typically don't need to use Detect and should use the ghttp.Say matcher instead. Detect is useful, however, in cases where your code must +be branch and handle different outputs written to the buffer. + +For example, consider a buffer hooked up to the stdout of a client library. You may (or may not, depending on state outside of your control) need to authenticate the client library. + +You could do something like: + +select { +case <-buffer.Detect("You are not logged in"): + //log in +case <-buffer.Detect("Success"): + //carry on +case <-time.After(time.Second): + //welp +} +buffer.CancelDetects() + +You should always call CancelDetects after using Detect. This will close any channels that have not detected and clean up the goroutines that were spawned to support them. + +Finally, you can pass detect a format string followed by variadic arguments. This will construct the regexp using fmt.Sprintf. +*/ +func (b *Buffer) Detect(desired string, args ...interface{}) chan bool { + formattedRegexp := desired + if len(args) > 0 { + formattedRegexp = fmt.Sprintf(desired, args...) + } + re := regexp.MustCompile(formattedRegexp) + + b.lock.Lock() + defer b.lock.Unlock() + + if b.detectCloser == nil { + b.detectCloser = make(chan interface{}) + } + + closer := b.detectCloser + response := make(chan bool) + go func() { + ticker := time.NewTicker(10 * time.Millisecond) + defer ticker.Stop() + defer close(response) + for { + select { + case <-ticker.C: + b.lock.Lock() + data, cursor := b.contents[b.readCursor:], b.readCursor + loc := re.FindIndex(data) + b.lock.Unlock() + + if loc != nil { + response <- true + b.lock.Lock() + newCursorPosition := cursor + uint64(loc[1]) + if newCursorPosition >= b.readCursor { + b.readCursor = newCursorPosition + } + b.lock.Unlock() + return + } + case <-closer: + return + } + } + }() + + return response +} + +/* +CancelDetects cancels any pending detects and cleans up their goroutines. You should always call this when you're done with a set of Detect channels. 
+*/ +func (b *Buffer) CancelDetects() { + b.lock.Lock() + defer b.lock.Unlock() + + close(b.detectCloser) + b.detectCloser = nil +} + +func (b *Buffer) didSay(re *regexp.Regexp) (bool, []byte) { + b.lock.Lock() + defer b.lock.Unlock() + + unreadBytes := b.contents[b.readCursor:] + copyOfUnreadBytes := make([]byte, len(unreadBytes)) + copy(copyOfUnreadBytes, unreadBytes) + + loc := re.FindIndex(unreadBytes) + + if loc != nil { + b.readCursor += uint64(loc[1]) + return true, copyOfUnreadBytes + } else { + return false, copyOfUnreadBytes + } +} diff --git a/vendor/github.com/onsi/gomega/gbytes/say_matcher.go b/vendor/github.com/onsi/gomega/gbytes/say_matcher.go new file mode 100644 index 00000000..ce5ebcbf --- /dev/null +++ b/vendor/github.com/onsi/gomega/gbytes/say_matcher.go @@ -0,0 +1,105 @@ +package gbytes + +import ( + "fmt" + "regexp" + + "github.com/onsi/gomega/format" +) + +//Objects satisfying the BufferProvider can be used with the Say matcher. +type BufferProvider interface { + Buffer() *Buffer +} + +/* +Say is a Gomega matcher that operates on gbytes.Buffers: + + Ω(buffer).Should(Say("something")) + +will succeed if the unread portion of the buffer matches the regular expression "something". + +When Say succeeds, it fast forwards the gbytes.Buffer's read cursor to just after the succesful match. +Thus, subsequent calls to Say will only match against the unread portion of the buffer + +Say pairs very well with Eventually. To asser that a buffer eventually receives data matching "[123]-star" within 3 seconds you can: + + Eventually(buffer, 3).Should(Say("[123]-star")) + +Ditto with consistently. To assert that a buffer does not receive data matching "never-see-this" for 1 second you can: + + Consistently(buffer, 1).ShouldNot(Say("never-see-this")) + +In addition to bytes.Buffers, Say can operate on objects that implement the gbytes.BufferProvider interface. +In such cases, Say simply operates on the *gbytes.Buffer returned by Buffer() + +If the buffer is closed, the Say matcher will tell Eventually to abort. +*/ +func Say(expected string, args ...interface{}) *sayMatcher { + formattedRegexp := expected + if len(args) > 0 { + formattedRegexp = fmt.Sprintf(expected, args...) + } + return &sayMatcher{ + re: regexp.MustCompile(formattedRegexp), + } +} + +type sayMatcher struct { + re *regexp.Regexp + receivedSayings []byte +} + +func (m *sayMatcher) buffer(actual interface{}) (*Buffer, bool) { + var buffer *Buffer + + switch x := actual.(type) { + case *Buffer: + buffer = x + case BufferProvider: + buffer = x.Buffer() + default: + return nil, false + } + + return buffer, true +} + +func (m *sayMatcher) Match(actual interface{}) (success bool, err error) { + buffer, ok := m.buffer(actual) + if !ok { + return false, fmt.Errorf("Say must be passed a *gbytes.Buffer or BufferProvider. 
Got:\n%s", format.Object(actual, 1)) + } + + didSay, sayings := buffer.didSay(m.re) + m.receivedSayings = sayings + + return didSay, nil +} + +func (m *sayMatcher) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf( + "Got stuck at:\n%s\nWaiting for:\n%s", + format.IndentString(string(m.receivedSayings), 1), + format.IndentString(m.re.String(), 1), + ) +} + +func (m *sayMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf( + "Saw:\n%s\nWhich matches the unexpected:\n%s", + format.IndentString(string(m.receivedSayings), 1), + format.IndentString(m.re.String(), 1), + ) +} + +func (m *sayMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + switch x := actual.(type) { + case *Buffer: + return !x.Closed() + case BufferProvider: + return !x.Buffer().Closed() + default: + return true + } +} diff --git a/vendor/github.com/onsi/gomega/gexec/build.go b/vendor/github.com/onsi/gomega/gexec/build.go new file mode 100644 index 00000000..3e9bf9f9 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gexec/build.go @@ -0,0 +1,78 @@ +package gexec + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "runtime" +) + +var tmpDir string + +/* +Build uses go build to compile the package at packagePath. The resulting binary is saved off in a temporary directory. +A path pointing to this binary is returned. + +Build uses the $GOPATH set in your environment. It passes the variadic args on to `go build`. +*/ +func Build(packagePath string, args ...string) (compiledPath string, err error) { + return BuildIn(os.Getenv("GOPATH"), packagePath, args...) +} + +/* +BuildIn is identical to Build but allows you to specify a custom $GOPATH (the first argument). +*/ +func BuildIn(gopath string, packagePath string, args ...string) (compiledPath string, err error) { + tmpDir, err := temporaryDirectory() + if err != nil { + return "", err + } + + if len(gopath) == 0 { + return "", errors.New("$GOPATH not provided when building " + packagePath) + } + + executable := filepath.Join(tmpDir, path.Base(packagePath)) + if runtime.GOOS == "windows" { + executable = executable + ".exe" + } + + cmdArgs := append([]string{"build"}, args...) + cmdArgs = append(cmdArgs, "-o", executable, packagePath) + + build := exec.Command("go", cmdArgs...) + build.Env = append([]string{"GOPATH=" + gopath}, os.Environ()...) + + output, err := build.CombinedOutput() + if err != nil { + return "", fmt.Errorf("Failed to build %s:\n\nError:\n%s\n\nOutput:\n%s", packagePath, err, string(output)) + } + + return executable, nil +} + +/* +You should call CleanupBuildArtifacts before your test ends to clean up any temporary artifacts generated by +gexec. In Ginkgo this is typically done in an AfterSuite callback. 
+*/ +func CleanupBuildArtifacts() { + if tmpDir != "" { + os.RemoveAll(tmpDir) + } +} + +func temporaryDirectory() (string, error) { + var err error + if tmpDir == "" { + tmpDir, err = ioutil.TempDir("", "gexec_artifacts") + if err != nil { + return "", err + } + } + + return ioutil.TempDir(tmpDir, "g") +} diff --git a/vendor/github.com/onsi/gomega/gexec/exit_matcher.go b/vendor/github.com/onsi/gomega/gexec/exit_matcher.go new file mode 100644 index 00000000..e6f43294 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gexec/exit_matcher.go @@ -0,0 +1,88 @@ +package gexec + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +/* +The Exit matcher operates on a session: + + Ω(session).Should(Exit()) + +Exit passes if the session has already exited. + +If no status code is provided, then Exit will succeed if the session has exited regardless of exit code. +Otherwise, Exit will only succeed if the process has exited with the provided status code. + +Note that the process must have already exited. To wait for a process to exit, use Eventually: + + Eventually(session, 3).Should(Exit(0)) +*/ +func Exit(optionalExitCode ...int) *exitMatcher { + exitCode := -1 + if len(optionalExitCode) > 0 { + exitCode = optionalExitCode[0] + } + + return &exitMatcher{ + exitCode: exitCode, + } +} + +type exitMatcher struct { + exitCode int + didExit bool + actualExitCode int +} + +type Exiter interface { + ExitCode() int +} + +func (m *exitMatcher) Match(actual interface{}) (success bool, err error) { + exiter, ok := actual.(Exiter) + if !ok { + return false, fmt.Errorf("Exit must be passed a gexec.Exiter (Missing method ExitCode() int) Got:\n%s", format.Object(actual, 1)) + } + + m.actualExitCode = exiter.ExitCode() + + if m.actualExitCode == -1 { + return false, nil + } + + if m.exitCode == -1 { + return true, nil + } + return m.exitCode == m.actualExitCode, nil +} + +func (m *exitMatcher) FailureMessage(actual interface{}) (message string) { + if m.actualExitCode == -1 { + return "Expected process to exit. It did not." + } else { + return format.Message(m.actualExitCode, "to match exit code:", m.exitCode) + } +} + +func (m *exitMatcher) NegatedFailureMessage(actual interface{}) (message string) { + if m.actualExitCode == -1 { + return "you really shouldn't be able to see this!" + } else { + if m.exitCode == -1 { + return "Expected process not to exit. It did." + } else { + return format.Message(m.actualExitCode, "not to match exit code:", m.exitCode) + } + } +} + +func (m *exitMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + session, ok := actual.(*Session) + if ok { + return session.ExitCode() == -1 + } + return true +} diff --git a/vendor/github.com/onsi/gomega/gexec/prefixed_writer.go b/vendor/github.com/onsi/gomega/gexec/prefixed_writer.go new file mode 100644 index 00000000..05e695ab --- /dev/null +++ b/vendor/github.com/onsi/gomega/gexec/prefixed_writer.go @@ -0,0 +1,53 @@ +package gexec + +import ( + "io" + "sync" +) + +/* +PrefixedWriter wraps an io.Writer, emiting the passed in prefix at the beginning of each new line. 
+This can be useful when running multiple gexec.Sessions concurrently - you can prefix the log output of each +session by passing in a PrefixedWriter: + +gexec.Start(cmd, NewPrefixedWriter("[my-cmd] ", GinkgoWriter), NewPrefixedWriter("[my-cmd] ", GinkgoWriter)) +*/ +type PrefixedWriter struct { + prefix []byte + writer io.Writer + lock *sync.Mutex + atStartOfLine bool +} + +func NewPrefixedWriter(prefix string, writer io.Writer) *PrefixedWriter { + return &PrefixedWriter{ + prefix: []byte(prefix), + writer: writer, + lock: &sync.Mutex{}, + atStartOfLine: true, + } +} + +func (w *PrefixedWriter) Write(b []byte) (int, error) { + w.lock.Lock() + defer w.lock.Unlock() + + toWrite := []byte{} + + for _, c := range b { + if w.atStartOfLine { + toWrite = append(toWrite, w.prefix...) + } + + toWrite = append(toWrite, c) + + w.atStartOfLine = c == '\n' + } + + _, err := w.writer.Write(toWrite) + if err != nil { + return 0, err + } + + return len(b), nil +} diff --git a/vendor/github.com/onsi/gomega/gexec/session.go b/vendor/github.com/onsi/gomega/gexec/session.go new file mode 100644 index 00000000..46e71223 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gexec/session.go @@ -0,0 +1,214 @@ +/* +Package gexec provides support for testing external processes. +*/ +package gexec + +import ( + "io" + "os" + "os/exec" + "reflect" + "sync" + "syscall" + + . "github.com/onsi/gomega" + "github.com/onsi/gomega/gbytes" +) + +const INVALID_EXIT_CODE = 254 + +type Session struct { + //The wrapped command + Command *exec.Cmd + + //A *gbytes.Buffer connected to the command's stdout + Out *gbytes.Buffer + + //A *gbytes.Buffer connected to the command's stderr + Err *gbytes.Buffer + + //A channel that will close when the command exits + Exited <-chan struct{} + + lock *sync.Mutex + exitCode int +} + +/* +Start starts the passed-in *exec.Cmd command. It wraps the command in a *gexec.Session. + +The session pipes the command's stdout and stderr to two *gbytes.Buffers available as properties on the session: session.Out and session.Err. +These buffers can be used with the gbytes.Say matcher to match against unread output: + + Ω(session.Out).Should(gbytes.Say("foo-out")) + Ω(session.Err).Should(gbytes.Say("foo-err")) + +In addition, Session satisfies the gbytes.BufferProvider interface and provides the stdout *gbytes.Buffer. This allows you to replace the first line, above, with: + + Ω(session).Should(gbytes.Say("foo-out")) + +When outWriter and/or errWriter are non-nil, the session will pipe stdout and/or stderr output both into the session *gybtes.Buffers and to the passed-in outWriter/errWriter. +This is useful for capturing the process's output or logging it to screen. In particular, when using Ginkgo it can be convenient to direct output to the GinkgoWriter: + + session, err := Start(command, GinkgoWriter, GinkgoWriter) + +This will log output when running tests in verbose mode, but - otherwise - will only log output when a test fails. + +The session wrapper is responsible for waiting on the *exec.Cmd command. You *should not* call command.Wait() yourself. +Instead, to assert that the command has exited you can use the gexec.Exit matcher: + + Ω(session).Should(gexec.Exit()) + +When the session exits it closes the stdout and stderr gbytes buffers. This will short circuit any +Eventuallys waiting fo the buffers to Say something. 
+*/ +func Start(command *exec.Cmd, outWriter io.Writer, errWriter io.Writer) (*Session, error) { + exited := make(chan struct{}) + + session := &Session{ + Command: command, + Out: gbytes.NewBuffer(), + Err: gbytes.NewBuffer(), + Exited: exited, + lock: &sync.Mutex{}, + exitCode: -1, + } + + var commandOut, commandErr io.Writer + + commandOut, commandErr = session.Out, session.Err + + if outWriter != nil && !reflect.ValueOf(outWriter).IsNil() { + commandOut = io.MultiWriter(commandOut, outWriter) + } + + if errWriter != nil && !reflect.ValueOf(errWriter).IsNil() { + commandErr = io.MultiWriter(commandErr, errWriter) + } + + command.Stdout = commandOut + command.Stderr = commandErr + + err := command.Start() + if err == nil { + go session.monitorForExit(exited) + } + + return session, err +} + +/* +Buffer implements the gbytes.BufferProvider interface and returns s.Out +This allows you to make gbytes.Say matcher assertions against stdout without having to reference .Out: + + Eventually(session).Should(gbytes.Say("foo")) +*/ +func (s *Session) Buffer() *gbytes.Buffer { + return s.Out +} + +/* +ExitCode returns the wrapped command's exit code. If the command hasn't exited yet, ExitCode returns -1. + +To assert that the command has exited it is more convenient to use the Exit matcher: + + Eventually(s).Should(gexec.Exit()) + +When the process exits because it has received a particular signal, the exit code will be 128+signal-value +(See http://www.tldp.org/LDP/abs/html/exitcodes.html and http://man7.org/linux/man-pages/man7/signal.7.html) + +*/ +func (s *Session) ExitCode() int { + s.lock.Lock() + defer s.lock.Unlock() + return s.exitCode +} + +/* +Wait waits until the wrapped command exits. It can be passed an optional timeout. +If the command does not exit within the timeout, Wait will trigger a test failure. + +Wait returns the session, making it possible to chain: + + session.Wait().Out.Contents() + +will wait for the command to exit then return the entirety of Out's contents. + +Wait uses eventually under the hood and accepts the same timeout/polling intervals that eventually does. +*/ +func (s *Session) Wait(timeout ...interface{}) *Session { + EventuallyWithOffset(1, s, timeout...).Should(Exit()) + return s +} + +/* +Kill sends the running command a SIGKILL signal. It does not wait for the process to exit. + +If the command has already exited, Kill returns silently. + +The session is returned to enable chaining. +*/ +func (s *Session) Kill() *Session { + if s.ExitCode() != -1 { + return s + } + s.Command.Process.Kill() + return s +} + +/* +Interrupt sends the running command a SIGINT signal. It does not wait for the process to exit. + +If the command has already exited, Interrupt returns silently. + +The session is returned to enable chaining. +*/ +func (s *Session) Interrupt() *Session { + return s.Signal(syscall.SIGINT) +} + +/* +Terminate sends the running command a SIGTERM signal. It does not wait for the process to exit. + +If the command has already exited, Terminate returns silently. + +The session is returned to enable chaining. +*/ +func (s *Session) Terminate() *Session { + return s.Signal(syscall.SIGTERM) +} + +/* +Terminate sends the running command the passed in signal. It does not wait for the process to exit. + +If the command has already exited, Signal returns silently. + +The session is returned to enable chaining. 
+*/ +func (s *Session) Signal(signal os.Signal) *Session { + if s.ExitCode() != -1 { + return s + } + s.Command.Process.Signal(signal) + return s +} + +func (s *Session) monitorForExit(exited chan<- struct{}) { + err := s.Command.Wait() + s.lock.Lock() + s.Out.Close() + s.Err.Close() + status := s.Command.ProcessState.Sys().(syscall.WaitStatus) + if status.Signaled() { + s.exitCode = 128 + int(status.Signal()) + } else { + exitStatus := status.ExitStatus() + if exitStatus == -1 && err != nil { + s.exitCode = INVALID_EXIT_CODE + } + s.exitCode = exitStatus + } + s.lock.Unlock() + + close(exited) +} diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go new file mode 100644 index 00000000..78bd188c --- /dev/null +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -0,0 +1,335 @@ +/* +Gomega is the Ginkgo BDD-style testing framework's preferred matcher library. + +The godoc documentation describes Gomega's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/gomega/ + +Gomega on Github: http://github.com/onsi/gomega + +Learn more about Ginkgo online: http://onsi.github.io/ginkgo + +Ginkgo on Github: http://github.com/onsi/ginkgo + +Gomega is MIT-Licensed +*/ +package gomega + +import ( + "fmt" + "reflect" + "time" + + "github.com/onsi/gomega/internal/assertion" + "github.com/onsi/gomega/internal/asyncassertion" + "github.com/onsi/gomega/internal/testingtsupport" + "github.com/onsi/gomega/types" +) + +const GOMEGA_VERSION = "1.0" + +const nilFailHandlerPanic = `You are trying to make an assertion, but Gomega's fail handler is nil. +If you're using Ginkgo then you probably forgot to put your assertion in an It(). +Alternatively, you may have forgotten to register a fail handler with RegisterFailHandler() or RegisterTestingT(). +` + +var globalFailHandler types.GomegaFailHandler + +var defaultEventuallyTimeout = time.Second +var defaultEventuallyPollingInterval = 10 * time.Millisecond +var defaultConsistentlyDuration = 100 * time.Millisecond +var defaultConsistentlyPollingInterval = 10 * time.Millisecond + +//RegisterFailHandler connects Ginkgo to Gomega. When a matcher fails +//the fail handler passed into RegisterFailHandler is called. +func RegisterFailHandler(handler types.GomegaFailHandler) { + globalFailHandler = handler +} + +//RegisterTestingT connects Gomega to Golang's XUnit style +//Testing.T tests. You'll need to call this at the top of each XUnit style test: +// +// func TestFarmHasCow(t *testing.T) { +// RegisterTestingT(t) +// +// f := farm.New([]string{"Cow", "Horse"}) +// Expect(f.HasCow()).To(BeTrue(), "Farm should have cow") +// } +// +// Note that this *testing.T is registered *globally* by Gomega (this is why you don't have to +// pass `t` down to the matcher itself). This means that you cannot run the XUnit style tests +// in parallel as the global fail handler cannot point to more than one testing.T at a time. +// +// (As an aside: Ginkgo gets around this limitation by running parallel tests in different *processes*). +func RegisterTestingT(t types.GomegaTestingT) { + RegisterFailHandler(testingtsupport.BuildTestingTGomegaFailHandler(t)) +} + +//InterceptGomegaHandlers runs a given callback and returns an array of +//failure messages generated by any Gomega assertions within the callback. 
+// +//This is accomplished by temporarily replacing the *global* fail handler +//with a fail handler that simply annotates failures. The original fail handler +//is reset when InterceptGomegaFailures returns. +// +//This is most useful when testing custom matchers, but can also be used to check +//on a value using a Gomega assertion without causing a test failure. +func InterceptGomegaFailures(f func()) []string { + originalHandler := globalFailHandler + failures := []string{} + RegisterFailHandler(func(message string, callerSkip ...int) { + failures = append(failures, message) + }) + f() + RegisterFailHandler(originalHandler) + return failures +} + +//Ω wraps an actual value allowing assertions to be made on it: +// Ω("foo").Should(Equal("foo")) +// +//If Ω is passed more than one argument it will pass the *first* argument to the matcher. +//All subsequent arguments will be required to be nil/zero. +// +//This is convenient if you want to make an assertion on a method/function that returns +//a value and an error - a common patter in Go. +// +//For example, given a function with signature: +// func MyAmazingThing() (int, error) +// +//Then: +// Ω(MyAmazingThing()).Should(Equal(3)) +//Will succeed only if `MyAmazingThing()` returns `(3, nil)` +// +//Ω and Expect are identical +func Ω(actual interface{}, extra ...interface{}) GomegaAssertion { + return ExpectWithOffset(0, actual, extra...) +} + +//Expect wraps an actual value allowing assertions to be made on it: +// Expect("foo").To(Equal("foo")) +// +//If Expect is passed more than one argument it will pass the *first* argument to the matcher. +//All subsequent arguments will be required to be nil/zero. +// +//This is convenient if you want to make an assertion on a method/function that returns +//a value and an error - a common patter in Go. +// +//For example, given a function with signature: +// func MyAmazingThing() (int, error) +// +//Then: +// Expect(MyAmazingThing()).Should(Equal(3)) +//Will succeed only if `MyAmazingThing()` returns `(3, nil)` +// +//Expect and Ω are identical +func Expect(actual interface{}, extra ...interface{}) GomegaAssertion { + return ExpectWithOffset(0, actual, extra...) +} + +//ExpectWithOffset wraps an actual value allowing assertions to be made on it: +// ExpectWithOffset(1, "foo").To(Equal("foo")) +// +//Unlike `Expect` and `Ω`, `ExpectWithOffset` takes an additional integer argument +//this is used to modify the call-stack offset when computing line numbers. +// +//This is most useful in helper functions that make assertions. If you want Gomega's +//error message to refer to the calling line in the test (as opposed to the line in the helper function) +//set the first argument of `ExpectWithOffset` appropriately. +func ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) GomegaAssertion { + if globalFailHandler == nil { + panic(nilFailHandlerPanic) + } + return assertion.New(actual, globalFailHandler, offset, extra...) +} + +//Eventually wraps an actual value allowing assertions to be made on it. +//The assertion is tried periodically until it passes or a timeout occurs. +// +//Both the timeout and polling interval are configurable as optional arguments: +//The first optional argument is the timeout +//The second optional argument is the polling interval +// +//Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers. In the +//last case they are interpreted as seconds. 
+// +//If Eventually is passed an actual that is a function taking no arguments and returning at least one value, +//then Eventually will call the function periodically and try the matcher against the function's first return value. +// +//Example: +// +// Eventually(func() int { +// return thingImPolling.Count() +// }).Should(BeNumerically(">=", 17)) +// +//Note that this example could be rewritten: +// +// Eventually(thingImPolling.Count).Should(BeNumerically(">=", 17)) +// +//If the function returns more than one value, then Eventually will pass the first value to the matcher and +//assert that all other values are nil/zero. +//This allows you to pass Eventually a function that returns a value and an error - a common pattern in Go. +// +//For example, consider a method that returns a value and an error: +// func FetchFromDB() (string, error) +// +//Then +// Eventually(FetchFromDB).Should(Equal("hasselhoff")) +// +//Will pass only if the the returned error is nil and the returned string passes the matcher. +// +//Eventually's default timeout is 1 second, and its default polling interval is 10ms +func Eventually(actual interface{}, intervals ...interface{}) GomegaAsyncAssertion { + return EventuallyWithOffset(0, actual, intervals...) +} + +//EventuallyWithOffset operates like Eventually but takes an additional +//initial argument to indicate an offset in the call stack. This is useful when building helper +//functions that contain matchers. To learn more, read about `ExpectWithOffset`. +func EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) GomegaAsyncAssertion { + if globalFailHandler == nil { + panic(nilFailHandlerPanic) + } + timeoutInterval := defaultEventuallyTimeout + pollingInterval := defaultEventuallyPollingInterval + if len(intervals) > 0 { + timeoutInterval = toDuration(intervals[0]) + } + if len(intervals) > 1 { + pollingInterval = toDuration(intervals[1]) + } + return asyncassertion.New(asyncassertion.AsyncAssertionTypeEventually, actual, globalFailHandler, timeoutInterval, pollingInterval, offset) +} + +//Consistently wraps an actual value allowing assertions to be made on it. +//The assertion is tried periodically and is required to pass for a period of time. +// +//Both the total time and polling interval are configurable as optional arguments: +//The first optional argument is the duration that Consistently will run for +//The second optional argument is the polling interval +// +//Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers. In the +//last case they are interpreted as seconds. +// +//If Consistently is passed an actual that is a function taking no arguments and returning at least one value, +//then Consistently will call the function periodically and try the matcher against the function's first return value. +// +//If the function returns more than one value, then Consistently will pass the first value to the matcher and +//assert that all other values are nil/zero. +//This allows you to pass Consistently a function that returns a value and an error - a common pattern in Go. +// +//Consistently is useful in cases where you want to assert that something *does not happen* over a period of tiem. +//For example, you want to assert that a goroutine does *not* send data down a channel. 
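// A small sketch of Eventually polling a no-argument function, with an explicit timeout
// and polling interval, as described above. The background counter is an arbitrary
// illustration.
package poll_test

import (
	"sync/atomic"
	"testing"
	"time"

	. "github.com/onsi/gomega"
)

func TestEventuallyCounts(t *testing.T) {
	RegisterTestingT(t)

	var count int64
	go func() {
		for i := 0; i < 20; i++ {
			atomic.AddInt64(&count, 1)
			time.Sleep(10 * time.Millisecond)
		}
	}()

	// Poll every 20ms, for up to 2s, until the matcher is satisfied.
	Eventually(func() int64 {
		return atomic.LoadInt64(&count)
	}, 2*time.Second, 20*time.Millisecond).Should(BeNumerically(">=", 10))
}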
In this case, you could: +// +// Consistently(channel).ShouldNot(Receive()) +// +//Consistently's default duration is 100ms, and its default polling interval is 10ms +func Consistently(actual interface{}, intervals ...interface{}) GomegaAsyncAssertion { + return ConsistentlyWithOffset(0, actual, intervals...) +} + +//ConsistentlyWithOffset operates like Consistnetly but takes an additional +//initial argument to indicate an offset in the call stack. This is useful when building helper +//functions that contain matchers. To learn more, read about `ExpectWithOffset`. +func ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) GomegaAsyncAssertion { + if globalFailHandler == nil { + panic(nilFailHandlerPanic) + } + timeoutInterval := defaultConsistentlyDuration + pollingInterval := defaultConsistentlyPollingInterval + if len(intervals) > 0 { + timeoutInterval = toDuration(intervals[0]) + } + if len(intervals) > 1 { + pollingInterval = toDuration(intervals[1]) + } + return asyncassertion.New(asyncassertion.AsyncAssertionTypeConsistently, actual, globalFailHandler, timeoutInterval, pollingInterval, offset) +} + +//Set the default timeout duration for Eventually. Eventually will repeatedly poll your condition until it succeeds, or until this timeout elapses. +func SetDefaultEventuallyTimeout(t time.Duration) { + defaultEventuallyTimeout = t +} + +//Set the default polling interval for Eventually. +func SetDefaultEventuallyPollingInterval(t time.Duration) { + defaultEventuallyPollingInterval = t +} + +//Set the default duration for Consistently. Consistently will verify that your condition is satsified for this long. +func SetDefaultConsistentlyDuration(t time.Duration) { + defaultConsistentlyDuration = t +} + +//Set the default polling interval for Consistently. +func SetDefaultConsistentlyPollingInterval(t time.Duration) { + defaultConsistentlyPollingInterval = t +} + +//GomegaAsyncAssertion is returned by Eventually and Consistently and polls the actual value passed into Eventually against +//the matcher passed to the Should and ShouldNot methods. +// +//Both Should and ShouldNot take a variadic optionalDescription argument. This is passed on to +//fmt.Sprintf() and is used to annotate failure messages. This allows you to make your failure messages more +//descriptive +// +//Both Should and ShouldNot return a boolean that is true if the assertion passed and false if it failed. +// +//Example: +// +// Eventually(myChannel).Should(Receive(), "Something should have come down the pipe.") +// Consistently(myChannel).ShouldNot(Receive(), "Nothing should have come down the pipe.") +type GomegaAsyncAssertion interface { + Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool + ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool +} + +//GomegaAssertion is returned by Ω and Expect and compares the actual value to the matcher +//passed to the Should/ShouldNot and To/ToNot/NotTo methods. +// +//Typically Should/ShouldNot are used with Ω and To/ToNot/NotTo are used with Expect +//though this is not enforced. +// +//All methods take a variadic optionalDescription argument. This is passed on to fmt.Sprintf() +//and is used to annotate failure messages. +// +//All methods return a bool that is true if hte assertion passed and false if it failed. 
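// A sketch of Consistently guarding against something happening, per the discussion above:
// the channel is asserted to stay quiet for the whole duration. The quiet channel below is
// a hypothetical stand-in for a goroutine that must not emit anything.
package quiet_test

import (
	"testing"
	"time"

	. "github.com/onsi/gomega"
)

func TestNothingArrives(t *testing.T) {
	RegisterTestingT(t)

	quiet := make(chan string, 1)

	// Hold the assertion for 200ms, checking every 20ms; the SetDefault* functions could
	// tune these globally instead of passing them per call.
	Consistently(quiet, 200*time.Millisecond, 20*time.Millisecond).ShouldNot(Receive())
}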
+// +//Example: +// +// Ω(farm.HasCow()).Should(BeTrue(), "Farm %v should have a cow", farm) +type GomegaAssertion interface { + Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool + ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool + + To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool + ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool + NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool +} + +//OmegaMatcher is deprecated in favor of the better-named and better-organized types.GomegaMatcher but sticks around to support existing code that uses it +type OmegaMatcher types.GomegaMatcher + +func toDuration(input interface{}) time.Duration { + duration, ok := input.(time.Duration) + if ok { + return duration + } + + value := reflect.ValueOf(input) + kind := reflect.TypeOf(input).Kind() + + if reflect.Int <= kind && kind <= reflect.Int64 { + return time.Duration(value.Int()) * time.Second + } else if reflect.Uint <= kind && kind <= reflect.Uint64 { + return time.Duration(value.Uint()) * time.Second + } else if reflect.Float32 <= kind && kind <= reflect.Float64 { + return time.Duration(value.Float() * float64(time.Second)) + } else if reflect.String == kind { + duration, err := time.ParseDuration(value.String()) + if err != nil { + panic(fmt.Sprintf("%#v is not a valid parsable duration string.", input)) + } + return duration + } + + panic(fmt.Sprintf("%v is not a valid interval. Must be time.Duration, parsable duration string or a number.", input)) +} diff --git a/vendor/github.com/onsi/gomega/internal/assertion/assertion.go b/vendor/github.com/onsi/gomega/internal/assertion/assertion.go new file mode 100644 index 00000000..b73673f2 --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/assertion/assertion.go @@ -0,0 +1,98 @@ +package assertion + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/types" +) + +type Assertion struct { + actualInput interface{} + fail types.GomegaFailHandler + offset int + extra []interface{} +} + +func New(actualInput interface{}, fail types.GomegaFailHandler, offset int, extra ...interface{}) *Assertion { + return &Assertion{ + actualInput: actualInput, + fail: fail, + offset: offset, + extra: extra, + } +} + +func (assertion *Assertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, true, optionalDescription...) +} + +func (assertion *Assertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...) +} + +func (assertion *Assertion) To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, true, optionalDescription...) +} + +func (assertion *Assertion) ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...) +} + +func (assertion *Assertion) NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...) 
+} + +func (assertion *Assertion) buildDescription(optionalDescription ...interface{}) string { + switch len(optionalDescription) { + case 0: + return "" + default: + return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n" + } +} + +func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool { + matches, err := matcher.Match(assertion.actualInput) + description := assertion.buildDescription(optionalDescription...) + if err != nil { + assertion.fail(description+err.Error(), 2+assertion.offset) + return false + } + if matches != desiredMatch { + var message string + if desiredMatch { + message = matcher.FailureMessage(assertion.actualInput) + } else { + message = matcher.NegatedFailureMessage(assertion.actualInput) + } + assertion.fail(description+message, 2+assertion.offset) + return false + } + + return true +} + +func (assertion *Assertion) vetExtras(optionalDescription ...interface{}) bool { + success, message := vetExtras(assertion.extra) + if success { + return true + } + + description := assertion.buildDescription(optionalDescription...) + assertion.fail(description+message, 2+assertion.offset) + return false +} + +func vetExtras(extras []interface{}) (bool, string) { + for i, extra := range extras { + if extra != nil { + zeroValue := reflect.Zero(reflect.TypeOf(extra)).Interface() + if !reflect.DeepEqual(zeroValue, extra) { + message := fmt.Sprintf("Unexpected non-nil/non-zero extra argument at index %d:\n\t<%T>: %#v", i+1, extra, extra) + return false, message + } + } + } + return true, "" +} diff --git a/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go b/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go new file mode 100644 index 00000000..bce08530 --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go @@ -0,0 +1,189 @@ +package asyncassertion + +import ( + "errors" + "fmt" + "reflect" + "time" + + "github.com/onsi/gomega/internal/oraclematcher" + "github.com/onsi/gomega/types" +) + +type AsyncAssertionType uint + +const ( + AsyncAssertionTypeEventually AsyncAssertionType = iota + AsyncAssertionTypeConsistently +) + +type AsyncAssertion struct { + asyncType AsyncAssertionType + actualInput interface{} + timeoutInterval time.Duration + pollingInterval time.Duration + fail types.GomegaFailHandler + offset int +} + +func New(asyncType AsyncAssertionType, actualInput interface{}, fail types.GomegaFailHandler, timeoutInterval time.Duration, pollingInterval time.Duration, offset int) *AsyncAssertion { + actualType := reflect.TypeOf(actualInput) + if actualType.Kind() == reflect.Func { + if actualType.NumIn() != 0 || actualType.NumOut() == 0 { + panic("Expected a function with no arguments and one or more return values.") + } + } + + return &AsyncAssertion{ + asyncType: asyncType, + actualInput: actualInput, + fail: fail, + timeoutInterval: timeoutInterval, + pollingInterval: pollingInterval, + offset: offset, + } +} + +func (assertion *AsyncAssertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + return assertion.match(matcher, true, optionalDescription...) +} + +func (assertion *AsyncAssertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + return assertion.match(matcher, false, optionalDescription...) 
+} + +func (assertion *AsyncAssertion) buildDescription(optionalDescription ...interface{}) string { + switch len(optionalDescription) { + case 0: + return "" + default: + return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n" + } +} + +func (assertion *AsyncAssertion) actualInputIsAFunction() bool { + actualType := reflect.TypeOf(assertion.actualInput) + return actualType.Kind() == reflect.Func && actualType.NumIn() == 0 && actualType.NumOut() > 0 +} + +func (assertion *AsyncAssertion) pollActual() (interface{}, error) { + if assertion.actualInputIsAFunction() { + values := reflect.ValueOf(assertion.actualInput).Call([]reflect.Value{}) + + extras := []interface{}{} + for _, value := range values[1:] { + extras = append(extras, value.Interface()) + } + + success, message := vetExtras(extras) + + if !success { + return nil, errors.New(message) + } + + return values[0].Interface(), nil + } + + return assertion.actualInput, nil +} + +func (assertion *AsyncAssertion) matcherMayChange(matcher types.GomegaMatcher, value interface{}) bool { + if assertion.actualInputIsAFunction() { + return true + } + + return oraclematcher.MatchMayChangeInTheFuture(matcher, value) +} + +func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool { + timer := time.Now() + timeout := time.After(assertion.timeoutInterval) + + description := assertion.buildDescription(optionalDescription...) + + var matches bool + var err error + mayChange := true + value, err := assertion.pollActual() + if err == nil { + mayChange = assertion.matcherMayChange(matcher, value) + matches, err = matcher.Match(value) + } + + fail := func(preamble string) { + errMsg := "" + message := "" + if err != nil { + errMsg = "Error: " + err.Error() + } else { + if desiredMatch { + message = matcher.FailureMessage(value) + } else { + message = matcher.NegatedFailureMessage(value) + } + } + assertion.fail(fmt.Sprintf("%s after %.3fs.\n%s%s%s", preamble, time.Since(timer).Seconds(), description, message, errMsg), 3+assertion.offset) + } + + if assertion.asyncType == AsyncAssertionTypeEventually { + for { + if err == nil && matches == desiredMatch { + return true + } + + if !mayChange { + fail("No future change is possible. 
Bailing out early") + return false + } + + select { + case <-time.After(assertion.pollingInterval): + value, err = assertion.pollActual() + if err == nil { + mayChange = assertion.matcherMayChange(matcher, value) + matches, err = matcher.Match(value) + } + case <-timeout: + fail("Timed out") + return false + } + } + } else if assertion.asyncType == AsyncAssertionTypeConsistently { + for { + if !(err == nil && matches == desiredMatch) { + fail("Failed") + return false + } + + if !mayChange { + return true + } + + select { + case <-time.After(assertion.pollingInterval): + value, err = assertion.pollActual() + if err == nil { + mayChange = assertion.matcherMayChange(matcher, value) + matches, err = matcher.Match(value) + } + case <-timeout: + return true + } + } + } + + return false +} + +func vetExtras(extras []interface{}) (bool, string) { + for i, extra := range extras { + if extra != nil { + zeroValue := reflect.Zero(reflect.TypeOf(extra)).Interface() + if !reflect.DeepEqual(zeroValue, extra) { + message := fmt.Sprintf("Unexpected non-nil/non-zero extra argument at index %d:\n\t<%T>: %#v", i+1, extra, extra) + return false, message + } + } + } + return true, "" +} diff --git a/vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go b/vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go new file mode 100644 index 00000000..66cad88a --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go @@ -0,0 +1,25 @@ +package oraclematcher + +import "github.com/onsi/gomega/types" + +/* +GomegaMatchers that also match the OracleMatcher interface can convey information about +whether or not their result will change upon future attempts. + +This allows `Eventually` and `Consistently` to short circuit if success becomes impossible. + +For example, a process' exit code can never change. So, gexec's Exit matcher returns `true` +for `MatchMayChangeInTheFuture` until the process exits, at which point it returns `false` forevermore. 
+*/ +type OracleMatcher interface { + MatchMayChangeInTheFuture(actual interface{}) bool +} + +func MatchMayChangeInTheFuture(matcher types.GomegaMatcher, value interface{}) bool { + oracleMatcher, ok := matcher.(OracleMatcher) + if !ok { + return true + } + + return oracleMatcher.MatchMayChangeInTheFuture(value) +} diff --git a/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go b/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go new file mode 100644 index 00000000..7871fd43 --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go @@ -0,0 +1,40 @@ +package testingtsupport + +import ( + "regexp" + "runtime/debug" + "strings" + + "github.com/onsi/gomega/types" +) + +type gomegaTestingT interface { + Errorf(format string, args ...interface{}) +} + +func BuildTestingTGomegaFailHandler(t gomegaTestingT) types.GomegaFailHandler { + return func(message string, callerSkip ...int) { + skip := 1 + if len(callerSkip) > 0 { + skip = callerSkip[0] + } + stackTrace := pruneStack(string(debug.Stack()), skip) + t.Errorf("\n%s\n%s", stackTrace, message) + } +} + +func pruneStack(fullStackTrace string, skip int) string { + stack := strings.Split(fullStackTrace, "\n") + if len(stack) > 2*(skip+1) { + stack = stack[2*(skip+1):] + } + prunedStack := []string{} + re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`) + for i := 0; i < len(stack)/2; i++ { + if !re.Match([]byte(stack[i*2])) { + prunedStack = append(prunedStack, stack[i*2]) + prunedStack = append(prunedStack, stack[i*2+1]) + } + } + return strings.Join(prunedStack, "\n") +} diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go new file mode 100644 index 00000000..b6110c4b --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers.go @@ -0,0 +1,393 @@ +package gomega + +import ( + "time" + + "github.com/onsi/gomega/matchers" + "github.com/onsi/gomega/types" +) + +//Equal uses reflect.DeepEqual to compare actual with expected. Equal is strict about +//types when performing comparisons. +//It is an error for both actual and expected to be nil. Use BeNil() instead. +func Equal(expected interface{}) types.GomegaMatcher { + return &matchers.EqualMatcher{ + Expected: expected, + } +} + +//BeEquivalentTo is more lax than Equal, allowing equality between different types. +//This is done by converting actual to have the type of expected before +//attempting equality with reflect.DeepEqual. +//It is an error for actual and expected to be nil. Use BeNil() instead. +func BeEquivalentTo(expected interface{}) types.GomegaMatcher { + return &matchers.BeEquivalentToMatcher{ + Expected: expected, + } +} + +//BeNil succeeds if actual is nil +func BeNil() types.GomegaMatcher { + return &matchers.BeNilMatcher{} +} + +//BeTrue succeeds if actual is true +func BeTrue() types.GomegaMatcher { + return &matchers.BeTrueMatcher{} +} + +//BeFalse succeeds if actual is false +func BeFalse() types.GomegaMatcher { + return &matchers.BeFalseMatcher{} +} + +//HaveOccurred succeeds if actual is a non-nil error +//The typical Go error checking pattern looks like: +// err := SomethingThatMightFail() +// Ω(err).ShouldNot(HaveOccurred()) +func HaveOccurred() types.GomegaMatcher { + return &matchers.HaveOccurredMatcher{} +} + +//Succeed passes if actual is a nil error +//Succeed is intended to be used with functions that return a single error value. 
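// A short sketch exercising the basic matchers introduced above (Equal, BeEquivalentTo,
// BeNil, BeTrue and HaveOccurred). The values are arbitrary illustrations.
package basics_test

import (
	"errors"
	"testing"

	. "github.com/onsi/gomega"
)

func TestBasicMatchers(t *testing.T) {
	RegisterTestingT(t)

	// Equal is strict about types; BeEquivalentTo converts actual to expected's type first.
	Expect(int32(5)).NotTo(Equal(5))       // int32 vs int: types differ
	Expect(int32(5)).To(BeEquivalentTo(5)) // converted, then compared

	var err error
	Expect(err).NotTo(HaveOccurred())
	Expect(err).To(BeNil())

	err = errors.New("boom")
	Expect(err).To(HaveOccurred())
	Expect(err.Error() == "boom").To(BeTrue())
}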
Instead of +// err := SomethingThatMightFail() +// Ω(err).ShouldNot(HaveOccurred()) +// +//You can write: +// Ω(SomethingThatMightFail()).Should(Succeed()) +// +//It is a mistake to use Succeed with a function that has multiple return values. Gomega's Ω and Expect +//functions automatically trigger failure if any return values after the first return value are non-zero/non-nil. +//This means that Ω(MultiReturnFunc()).ShouldNot(Succeed()) can never pass. +func Succeed() types.GomegaMatcher { + return &matchers.SucceedMatcher{} +} + +//MatchError succeeds if actual is a non-nil error that matches the passed in string/error. +// +//These are valid use-cases: +// Ω(err).Should(MatchError("an error")) //asserts that err.Error() == "an error" +// Ω(err).Should(MatchError(SomeError)) //asserts that err == SomeError (via reflect.DeepEqual) +// +//It is an error for err to be nil or an object that does not implement the Error interface +func MatchError(expected interface{}) types.GomegaMatcher { + return &matchers.MatchErrorMatcher{ + Expected: expected, + } +} + +//BeClosed succeeds if actual is a closed channel. +//It is an error to pass a non-channel to BeClosed, it is also an error to pass nil +// +//In order to check whether or not the channel is closed, Gomega must try to read from the channel +//(even in the `ShouldNot(BeClosed())` case). You should keep this in mind if you wish to make subsequent assertions about +//values coming down the channel. +// +//Also, if you are testing that a *buffered* channel is closed you must first read all values out of the channel before +//asserting that it is closed (it is not possible to detect that a buffered-channel has been closed until all its buffered values are read). +// +//Finally, as a corollary: it is an error to check whether or not a send-only channel is closed. +func BeClosed() types.GomegaMatcher { + return &matchers.BeClosedMatcher{} +} + +//Receive succeeds if there is a value to be received on actual. +//Actual must be a channel (and cannot be a send-only channel) -- anything else is an error. +// +//Receive returns immediately and never blocks: +// +//- If there is nothing on the channel `c` then Ω(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass. +// +//- If the channel `c` is closed then Ω(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass. +// +//- If there is something on the channel `c` ready to be read, then Ω(c).Should(Receive()) will pass and Ω(c).ShouldNot(Receive()) will fail. +// +//If you have a go-routine running in the background that will write to channel `c` you can: +// Eventually(c).Should(Receive()) +// +//This will timeout if nothing gets sent to `c` (you can modify the timeout interval as you normally do with `Eventually`) +// +//A similar use-case is to assert that no go-routine writes to a channel (for a period of time). You can do this with `Consistently`: +// Consistently(c).ShouldNot(Receive()) +// +//You can pass `Receive` a matcher. If you do so, it will match the received object against the matcher. For example: +// Ω(c).Should(Receive(Equal("foo"))) +// +//When given a matcher, `Receive` will always fail if there is nothing to be received on the channel. +// +//Passing Receive a matcher is especially useful when paired with Eventually: +// +// Eventually(c).Should(Receive(ContainSubstring("bar"))) +// +//will repeatedly attempt to pull values out of `c` until a value matching "bar" is received. 
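// A sketch combining the error and channel matchers discussed above: Succeed and MatchError
// on returned errors, and Receive/BeClosed against a channel fed by a background goroutine.
// The ping and produce helpers are hypothetical stand-ins.
package channels_test

import (
	"errors"
	"testing"

	. "github.com/onsi/gomega"
)

func ping() error { return nil }

func produce(c chan<- string) {
	c <- "bar"
	close(c)
}

func TestErrorAndChannelMatchers(t *testing.T) {
	RegisterTestingT(t)

	Expect(ping()).To(Succeed())
	Expect(errors.New("file not found")).To(MatchError("file not found"))

	c := make(chan string)
	go produce(c)

	// Wait for a value matching the matcher, then for the channel to be closed.
	Eventually(c).Should(Receive(Equal("bar")))
	Eventually(c).Should(BeClosed())
}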
+// +//Finally, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type: +// var myThing thing +// Eventually(thingChan).Should(Receive(&myThing)) +// Ω(myThing.Sprocket).Should(Equal("foo")) +// Ω(myThing.IsValid()).Should(BeTrue()) +func Receive(args ...interface{}) types.GomegaMatcher { + var arg interface{} + if len(args) > 0 { + arg = args[0] + } + + return &matchers.ReceiveMatcher{ + Arg: arg, + } +} + +//BeSent succeeds if a value can be sent to actual. +//Actual must be a channel (and cannot be a receive-only channel) that can sent the type of the value passed into BeSent -- anything else is an error. +//In addition, actual must not be closed. +// +//BeSent never blocks: +// +//- If the channel `c` is not ready to receive then Ω(c).Should(BeSent("foo")) will fail immediately +//- If the channel `c` is eventually ready to receive then Eventually(c).Should(BeSent("foo")) will succeed.. presuming the channel becomes ready to receive before Eventually's timeout +//- If the channel `c` is closed then Ω(c).Should(BeSent("foo")) and Ω(c).ShouldNot(BeSent("foo")) will both fail immediately +// +//Of course, the value is actually sent to the channel. The point of `BeSent` is less to make an assertion about the availability of the channel (which is typically an implementation detail that your test should not be concerned with). +//Rather, the point of `BeSent` is to make it possible to easily and expressively write tests that can timeout on blocked channel sends. +func BeSent(arg interface{}) types.GomegaMatcher { + return &matchers.BeSentMatcher{ + Arg: arg, + } +} + +//MatchRegexp succeeds if actual is a string or stringer that matches the +//passed-in regexp. Optional arguments can be provided to construct a regexp +//via fmt.Sprintf(). +func MatchRegexp(regexp string, args ...interface{}) types.GomegaMatcher { + return &matchers.MatchRegexpMatcher{ + Regexp: regexp, + Args: args, + } +} + +//ContainSubstring succeeds if actual is a string or stringer that contains the +//passed-in regexp. Optional arguments can be provided to construct the substring +//via fmt.Sprintf(). +func ContainSubstring(substr string, args ...interface{}) types.GomegaMatcher { + return &matchers.ContainSubstringMatcher{ + Substr: substr, + Args: args, + } +} + +//HavePrefix succeeds if actual is a string or stringer that contains the +//passed-in string as a prefix. Optional arguments can be provided to construct +//via fmt.Sprintf(). +func HavePrefix(prefix string, args ...interface{}) types.GomegaMatcher { + return &matchers.HavePrefixMatcher{ + Prefix: prefix, + Args: args, + } +} + +//HaveSuffix succeeds if actual is a string or stringer that contains the +//passed-in string as a suffix. Optional arguments can be provided to construct +//via fmt.Sprintf(). +func HaveSuffix(suffix string, args ...interface{}) types.GomegaMatcher { + return &matchers.HaveSuffixMatcher{ + Suffix: suffix, + Args: args, + } +} + +//MatchJSON succeeds if actual is a string or stringer of JSON that matches +//the expected JSON. The JSONs are decoded and the resulting objects are compared via +//reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter. +func MatchJSON(json interface{}) types.GomegaMatcher { + return &matchers.MatchJSONMatcher{ + JSONToMatch: json, + } +} + +//BeEmpty succeeds if actual is empty. Actual must be of type string, array, map, chan, or slice. 
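// A small sketch of the string, JSON and BeSent matchers listed above. The literal values
// are illustrative only.
package strings_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

func TestStringAndSendMatchers(t *testing.T) {
	RegisterTestingT(t)

	Expect("ocicni-eth0").To(HavePrefix("ocicni-"))
	Expect("ocicni-eth0").To(HaveSuffix("0"))
	Expect("ocicni-eth0").To(ContainSubstring("eth%d", 0))
	Expect("ocicni-eth0").To(MatchRegexp(`-eth\d+$`))

	// Key order and whitespace are irrelevant: the JSON documents are decoded and compared.
	Expect(`{"name": "lo", "mtu": 65536}`).To(MatchJSON(`{"mtu":65536,"name":"lo"}`))

	// BeSent actually performs the send; the buffered channel makes it succeed immediately.
	c := make(chan string, 1)
	Expect(c).To(BeSent("hello"))
	Expect(<-c).To(Equal("hello"))
}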
+func BeEmpty() types.GomegaMatcher { + return &matchers.BeEmptyMatcher{} +} + +//HaveLen succeeds if actual has the passed-in length. Actual must be of type string, array, map, chan, or slice. +func HaveLen(count int) types.GomegaMatcher { + return &matchers.HaveLenMatcher{ + Count: count, + } +} + +//BeZero succeeds if actual is the zero value for its type or if actual is nil. +func BeZero() types.GomegaMatcher { + return &matchers.BeZeroMatcher{} +} + +//ContainElement succeeds if actual contains the passed in element. +//By default ContainElement() uses Equal() to perform the match, however a +//matcher can be passed in instead: +// Ω([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubstring("Bar"))) +// +//Actual must be an array, slice or map. +//For maps, ContainElement searches through the map's values. +func ContainElement(element interface{}) types.GomegaMatcher { + return &matchers.ContainElementMatcher{ + Element: element, + } +} + +//ConsistOf succeeds if actual contains preciely the elements passed into the matcher. The ordering of the elements does not matter. +//By default ConsistOf() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples: +// +// Ω([]string{"Foo", "FooBar"}).Should(ConsistOf("FooBar", "Foo")) +// Ω([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Bar"), "Foo")) +// Ω([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Foo"), ContainSubstring("Foo"))) +// +//Actual must be an array, slice or map. For maps, ConsistOf matches against the map's values. +// +//You typically pass variadic arguments to ConsistOf (as in the examples above). However, if you need to pass in a slice you can provided that it +//is the only element passed in to ConsistOf: +// +// Ω([]string{"Foo", "FooBar"}).Should(ConsistOf([]string{"FooBar", "Foo"})) +// +//Note that Go's type system does not allow you to write this as ConsistOf([]string{"FooBar", "Foo"}...) as []string and []interface{} are different types - hence the need for this special rule. +func ConsistOf(elements ...interface{}) types.GomegaMatcher { + return &matchers.ConsistOfMatcher{ + Elements: elements, + } +} + +//HaveKey succeeds if actual is a map with the passed in key. +//By default HaveKey uses Equal() to perform the match, however a +//matcher can be passed in instead: +// Ω(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKey(MatchRegexp(`.+Foo$`))) +func HaveKey(key interface{}) types.GomegaMatcher { + return &matchers.HaveKeyMatcher{ + Key: key, + } +} + +//HaveKeyWithValue succeeds if actual is a map with the passed in key and value. +//By default HaveKeyWithValue uses Equal() to perform the match, however a +//matcher can be passed in instead: +// Ω(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue("Foo", "Bar")) +// Ω(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue(MatchRegexp(`.+Foo$`), "Bar")) +func HaveKeyWithValue(key interface{}, value interface{}) types.GomegaMatcher { + return &matchers.HaveKeyWithValueMatcher{ + Key: key, + Value: value, + } +} + +//BeNumerically performs numerical assertions in a type-agnostic way. +//Actual and expected should be numbers, though the specific type of +//number is irrelevant (floa32, float64, uint8, etc...). 
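// A sketch of the collection matchers above (BeEmpty, HaveLen, ContainElement, ConsistOf,
// HaveKey, HaveKeyWithValue). The slices and maps are arbitrary illustrations.
package collections_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

func TestCollectionMatchers(t *testing.T) {
	RegisterTestingT(t)

	nets := []string{"bridge", "loopback"}
	Expect(nets).To(HaveLen(2))
	Expect(nets).NotTo(BeEmpty())
	Expect(nets).To(ContainElement("bridge"))
	Expect(nets).To(ContainElement(HavePrefix("loop")))

	// Order does not matter for ConsistOf, and matchers can stand in for elements.
	Expect(nets).To(ConsistOf("loopback", ContainSubstring("ridge")))

	ifaces := map[string]int{"eth0": 1500, "lo": 65536}
	Expect(ifaces).To(HaveKey("eth0"))
	Expect(ifaces).To(HaveKeyWithValue("lo", 65536))
}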
+// +//There are six, self-explanatory, supported comparators: +// Ω(1.0).Should(BeNumerically("==", 1)) +// Ω(1.0).Should(BeNumerically("~", 0.999, 0.01)) +// Ω(1.0).Should(BeNumerically(">", 0.9)) +// Ω(1.0).Should(BeNumerically(">=", 1.0)) +// Ω(1.0).Should(BeNumerically("<", 3)) +// Ω(1.0).Should(BeNumerically("<=", 1.0)) +func BeNumerically(comparator string, compareTo ...interface{}) types.GomegaMatcher { + return &matchers.BeNumericallyMatcher{ + Comparator: comparator, + CompareTo: compareTo, + } +} + +//BeTemporally compares time.Time's like BeNumerically +//Actual and expected must be time.Time. The comparators are the same as for BeNumerically +// Ω(time.Now()).Should(BeTemporally(">", time.Time{})) +// Ω(time.Now()).Should(BeTemporally("~", time.Now(), time.Second)) +func BeTemporally(comparator string, compareTo time.Time, threshold ...time.Duration) types.GomegaMatcher { + return &matchers.BeTemporallyMatcher{ + Comparator: comparator, + CompareTo: compareTo, + Threshold: threshold, + } +} + +//BeAssignableToTypeOf succeeds if actual is assignable to the type of expected. +//It will return an error when one of the values is nil. +// Ω(0).Should(BeAssignableToTypeOf(0)) // Same values +// Ω(5).Should(BeAssignableToTypeOf(-1)) // different values same type +// Ω("foo").Should(BeAssignableToTypeOf("bar")) // different values same type +// Ω(struct{ Foo string }{}).Should(BeAssignableToTypeOf(struct{ Foo string }{})) +func BeAssignableToTypeOf(expected interface{}) types.GomegaMatcher { + return &matchers.AssignableToTypeOfMatcher{ + Expected: expected, + } +} + +//Panic succeeds if actual is a function that, when invoked, panics. +//Actual must be a function that takes no arguments and returns no results. +func Panic() types.GomegaMatcher { + return &matchers.PanicMatcher{} +} + +//BeAnExistingFile succeeds if a file exists. +//Actual must be a string representing the abs path to the file being checked. +func BeAnExistingFile() types.GomegaMatcher { + return &matchers.BeAnExistingFileMatcher{} +} + +//BeARegularFile succeeds iff a file exists and is a regular file. +//Actual must be a string representing the abs path to the file being checked. +func BeARegularFile() types.GomegaMatcher { + return &matchers.BeARegularFileMatcher{} +} + +//BeADirectory succeeds iff a file exists and is a directory. +//Actual must be a string representing the abs path to the file being checked. +func BeADirectory() types.GomegaMatcher { + return &matchers.BeADirectoryMatcher{} +} + +//And succeeds only if all of the given matchers succeed. +//The matchers are tried in order, and will fail-fast if one doesn't succeed. +// Expect("hi").To(And(HaveLen(2), Equal("hi")) +// +//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions. +func And(ms ...types.GomegaMatcher) types.GomegaMatcher { + return &matchers.AndMatcher{Matchers: ms} +} + +//SatisfyAll is an alias for And(). +// Ω("hi").Should(SatisfyAll(HaveLen(2), Equal("hi"))) +func SatisfyAll(matchers ...types.GomegaMatcher) types.GomegaMatcher { + return And(matchers...) +} + +//Or succeeds if any of the given matchers succeed. +//The matchers are tried in order and will return immediately upon the first successful match. +// Expect("hi").To(Or(HaveLen(3), HaveLen(2)) +// +//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions. +func Or(ms ...types.GomegaMatcher) types.GomegaMatcher { + return &matchers.OrMatcher{Matchers: ms} +} + +//SatisfyAny is an alias for Or(). 
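// A sketch of the numeric, temporal and composed matchers covered above. The tolerances and
// timestamps are arbitrary illustrations.
package compose_test

import (
	"testing"
	"time"

	. "github.com/onsi/gomega"
)

func TestComposedMatchers(t *testing.T) {
	RegisterTestingT(t)

	Expect(3.14159).To(BeNumerically("~", 3.14, 0.01))
	Expect(42).To(BeNumerically(">=", 17))

	start := time.Now()
	Expect(start.Add(time.Millisecond)).To(BeTemporally("~", start, 10*time.Millisecond))
	Expect(start).To(BeTemporally("<", start.Add(time.Hour)))

	// And/Or compose matchers; SatisfyAll/SatisfyAny are the same thing by another name.
	Expect("eth0").To(And(HaveLen(4), HavePrefix("eth")))
	Expect("eth0").To(Or(Equal("lo"), ContainSubstring("eth")))
	Expect("eth0").To(SatisfyAll(HaveSuffix("0"), MatchRegexp(`^eth\d$`)))
}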
+// Expect("hi").SatisfyAny(Or(HaveLen(3), HaveLen(2)) +func SatisfyAny(matchers ...types.GomegaMatcher) types.GomegaMatcher { + return Or(matchers...) +} + +//Not negates the given matcher; it succeeds if the given matcher fails. +// Expect(1).To(Not(Equal(2)) +// +//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions. +func Not(matcher types.GomegaMatcher) types.GomegaMatcher { + return &matchers.NotMatcher{Matcher: matcher} +} + +//WithTransform applies the `transform` to the actual value and matches it against `matcher`. +//The given transform must be a function of one parameter that returns one value. +// var plus1 = func(i int) int { return i + 1 } +// Expect(1).To(WithTransform(plus1, Equal(2)) +// +//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions. +func WithTransform(transform interface{}, matcher types.GomegaMatcher) types.GomegaMatcher { + return matchers.NewWithTransformMatcher(transform, matcher) +} diff --git a/vendor/github.com/onsi/gomega/matchers/and.go b/vendor/github.com/onsi/gomega/matchers/and.go new file mode 100644 index 00000000..94c42a7d --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/and.go @@ -0,0 +1,64 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" + "github.com/onsi/gomega/internal/oraclematcher" + "github.com/onsi/gomega/types" +) + +type AndMatcher struct { + Matchers []types.GomegaMatcher + + // state + firstFailedMatcher types.GomegaMatcher +} + +func (m *AndMatcher) Match(actual interface{}) (success bool, err error) { + m.firstFailedMatcher = nil + for _, matcher := range m.Matchers { + success, err := matcher.Match(actual) + if !success || err != nil { + m.firstFailedMatcher = matcher + return false, err + } + } + return true, nil +} + +func (m *AndMatcher) FailureMessage(actual interface{}) (message string) { + return m.firstFailedMatcher.FailureMessage(actual) +} + +func (m *AndMatcher) NegatedFailureMessage(actual interface{}) (message string) { + // not the most beautiful list of matchers, but not bad either... + return format.Message(actual, fmt.Sprintf("To not satisfy all of these matchers: %s", m.Matchers)) +} + +func (m *AndMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + /* + Example with 3 matchers: A, B, C + + Match evaluates them: T, F, => F + So match is currently F, what should MatchMayChangeInTheFuture() return? + Seems like it only depends on B, since currently B MUST change to allow the result to become T + + Match eval: T, T, T => T + So match is currently T, what should MatchMayChangeInTheFuture() return? + Seems to depend on ANY of them being able to change to F. + */ + + if m.firstFailedMatcher == nil { + // so all matchers succeeded.. Any one of them changing would change the result. + for _, matcher := range m.Matchers { + if oraclematcher.MatchMayChangeInTheFuture(matcher, actual) { + return true + } + } + return false // none of were going to change + } else { + // one of the matchers failed.. 
it must be able to change in order to affect the result + return oraclematcher.MatchMayChangeInTheFuture(m.firstFailedMatcher, actual) + } +} diff --git a/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go b/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go new file mode 100644 index 00000000..89a1fc21 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go @@ -0,0 +1,31 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type AssignableToTypeOfMatcher struct { + Expected interface{} +} + +func (matcher *AssignableToTypeOfMatcher) Match(actual interface{}) (success bool, err error) { + if actual == nil || matcher.Expected == nil { + return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.") + } + + actualType := reflect.TypeOf(actual) + expectedType := reflect.TypeOf(matcher.Expected) + + return actualType.AssignableTo(expectedType), nil +} + +func (matcher *AssignableToTypeOfMatcher) FailureMessage(actual interface{}) string { + return format.Message(actual, fmt.Sprintf("to be assignable to the type: %T", matcher.Expected)) +} + +func (matcher *AssignableToTypeOfMatcher) NegatedFailureMessage(actual interface{}) string { + return format.Message(actual, fmt.Sprintf("not to be assignable to the type: %T", matcher.Expected)) +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_a_directory.go b/vendor/github.com/onsi/gomega/matchers/be_a_directory.go new file mode 100644 index 00000000..7b6975e4 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_a_directory.go @@ -0,0 +1,54 @@ +package matchers + +import ( + "fmt" + "os" + + "github.com/onsi/gomega/format" +) + +type notADirectoryError struct { + os.FileInfo +} + +func (t notADirectoryError) Error() string { + fileInfo := os.FileInfo(t) + switch { + case fileInfo.Mode().IsRegular(): + return "file is a regular file" + default: + return fmt.Sprintf("file mode is: %s", fileInfo.Mode().String()) + } +} + +type BeADirectoryMatcher struct { + expected interface{} + err error +} + +func (matcher *BeADirectoryMatcher) Match(actual interface{}) (success bool, err error) { + actualFilename, ok := actual.(string) + if !ok { + return false, fmt.Errorf("BeADirectoryMatcher matcher expects a file path") + } + + fileInfo, err := os.Stat(actualFilename) + if err != nil { + matcher.err = err + return false, nil + } + + if !fileInfo.Mode().IsDir() { + matcher.err = notADirectoryError{fileInfo} + return false, nil + } + return true, nil +} + +func (matcher *BeADirectoryMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("to be a directory: %s", matcher.err)) +} + +func (matcher *BeADirectoryMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("not be a directory")) +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go b/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go new file mode 100644 index 00000000..e239131f --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go @@ -0,0 +1,54 @@ +package matchers + +import ( + "fmt" + "os" + + "github.com/onsi/gomega/format" +) + +type notARegularFileError struct { + os.FileInfo +} + +func (t 
notARegularFileError) Error() string { + fileInfo := os.FileInfo(t) + switch { + case fileInfo.IsDir(): + return "file is a directory" + default: + return fmt.Sprintf("file mode is: %s", fileInfo.Mode().String()) + } +} + +type BeARegularFileMatcher struct { + expected interface{} + err error +} + +func (matcher *BeARegularFileMatcher) Match(actual interface{}) (success bool, err error) { + actualFilename, ok := actual.(string) + if !ok { + return false, fmt.Errorf("BeARegularFileMatcher matcher expects a file path") + } + + fileInfo, err := os.Stat(actualFilename) + if err != nil { + matcher.err = err + return false, nil + } + + if !fileInfo.Mode().IsRegular() { + matcher.err = notARegularFileError{fileInfo} + return false, nil + } + return true, nil +} + +func (matcher *BeARegularFileMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("to be a regular file: %s", matcher.err)) +} + +func (matcher *BeARegularFileMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("not be a regular file")) +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go b/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go new file mode 100644 index 00000000..d42eba22 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go @@ -0,0 +1,38 @@ +package matchers + +import ( + "fmt" + "os" + + "github.com/onsi/gomega/format" +) + +type BeAnExistingFileMatcher struct { + expected interface{} +} + +func (matcher *BeAnExistingFileMatcher) Match(actual interface{}) (success bool, err error) { + actualFilename, ok := actual.(string) + if !ok { + return false, fmt.Errorf("BeAnExistingFileMatcher matcher expects a file path") + } + + if _, err = os.Stat(actualFilename); err != nil { + switch { + case os.IsNotExist(err): + return false, nil + default: + return false, err + } + } + + return true, nil +} + +func (matcher *BeAnExistingFileMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("to exist")) +} + +func (matcher *BeAnExistingFileMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("not to exist")) +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go new file mode 100644 index 00000000..c1b49959 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go @@ -0,0 +1,45 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" + "reflect" +) + +type BeClosedMatcher struct { +} + +func (matcher *BeClosedMatcher) Match(actual interface{}) (success bool, err error) { + if !isChan(actual) { + return false, fmt.Errorf("BeClosed matcher expects a channel. Got:\n%s", format.Object(actual, 1)) + } + + channelType := reflect.TypeOf(actual) + channelValue := reflect.ValueOf(actual) + + if channelType.ChanDir() == reflect.SendDir { + return false, fmt.Errorf("BeClosed matcher cannot determine if a send-only channel is closed or open. 
Got:\n%s", format.Object(actual, 1)) + } + + winnerIndex, _, open := reflect.Select([]reflect.SelectCase{ + reflect.SelectCase{Dir: reflect.SelectRecv, Chan: channelValue}, + reflect.SelectCase{Dir: reflect.SelectDefault}, + }) + + var closed bool + if winnerIndex == 0 { + closed = !open + } else if winnerIndex == 1 { + closed = false + } + + return closed, nil +} + +func (matcher *BeClosedMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be closed") +} + +func (matcher *BeClosedMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be open") +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go new file mode 100644 index 00000000..55bdd7d1 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go @@ -0,0 +1,26 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" +) + +type BeEmptyMatcher struct { +} + +func (matcher *BeEmptyMatcher) Match(actual interface{}) (success bool, err error) { + length, ok := lengthOf(actual) + if !ok { + return false, fmt.Errorf("BeEmpty matcher expects a string/array/map/channel/slice. Got:\n%s", format.Object(actual, 1)) + } + + return length == 0, nil +} + +func (matcher *BeEmptyMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be empty") +} + +func (matcher *BeEmptyMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be empty") +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go new file mode 100644 index 00000000..32a0c310 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go @@ -0,0 +1,33 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" + "reflect" +) + +type BeEquivalentToMatcher struct { + Expected interface{} +} + +func (matcher *BeEquivalentToMatcher) Match(actual interface{}) (success bool, err error) { + if actual == nil && matcher.Expected == nil { + return false, fmt.Errorf("Both actual and expected must not be nil.") + } + + convertedActual := actual + + if actual != nil && matcher.Expected != nil && reflect.TypeOf(actual).ConvertibleTo(reflect.TypeOf(matcher.Expected)) { + convertedActual = reflect.ValueOf(actual).Convert(reflect.TypeOf(matcher.Expected)).Interface() + } + + return reflect.DeepEqual(convertedActual, matcher.Expected), nil +} + +func (matcher *BeEquivalentToMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be equivalent to", matcher.Expected) +} + +func (matcher *BeEquivalentToMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be equivalent to", matcher.Expected) +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go new file mode 100644 index 00000000..0b224cbb --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go @@ -0,0 +1,25 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" +) + +type BeFalseMatcher struct { +} + +func (matcher *BeFalseMatcher) Match(actual interface{}) (success bool, err error) { + if !isBool(actual) 
{ + return false, fmt.Errorf("Expected a boolean. Got:\n%s", format.Object(actual, 1)) + } + + return actual == false, nil +} + +func (matcher *BeFalseMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be false") +} + +func (matcher *BeFalseMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be false") +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go new file mode 100644 index 00000000..7ee84fe1 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go @@ -0,0 +1,18 @@ +package matchers + +import "github.com/onsi/gomega/format" + +type BeNilMatcher struct { +} + +func (matcher *BeNilMatcher) Match(actual interface{}) (success bool, err error) { + return isNil(actual), nil +} + +func (matcher *BeNilMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be nil") +} + +func (matcher *BeNilMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be nil") +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go new file mode 100644 index 00000000..52f83fe3 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go @@ -0,0 +1,119 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" + "math" +) + +type BeNumericallyMatcher struct { + Comparator string + CompareTo []interface{} +} + +func (matcher *BeNumericallyMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("to be %s", matcher.Comparator), matcher.CompareTo[0]) +} + +func (matcher *BeNumericallyMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("not to be %s", matcher.Comparator), matcher.CompareTo[0]) +} + +func (matcher *BeNumericallyMatcher) Match(actual interface{}) (success bool, err error) { + if len(matcher.CompareTo) == 0 || len(matcher.CompareTo) > 2 { + return false, fmt.Errorf("BeNumerically requires 1 or 2 CompareTo arguments. Got:\n%s", format.Object(matcher.CompareTo, 1)) + } + if !isNumber(actual) { + return false, fmt.Errorf("Expected a number. Got:\n%s", format.Object(actual, 1)) + } + if !isNumber(matcher.CompareTo[0]) { + return false, fmt.Errorf("Expected a number. Got:\n%s", format.Object(matcher.CompareTo[0], 1)) + } + if len(matcher.CompareTo) == 2 && !isNumber(matcher.CompareTo[1]) { + return false, fmt.Errorf("Expected a number. 
Got:\n%s", format.Object(matcher.CompareTo[0], 1)) + } + + switch matcher.Comparator { + case "==", "~", ">", ">=", "<", "<=": + default: + return false, fmt.Errorf("Unknown comparator: %s", matcher.Comparator) + } + + if isFloat(actual) || isFloat(matcher.CompareTo[0]) { + var secondOperand float64 = 1e-8 + if len(matcher.CompareTo) == 2 { + secondOperand = toFloat(matcher.CompareTo[1]) + } + success = matcher.matchFloats(toFloat(actual), toFloat(matcher.CompareTo[0]), secondOperand) + } else if isInteger(actual) { + var secondOperand int64 = 0 + if len(matcher.CompareTo) == 2 { + secondOperand = toInteger(matcher.CompareTo[1]) + } + success = matcher.matchIntegers(toInteger(actual), toInteger(matcher.CompareTo[0]), secondOperand) + } else if isUnsignedInteger(actual) { + var secondOperand uint64 = 0 + if len(matcher.CompareTo) == 2 { + secondOperand = toUnsignedInteger(matcher.CompareTo[1]) + } + success = matcher.matchUnsignedIntegers(toUnsignedInteger(actual), toUnsignedInteger(matcher.CompareTo[0]), secondOperand) + } else { + return false, fmt.Errorf("Failed to compare:\n%s\n%s:\n%s", format.Object(actual, 1), matcher.Comparator, format.Object(matcher.CompareTo[0], 1)) + } + + return success, nil +} + +func (matcher *BeNumericallyMatcher) matchIntegers(actual, compareTo, threshold int64) (success bool) { + switch matcher.Comparator { + case "==", "~": + diff := actual - compareTo + return -threshold <= diff && diff <= threshold + case ">": + return (actual > compareTo) + case ">=": + return (actual >= compareTo) + case "<": + return (actual < compareTo) + case "<=": + return (actual <= compareTo) + } + return false +} + +func (matcher *BeNumericallyMatcher) matchUnsignedIntegers(actual, compareTo, threshold uint64) (success bool) { + switch matcher.Comparator { + case "==", "~": + if actual < compareTo { + actual, compareTo = compareTo, actual + } + return actual-compareTo <= threshold + case ">": + return (actual > compareTo) + case ">=": + return (actual >= compareTo) + case "<": + return (actual < compareTo) + case "<=": + return (actual <= compareTo) + } + return false +} + +func (matcher *BeNumericallyMatcher) matchFloats(actual, compareTo, threshold float64) (success bool) { + switch matcher.Comparator { + case "~": + return math.Abs(actual-compareTo) <= threshold + case "==": + return (actual == compareTo) + case ">": + return (actual > compareTo) + case ">=": + return (actual >= compareTo) + case "<": + return (actual < compareTo) + case "<=": + return (actual <= compareTo) + } + return false +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go new file mode 100644 index 00000000..d7c32233 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go @@ -0,0 +1,71 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type BeSentMatcher struct { + Arg interface{} + channelClosed bool +} + +func (matcher *BeSentMatcher) Match(actual interface{}) (success bool, err error) { + if !isChan(actual) { + return false, fmt.Errorf("BeSent expects a channel. Got:\n%s", format.Object(actual, 1)) + } + + channelType := reflect.TypeOf(actual) + channelValue := reflect.ValueOf(actual) + + if channelType.ChanDir() == reflect.RecvDir { + return false, fmt.Errorf("BeSent matcher cannot be passed a receive-only channel. 
Got:\n%s", format.Object(actual, 1)) + } + + argType := reflect.TypeOf(matcher.Arg) + assignable := argType.AssignableTo(channelType.Elem()) + + if !assignable { + return false, fmt.Errorf("Cannot pass:\n%s to the channel:\n%s\nThe types don't match.", format.Object(matcher.Arg, 1), format.Object(actual, 1)) + } + + argValue := reflect.ValueOf(matcher.Arg) + + defer func() { + if e := recover(); e != nil { + success = false + err = fmt.Errorf("Cannot send to a closed channel") + matcher.channelClosed = true + } + }() + + winnerIndex, _, _ := reflect.Select([]reflect.SelectCase{ + reflect.SelectCase{Dir: reflect.SelectSend, Chan: channelValue, Send: argValue}, + reflect.SelectCase{Dir: reflect.SelectDefault}, + }) + + var didSend bool + if winnerIndex == 0 { + didSend = true + } + + return didSend, nil +} + +func (matcher *BeSentMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to send:", matcher.Arg) +} + +func (matcher *BeSentMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to send:", matcher.Arg) +} + +func (matcher *BeSentMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + if !isChan(actual) { + return false + } + + return !matcher.channelClosed +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go new file mode 100644 index 00000000..abda4eb1 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go @@ -0,0 +1,65 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" + "time" +) + +type BeTemporallyMatcher struct { + Comparator string + CompareTo time.Time + Threshold []time.Duration +} + +func (matcher *BeTemporallyMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("to be %s", matcher.Comparator), matcher.CompareTo) +} + +func (matcher *BeTemporallyMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("not to be %s", matcher.Comparator), matcher.CompareTo) +} + +func (matcher *BeTemporallyMatcher) Match(actual interface{}) (bool, error) { + // predicate to test for time.Time type + isTime := func(t interface{}) bool { + _, ok := t.(time.Time) + return ok + } + + if !isTime(actual) { + return false, fmt.Errorf("Expected a time.Time. 
Got:\n%s", format.Object(actual, 1)) + } + + switch matcher.Comparator { + case "==", "~", ">", ">=", "<", "<=": + default: + return false, fmt.Errorf("Unknown comparator: %s", matcher.Comparator) + } + + var threshold = time.Millisecond + if len(matcher.Threshold) == 1 { + threshold = matcher.Threshold[0] + } + + return matcher.matchTimes(actual.(time.Time), matcher.CompareTo, threshold), nil +} + +func (matcher *BeTemporallyMatcher) matchTimes(actual, compareTo time.Time, threshold time.Duration) (success bool) { + switch matcher.Comparator { + case "==": + return actual.Equal(compareTo) + case "~": + diff := actual.Sub(compareTo) + return -threshold <= diff && diff <= threshold + case ">": + return actual.After(compareTo) + case ">=": + return !actual.Before(compareTo) + case "<": + return actual.Before(compareTo) + case "<=": + return !actual.After(compareTo) + } + return false +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go new file mode 100644 index 00000000..1275e5fc --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go @@ -0,0 +1,25 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" +) + +type BeTrueMatcher struct { +} + +func (matcher *BeTrueMatcher) Match(actual interface{}) (success bool, err error) { + if !isBool(actual) { + return false, fmt.Errorf("Expected a boolean. Got:\n%s", format.Object(actual, 1)) + } + + return actual.(bool), nil +} + +func (matcher *BeTrueMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be true") +} + +func (matcher *BeTrueMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be true") +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go new file mode 100644 index 00000000..b39c9144 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go @@ -0,0 +1,27 @@ +package matchers + +import ( + "github.com/onsi/gomega/format" + "reflect" +) + +type BeZeroMatcher struct { +} + +func (matcher *BeZeroMatcher) Match(actual interface{}) (success bool, err error) { + if actual == nil { + return true, nil + } + zeroValue := reflect.Zero(reflect.TypeOf(actual)).Interface() + + return reflect.DeepEqual(zeroValue, actual), nil + +} + +func (matcher *BeZeroMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be zero-valued") +} + +func (matcher *BeZeroMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be zero-valued") +} diff --git a/vendor/github.com/onsi/gomega/matchers/consist_of.go b/vendor/github.com/onsi/gomega/matchers/consist_of.go new file mode 100644 index 00000000..7b0e0886 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/consist_of.go @@ -0,0 +1,80 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" + "github.com/onsi/gomega/matchers/support/goraph/bipartitegraph" +) + +type ConsistOfMatcher struct { + Elements []interface{} +} + +func (matcher *ConsistOfMatcher) Match(actual interface{}) (success bool, err error) { + if !isArrayOrSlice(actual) && !isMap(actual) { + return false, fmt.Errorf("ConsistOf matcher expects an array/slice/map. 
Got:\n%s", format.Object(actual, 1)) + } + + elements := matcher.Elements + if len(matcher.Elements) == 1 && isArrayOrSlice(matcher.Elements[0]) { + elements = []interface{}{} + value := reflect.ValueOf(matcher.Elements[0]) + for i := 0; i < value.Len(); i++ { + elements = append(elements, value.Index(i).Interface()) + } + } + + matchers := []interface{}{} + for _, element := range elements { + matcher, isMatcher := element.(omegaMatcher) + if !isMatcher { + matcher = &EqualMatcher{Expected: element} + } + matchers = append(matchers, matcher) + } + + values := matcher.valuesOf(actual) + + if len(values) != len(matchers) { + return false, nil + } + + neighbours := func(v, m interface{}) (bool, error) { + match, err := m.(omegaMatcher).Match(v) + return match && err == nil, nil + } + + bipartiteGraph, err := bipartitegraph.NewBipartiteGraph(values, matchers, neighbours) + if err != nil { + return false, err + } + + return len(bipartiteGraph.LargestMatching()) == len(values), nil +} + +func (matcher *ConsistOfMatcher) valuesOf(actual interface{}) []interface{} { + value := reflect.ValueOf(actual) + values := []interface{}{} + if isMap(actual) { + keys := value.MapKeys() + for i := 0; i < value.Len(); i++ { + values = append(values, value.MapIndex(keys[i]).Interface()) + } + } else { + for i := 0; i < value.Len(); i++ { + values = append(values, value.Index(i).Interface()) + } + } + + return values +} + +func (matcher *ConsistOfMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to consist of", matcher.Elements) +} + +func (matcher *ConsistOfMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to consist of", matcher.Elements) +} diff --git a/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go new file mode 100644 index 00000000..4159335d --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go @@ -0,0 +1,56 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type ContainElementMatcher struct { + Element interface{} +} + +func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, err error) { + if !isArrayOrSlice(actual) && !isMap(actual) { + return false, fmt.Errorf("ContainElement matcher expects an array/slice/map. 
Got:\n%s", format.Object(actual, 1)) + } + + elemMatcher, elementIsMatcher := matcher.Element.(omegaMatcher) + if !elementIsMatcher { + elemMatcher = &EqualMatcher{Expected: matcher.Element} + } + + value := reflect.ValueOf(actual) + var keys []reflect.Value + if isMap(actual) { + keys = value.MapKeys() + } + var lastError error + for i := 0; i < value.Len(); i++ { + var success bool + var err error + if isMap(actual) { + success, err = elemMatcher.Match(value.MapIndex(keys[i]).Interface()) + } else { + success, err = elemMatcher.Match(value.Index(i).Interface()) + } + if err != nil { + lastError = err + continue + } + if success { + return true, nil + } + } + + return false, lastError +} + +func (matcher *ContainElementMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to contain element matching", matcher.Element) +} + +func (matcher *ContainElementMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to contain element matching", matcher.Element) +} diff --git a/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go new file mode 100644 index 00000000..2e760892 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go @@ -0,0 +1,37 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" + "strings" +) + +type ContainSubstringMatcher struct { + Substr string + Args []interface{} +} + +func (matcher *ContainSubstringMatcher) Match(actual interface{}) (success bool, err error) { + actualString, ok := toString(actual) + if !ok { + return false, fmt.Errorf("ContainSubstring matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1)) + } + + return strings.Contains(actualString, matcher.stringToMatch()), nil +} + +func (matcher *ContainSubstringMatcher) stringToMatch() string { + stringToMatch := matcher.Substr + if len(matcher.Args) > 0 { + stringToMatch = fmt.Sprintf(matcher.Substr, matcher.Args...) + } + return stringToMatch +} + +func (matcher *ContainSubstringMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to contain substring", matcher.stringToMatch()) +} + +func (matcher *ContainSubstringMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to contain substring", matcher.stringToMatch()) +} diff --git a/vendor/github.com/onsi/gomega/matchers/equal_matcher.go b/vendor/github.com/onsi/gomega/matchers/equal_matcher.go new file mode 100644 index 00000000..d1865973 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/equal_matcher.go @@ -0,0 +1,27 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type EqualMatcher struct { + Expected interface{} +} + +func (matcher *EqualMatcher) Match(actual interface{}) (success bool, err error) { + if actual == nil && matcher.Expected == nil { + return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. 
This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.") + } + return reflect.DeepEqual(actual, matcher.Expected), nil +} + +func (matcher *EqualMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to equal", matcher.Expected) +} + +func (matcher *EqualMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to equal", matcher.Expected) +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go new file mode 100644 index 00000000..5701ba6e --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go @@ -0,0 +1,53 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" + "reflect" +) + +type HaveKeyMatcher struct { + Key interface{} +} + +func (matcher *HaveKeyMatcher) Match(actual interface{}) (success bool, err error) { + if !isMap(actual) { + return false, fmt.Errorf("HaveKey matcher expects a map. Got:%s", format.Object(actual, 1)) + } + + keyMatcher, keyIsMatcher := matcher.Key.(omegaMatcher) + if !keyIsMatcher { + keyMatcher = &EqualMatcher{Expected: matcher.Key} + } + + keys := reflect.ValueOf(actual).MapKeys() + for i := 0; i < len(keys); i++ { + success, err := keyMatcher.Match(keys[i].Interface()) + if err != nil { + return false, fmt.Errorf("HaveKey's key matcher failed with:\n%s%s", format.Indent, err.Error()) + } + if success { + return true, nil + } + } + + return false, nil +} + +func (matcher *HaveKeyMatcher) FailureMessage(actual interface{}) (message string) { + switch matcher.Key.(type) { + case omegaMatcher: + return format.Message(actual, "to have key matching", matcher.Key) + default: + return format.Message(actual, "to have key", matcher.Key) + } +} + +func (matcher *HaveKeyMatcher) NegatedFailureMessage(actual interface{}) (message string) { + switch matcher.Key.(type) { + case omegaMatcher: + return format.Message(actual, "not to have key matching", matcher.Key) + default: + return format.Message(actual, "not to have key", matcher.Key) + } +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go new file mode 100644 index 00000000..464ac187 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go @@ -0,0 +1,73 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" + "reflect" +) + +type HaveKeyWithValueMatcher struct { + Key interface{} + Value interface{} +} + +func (matcher *HaveKeyWithValueMatcher) Match(actual interface{}) (success bool, err error) { + if !isMap(actual) { + return false, fmt.Errorf("HaveKeyWithValue matcher expects a map. 
Got:%s", format.Object(actual, 1)) + } + + keyMatcher, keyIsMatcher := matcher.Key.(omegaMatcher) + if !keyIsMatcher { + keyMatcher = &EqualMatcher{Expected: matcher.Key} + } + + valueMatcher, valueIsMatcher := matcher.Value.(omegaMatcher) + if !valueIsMatcher { + valueMatcher = &EqualMatcher{Expected: matcher.Value} + } + + keys := reflect.ValueOf(actual).MapKeys() + for i := 0; i < len(keys); i++ { + success, err := keyMatcher.Match(keys[i].Interface()) + if err != nil { + return false, fmt.Errorf("HaveKeyWithValue's key matcher failed with:\n%s%s", format.Indent, err.Error()) + } + if success { + actualValue := reflect.ValueOf(actual).MapIndex(keys[i]) + success, err := valueMatcher.Match(actualValue.Interface()) + if err != nil { + return false, fmt.Errorf("HaveKeyWithValue's value matcher failed with:\n%s%s", format.Indent, err.Error()) + } + return success, nil + } + } + + return false, nil +} + +func (matcher *HaveKeyWithValueMatcher) FailureMessage(actual interface{}) (message string) { + str := "to have {key: value}" + if _, ok := matcher.Key.(omegaMatcher); ok { + str += " matching" + } else if _, ok := matcher.Value.(omegaMatcher); ok { + str += " matching" + } + + expect := make(map[interface{}]interface{}, 1) + expect[matcher.Key] = matcher.Value + return format.Message(actual, str, expect) +} + +func (matcher *HaveKeyWithValueMatcher) NegatedFailureMessage(actual interface{}) (message string) { + kStr := "not to have key" + if _, ok := matcher.Key.(omegaMatcher); ok { + kStr = "not to have key matching" + } + + vStr := "or that key's value not be" + if _, ok := matcher.Value.(omegaMatcher); ok { + vStr = "or to have that key's value not matching" + } + + return format.Message(actual, kStr, matcher.Key, vStr, matcher.Value) +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go new file mode 100644 index 00000000..a1837755 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go @@ -0,0 +1,27 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" +) + +type HaveLenMatcher struct { + Count int +} + +func (matcher *HaveLenMatcher) Match(actual interface{}) (success bool, err error) { + length, ok := lengthOf(actual) + if !ok { + return false, fmt.Errorf("HaveLen matcher expects a string/array/map/channel/slice. 
Got:\n%s", format.Object(actual, 1)) + } + + return length == matcher.Count, nil +} + +func (matcher *HaveLenMatcher) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected\n%s\nto have length %d", format.Object(actual, 1), matcher.Count) +} + +func (matcher *HaveLenMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected\n%s\nnot to have length %d", format.Object(actual, 1), matcher.Count) +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go new file mode 100644 index 00000000..cdc1d547 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go @@ -0,0 +1,30 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +type HaveOccurredMatcher struct { +} + +func (matcher *HaveOccurredMatcher) Match(actual interface{}) (success bool, err error) { + if isNil(actual) { + return false, nil + } + + if isError(actual) { + return true, nil + } + + return false, fmt.Errorf("Expected an error. Got:\n%s", format.Object(actual, 1)) +} + +func (matcher *HaveOccurredMatcher) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected an error to have occurred. Got:\n%s", format.Object(actual, 1)) +} + +func (matcher *HaveOccurredMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected error:\n%s\n%s\n%s", format.Object(actual, 1), format.IndentString(actual.(error).Error(), 1), "not to have occurred") +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go new file mode 100644 index 00000000..8b63a899 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go @@ -0,0 +1,35 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" +) + +type HavePrefixMatcher struct { + Prefix string + Args []interface{} +} + +func (matcher *HavePrefixMatcher) Match(actual interface{}) (success bool, err error) { + actualString, ok := toString(actual) + if !ok { + return false, fmt.Errorf("HavePrefix matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1)) + } + prefix := matcher.prefix() + return len(actualString) >= len(prefix) && actualString[0:len(prefix)] == prefix, nil +} + +func (matcher *HavePrefixMatcher) prefix() string { + if len(matcher.Args) > 0 { + return fmt.Sprintf(matcher.Prefix, matcher.Args...) 
+ } + return matcher.Prefix +} + +func (matcher *HavePrefixMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to have prefix", matcher.prefix()) +} + +func (matcher *HavePrefixMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to have prefix", matcher.prefix()) +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go new file mode 100644 index 00000000..eb1b284d --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go @@ -0,0 +1,35 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" +) + +type HaveSuffixMatcher struct { + Suffix string + Args []interface{} +} + +func (matcher *HaveSuffixMatcher) Match(actual interface{}) (success bool, err error) { + actualString, ok := toString(actual) + if !ok { + return false, fmt.Errorf("HaveSuffix matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1)) + } + suffix := matcher.suffix() + return len(actualString) >= len(suffix) && actualString[len(actualString) - len(suffix):] == suffix, nil +} + +func (matcher *HaveSuffixMatcher) suffix() string { + if len(matcher.Args) > 0 { + return fmt.Sprintf(matcher.Suffix, matcher.Args...) + } + return matcher.Suffix +} + +func (matcher *HaveSuffixMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to have suffix", matcher.suffix()) +} + +func (matcher *HaveSuffixMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to have suffix", matcher.suffix()) +} diff --git a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go new file mode 100644 index 00000000..03cdf045 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go @@ -0,0 +1,50 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" + "reflect" +) + +type MatchErrorMatcher struct { + Expected interface{} +} + +func (matcher *MatchErrorMatcher) Match(actual interface{}) (success bool, err error) { + if isNil(actual) { + return false, fmt.Errorf("Expected an error, got nil") + } + + if !isError(actual) { + return false, fmt.Errorf("Expected an error. Got:\n%s", format.Object(actual, 1)) + } + + actualErr := actual.(error) + + if isString(matcher.Expected) { + return reflect.DeepEqual(actualErr.Error(), matcher.Expected), nil + } + + if isError(matcher.Expected) { + return reflect.DeepEqual(actualErr, matcher.Expected), nil + } + + var subMatcher omegaMatcher + var hasSubMatcher bool + if matcher.Expected != nil { + subMatcher, hasSubMatcher = (matcher.Expected).(omegaMatcher) + if hasSubMatcher { + return subMatcher.Match(actualErr.Error()) + } + } + + return false, fmt.Errorf("MatchError must be passed an error, string, or Matcher that can match on strings. 
Got:\n%s", format.Object(matcher.Expected, 1)) +} + +func (matcher *MatchErrorMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to match error", matcher.Expected) +} + +func (matcher *MatchErrorMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to match error", matcher.Expected) +} diff --git a/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go new file mode 100644 index 00000000..efc5e154 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go @@ -0,0 +1,61 @@ +package matchers + +import ( + "bytes" + "encoding/json" + "fmt" + "github.com/onsi/gomega/format" + "reflect" +) + +type MatchJSONMatcher struct { + JSONToMatch interface{} +} + +func (matcher *MatchJSONMatcher) Match(actual interface{}) (success bool, err error) { + actualString, expectedString, err := matcher.prettyPrint(actual) + if err != nil { + return false, err + } + + var aval interface{} + var eval interface{} + + // this is guarded by prettyPrint + json.Unmarshal([]byte(actualString), &aval) + json.Unmarshal([]byte(expectedString), &eval) + + return reflect.DeepEqual(aval, eval), nil +} + +func (matcher *MatchJSONMatcher) FailureMessage(actual interface{}) (message string) { + actualString, expectedString, _ := matcher.prettyPrint(actual) + return format.Message(actualString, "to match JSON of", expectedString) +} + +func (matcher *MatchJSONMatcher) NegatedFailureMessage(actual interface{}) (message string) { + actualString, expectedString, _ := matcher.prettyPrint(actual) + return format.Message(actualString, "not to match JSON of", expectedString) +} + +func (matcher *MatchJSONMatcher) prettyPrint(actual interface{}) (actualFormatted, expectedFormatted string, err error) { + actualString, aok := toString(actual) + expectedString, eok := toString(matcher.JSONToMatch) + + if !(aok && eok) { + return "", "", fmt.Errorf("MatchJSONMatcher matcher requires a string or stringer. 
Got:\n%s", format.Object(actual, 1)) + } + + abuf := new(bytes.Buffer) + ebuf := new(bytes.Buffer) + + if err := json.Indent(abuf, []byte(actualString), "", " "); err != nil { + return "", "", err + } + + if err := json.Indent(ebuf, []byte(expectedString), "", " "); err != nil { + return "", "", err + } + + return abuf.String(), ebuf.String(), nil +} diff --git a/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go new file mode 100644 index 00000000..7ca79a15 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go @@ -0,0 +1,42 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" + "regexp" +) + +type MatchRegexpMatcher struct { + Regexp string + Args []interface{} +} + +func (matcher *MatchRegexpMatcher) Match(actual interface{}) (success bool, err error) { + actualString, ok := toString(actual) + if !ok { + return false, fmt.Errorf("RegExp matcher requires a string or stringer.\nGot:%s", format.Object(actual, 1)) + } + + match, err := regexp.Match(matcher.regexp(), []byte(actualString)) + if err != nil { + return false, fmt.Errorf("RegExp match failed to compile with error:\n\t%s", err.Error()) + } + + return match, nil +} + +func (matcher *MatchRegexpMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to match regular expression", matcher.regexp()) +} + +func (matcher *MatchRegexpMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to match regular expression", matcher.regexp()) +} + +func (matcher *MatchRegexpMatcher) regexp() string { + re := matcher.Regexp + if len(matcher.Args) > 0 { + re = fmt.Sprintf(matcher.Regexp, matcher.Args...) 
+ } + return re +} diff --git a/vendor/github.com/onsi/gomega/matchers/not.go b/vendor/github.com/onsi/gomega/matchers/not.go new file mode 100644 index 00000000..2c91670b --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/not.go @@ -0,0 +1,30 @@ +package matchers + +import ( + "github.com/onsi/gomega/internal/oraclematcher" + "github.com/onsi/gomega/types" +) + +type NotMatcher struct { + Matcher types.GomegaMatcher +} + +func (m *NotMatcher) Match(actual interface{}) (bool, error) { + success, err := m.Matcher.Match(actual) + if err != nil { + return false, err + } + return !success, nil +} + +func (m *NotMatcher) FailureMessage(actual interface{}) (message string) { + return m.Matcher.NegatedFailureMessage(actual) // works beautifully +} + +func (m *NotMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return m.Matcher.FailureMessage(actual) // works beautifully +} + +func (m *NotMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + return oraclematcher.MatchMayChangeInTheFuture(m.Matcher, actual) // just return m.Matcher's value +} diff --git a/vendor/github.com/onsi/gomega/matchers/or.go b/vendor/github.com/onsi/gomega/matchers/or.go new file mode 100644 index 00000000..3bf79980 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/or.go @@ -0,0 +1,67 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" + "github.com/onsi/gomega/internal/oraclematcher" + "github.com/onsi/gomega/types" +) + +type OrMatcher struct { + Matchers []types.GomegaMatcher + + // state + firstSuccessfulMatcher types.GomegaMatcher +} + +func (m *OrMatcher) Match(actual interface{}) (success bool, err error) { + m.firstSuccessfulMatcher = nil + for _, matcher := range m.Matchers { + success, err := matcher.Match(actual) + if err != nil { + return false, err + } + if success { + m.firstSuccessfulMatcher = matcher + return true, nil + } + } + return false, nil +} + +func (m *OrMatcher) FailureMessage(actual interface{}) (message string) { + // not the most beautiful list of matchers, but not bad either... + return format.Message(actual, fmt.Sprintf("To satisfy at least one of these matchers: %s", m.Matchers)) +} + +func (m *OrMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return m.firstSuccessfulMatcher.NegatedFailureMessage(actual) +} + +func (m *OrMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + /* + Example with 3 matchers: A, B, C + + Match evaluates them: F, T, => T + So match is currently T, what should MatchMayChangeInTheFuture() return? + Seems like it only depends on B, since currently B MUST change to allow the result to become F + + Match eval: F, F, F => F + So match is currently F, what should MatchMayChangeInTheFuture() return? + Seems to depend on ANY of them being able to change to T. + */ + + if m.firstSuccessfulMatcher != nil { + // one of the matchers succeeded.. it must be able to change in order to affect the result + return oraclematcher.MatchMayChangeInTheFuture(m.firstSuccessfulMatcher, actual) + } else { + // so all matchers failed.. Any one of them changing would change the result. 
+ for _, matcher := range m.Matchers { + if oraclematcher.MatchMayChangeInTheFuture(matcher, actual) { + return true + } + } + return false // none of were going to change + } +} diff --git a/vendor/github.com/onsi/gomega/matchers/panic_matcher.go b/vendor/github.com/onsi/gomega/matchers/panic_matcher.go new file mode 100644 index 00000000..75ab251b --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/panic_matcher.go @@ -0,0 +1,42 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" + "reflect" +) + +type PanicMatcher struct{} + +func (matcher *PanicMatcher) Match(actual interface{}) (success bool, err error) { + if actual == nil { + return false, fmt.Errorf("PanicMatcher expects a non-nil actual.") + } + + actualType := reflect.TypeOf(actual) + if actualType.Kind() != reflect.Func { + return false, fmt.Errorf("PanicMatcher expects a function. Got:\n%s", format.Object(actual, 1)) + } + if !(actualType.NumIn() == 0 && actualType.NumOut() == 0) { + return false, fmt.Errorf("PanicMatcher expects a function with no arguments and no return value. Got:\n%s", format.Object(actual, 1)) + } + + success = false + defer func() { + if e := recover(); e != nil { + success = true + } + }() + + reflect.ValueOf(actual).Call([]reflect.Value{}) + + return +} + +func (matcher *PanicMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to panic") +} + +func (matcher *PanicMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to panic") +} diff --git a/vendor/github.com/onsi/gomega/matchers/receive_matcher.go b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go new file mode 100644 index 00000000..7a8c2cda --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go @@ -0,0 +1,126 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type ReceiveMatcher struct { + Arg interface{} + receivedValue reflect.Value + channelClosed bool +} + +func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err error) { + if !isChan(actual) { + return false, fmt.Errorf("ReceiveMatcher expects a channel. Got:\n%s", format.Object(actual, 1)) + } + + channelType := reflect.TypeOf(actual) + channelValue := reflect.ValueOf(actual) + + if channelType.ChanDir() == reflect.SendDir { + return false, fmt.Errorf("ReceiveMatcher matcher cannot be passed a send-only channel. 
Got:\n%s", format.Object(actual, 1)) + } + + var subMatcher omegaMatcher + var hasSubMatcher bool + + if matcher.Arg != nil { + subMatcher, hasSubMatcher = (matcher.Arg).(omegaMatcher) + if !hasSubMatcher { + argType := reflect.TypeOf(matcher.Arg) + if argType.Kind() != reflect.Ptr { + return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nTo:\n%s\nYou need to pass a pointer!", format.Object(actual, 1), format.Object(matcher.Arg, 1)) + } + + assignable := channelType.Elem().AssignableTo(argType.Elem()) + if !assignable { + return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nTo:\n%s", format.Object(actual, 1), format.Object(matcher.Arg, 1)) + } + } + } + + winnerIndex, value, open := reflect.Select([]reflect.SelectCase{ + reflect.SelectCase{Dir: reflect.SelectRecv, Chan: channelValue}, + reflect.SelectCase{Dir: reflect.SelectDefault}, + }) + + var closed bool + var didReceive bool + if winnerIndex == 0 { + closed = !open + didReceive = open + } + matcher.channelClosed = closed + + if closed { + return false, nil + } + + if hasSubMatcher { + if didReceive { + matcher.receivedValue = value + return subMatcher.Match(matcher.receivedValue.Interface()) + } else { + return false, nil + } + } + + if didReceive { + if matcher.Arg != nil { + outValue := reflect.ValueOf(matcher.Arg) + reflect.Indirect(outValue).Set(value) + } + + return true, nil + } else { + return false, nil + } +} + +func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message string) { + subMatcher, hasSubMatcher := (matcher.Arg).(omegaMatcher) + + closedAddendum := "" + if matcher.channelClosed { + closedAddendum = " The channel is closed." + } + + if hasSubMatcher { + if matcher.receivedValue.IsValid() { + return subMatcher.FailureMessage(matcher.receivedValue.Interface()) + } + return "When passed a matcher, ReceiveMatcher's channel *must* receive something." + } else { + return format.Message(actual, "to receive something."+closedAddendum) + } +} + +func (matcher *ReceiveMatcher) NegatedFailureMessage(actual interface{}) (message string) { + subMatcher, hasSubMatcher := (matcher.Arg).(omegaMatcher) + + closedAddendum := "" + if matcher.channelClosed { + closedAddendum = " The channel is closed." + } + + if hasSubMatcher { + if matcher.receivedValue.IsValid() { + return subMatcher.NegatedFailureMessage(matcher.receivedValue.Interface()) + } + return "When passed a matcher, ReceiveMatcher's channel *must* receive something." + } else { + return format.Message(actual, "not to receive anything."+closedAddendum) + } +} + +func (matcher *ReceiveMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + if !isChan(actual) { + return false + } + + return !matcher.channelClosed +} diff --git a/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go b/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go new file mode 100644 index 00000000..f7dd8534 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go @@ -0,0 +1,30 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +type SucceedMatcher struct { +} + +func (matcher *SucceedMatcher) Match(actual interface{}) (success bool, err error) { + if actual == nil { + return true, nil + } + + if isError(actual) { + return false, nil + } + + return false, fmt.Errorf("Expected an error-type. 
Got:\n%s", format.Object(actual, 1)) +} + +func (matcher *SucceedMatcher) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected success, but got an error:\n%s\n%s", format.Object(actual, 1), format.IndentString(actual.(error).Error(), 1)) +} + +func (matcher *SucceedMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return "Expected failure, but got no error." +} diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go new file mode 100644 index 00000000..119d21ef --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go @@ -0,0 +1,41 @@ +package bipartitegraph + +import "errors" +import "fmt" + +import . "github.com/onsi/gomega/matchers/support/goraph/node" +import . "github.com/onsi/gomega/matchers/support/goraph/edge" + +type BipartiteGraph struct { + Left NodeOrderedSet + Right NodeOrderedSet + Edges EdgeSet +} + +func NewBipartiteGraph(leftValues, rightValues []interface{}, neighbours func(interface{}, interface{}) (bool, error)) (*BipartiteGraph, error) { + left := NodeOrderedSet{} + for i, _ := range leftValues { + left = append(left, Node{i}) + } + + right := NodeOrderedSet{} + for j, _ := range rightValues { + right = append(right, Node{j + len(left)}) + } + + edges := EdgeSet{} + for i, leftValue := range leftValues { + for j, rightValue := range rightValues { + neighbours, err := neighbours(leftValue, rightValue) + if err != nil { + return nil, errors.New(fmt.Sprintf("error determining adjacency for %v and %v: %s", leftValue, rightValue, err.Error())) + } + + if neighbours { + edges = append(edges, Edge{left[i], right[j]}) + } + } + } + + return &BipartiteGraph{left, right, edges}, nil +} diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go new file mode 100644 index 00000000..32529c51 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go @@ -0,0 +1,161 @@ +package bipartitegraph + +import . "github.com/onsi/gomega/matchers/support/goraph/node" +import . 
"github.com/onsi/gomega/matchers/support/goraph/edge" +import "github.com/onsi/gomega/matchers/support/goraph/util" + +func (bg *BipartiteGraph) LargestMatching() (matching EdgeSet) { + paths := bg.maximalDisjointSLAPCollection(matching) + + for len(paths) > 0 { + for _, path := range paths { + matching = matching.SymmetricDifference(path) + } + paths = bg.maximalDisjointSLAPCollection(matching) + } + + return +} + +func (bg *BipartiteGraph) maximalDisjointSLAPCollection(matching EdgeSet) (result []EdgeSet) { + guideLayers := bg.createSLAPGuideLayers(matching) + if len(guideLayers) == 0 { + return + } + + used := make(map[Node]bool) + + for _, u := range guideLayers[len(guideLayers)-1] { + slap, found := bg.findDisjointSLAP(u, matching, guideLayers, used) + if found { + for _, edge := range slap { + used[edge.Node1] = true + used[edge.Node2] = true + } + result = append(result, slap) + } + } + + return +} + +func (bg *BipartiteGraph) findDisjointSLAP( + start Node, + matching EdgeSet, + guideLayers []NodeOrderedSet, + used map[Node]bool, +) ([]Edge, bool) { + return bg.findDisjointSLAPHelper(start, EdgeSet{}, len(guideLayers)-1, matching, guideLayers, used) +} + +func (bg *BipartiteGraph) findDisjointSLAPHelper( + currentNode Node, + currentSLAP EdgeSet, + currentLevel int, + matching EdgeSet, + guideLayers []NodeOrderedSet, + used map[Node]bool, +) (EdgeSet, bool) { + used[currentNode] = true + + if currentLevel == 0 { + return currentSLAP, true + } + + for _, nextNode := range guideLayers[currentLevel-1] { + if used[nextNode] { + continue + } + + edge, found := bg.Edges.FindByNodes(currentNode, nextNode) + if !found { + continue + } + + if matching.Contains(edge) == util.Odd(currentLevel) { + continue + } + + currentSLAP = append(currentSLAP, edge) + slap, found := bg.findDisjointSLAPHelper(nextNode, currentSLAP, currentLevel-1, matching, guideLayers, used) + if found { + return slap, true + } + currentSLAP = currentSLAP[:len(currentSLAP)-1] + } + + used[currentNode] = false + return nil, false +} + +func (bg *BipartiteGraph) createSLAPGuideLayers(matching EdgeSet) (guideLayers []NodeOrderedSet) { + used := make(map[Node]bool) + currentLayer := NodeOrderedSet{} + + for _, node := range bg.Left { + if matching.Free(node) { + used[node] = true + currentLayer = append(currentLayer, node) + } + } + + if len(currentLayer) == 0 { + return []NodeOrderedSet{} + } else { + guideLayers = append(guideLayers, currentLayer) + } + + done := false + + for !done { + lastLayer := currentLayer + currentLayer = NodeOrderedSet{} + + if util.Odd(len(guideLayers)) { + for _, leftNode := range lastLayer { + for _, rightNode := range bg.Right { + if used[rightNode] { + continue + } + + edge, found := bg.Edges.FindByNodes(leftNode, rightNode) + if !found || matching.Contains(edge) { + continue + } + + currentLayer = append(currentLayer, rightNode) + used[rightNode] = true + + if matching.Free(rightNode) { + done = true + } + } + } + } else { + for _, rightNode := range lastLayer { + for _, leftNode := range bg.Left { + if used[leftNode] { + continue + } + + edge, found := bg.Edges.FindByNodes(leftNode, rightNode) + if !found || !matching.Contains(edge) { + continue + } + + currentLayer = append(currentLayer, leftNode) + used[leftNode] = true + } + } + + } + + if len(currentLayer) == 0 { + return []NodeOrderedSet{} + } else { + guideLayers = append(guideLayers, currentLayer) + } + } + + return +} diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go 
b/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go new file mode 100644 index 00000000..4fd15cc0 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go @@ -0,0 +1,61 @@ +package edge + +import . "github.com/onsi/gomega/matchers/support/goraph/node" + +type Edge struct { + Node1 Node + Node2 Node +} + +type EdgeSet []Edge + +func (ec EdgeSet) Free(node Node) bool { + for _, e := range ec { + if e.Node1 == node || e.Node2 == node { + return false + } + } + + return true +} + +func (ec EdgeSet) Contains(edge Edge) bool { + for _, e := range ec { + if e == edge { + return true + } + } + + return false +} + +func (ec EdgeSet) FindByNodes(node1, node2 Node) (Edge, bool) { + for _, e := range ec { + if (e.Node1 == node1 && e.Node2 == node2) || (e.Node1 == node2 && e.Node2 == node1) { + return e, true + } + } + + return Edge{}, false +} + +func (ec EdgeSet) SymmetricDifference(ec2 EdgeSet) EdgeSet { + edgesToInclude := make(map[Edge]bool) + + for _, e := range ec { + edgesToInclude[e] = true + } + + for _, e := range ec2 { + edgesToInclude[e] = !edgesToInclude[e] + } + + result := EdgeSet{} + for e, include := range edgesToInclude { + if include { + result = append(result, e) + } + } + + return result +} diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go new file mode 100644 index 00000000..800c2ea8 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go @@ -0,0 +1,7 @@ +package node + +type Node struct { + Id int +} + +type NodeOrderedSet []Node diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/util/util.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/util/util.go new file mode 100644 index 00000000..a24cd275 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/util/util.go @@ -0,0 +1,7 @@ +package util + +import "math" + +func Odd(n int) bool { + return math.Mod(float64(n), 2.0) == 1.0 +} diff --git a/vendor/github.com/onsi/gomega/matchers/type_support.go b/vendor/github.com/onsi/gomega/matchers/type_support.go new file mode 100644 index 00000000..ef9b4483 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/type_support.go @@ -0,0 +1,165 @@ +/* +Gomega matchers + +This package implements the Gomega matchers and does not typically need to be imported. 
+See the docs for Gomega for documentation on the matchers + +http://onsi.github.io/gomega/ +*/ +package matchers + +import ( + "fmt" + "reflect" +) + +type omegaMatcher interface { + Match(actual interface{}) (success bool, err error) + FailureMessage(actual interface{}) (message string) + NegatedFailureMessage(actual interface{}) (message string) +} + +func isBool(a interface{}) bool { + return reflect.TypeOf(a).Kind() == reflect.Bool +} + +func isNumber(a interface{}) bool { + if a == nil { + return false + } + kind := reflect.TypeOf(a).Kind() + return reflect.Int <= kind && kind <= reflect.Float64 +} + +func isInteger(a interface{}) bool { + kind := reflect.TypeOf(a).Kind() + return reflect.Int <= kind && kind <= reflect.Int64 +} + +func isUnsignedInteger(a interface{}) bool { + kind := reflect.TypeOf(a).Kind() + return reflect.Uint <= kind && kind <= reflect.Uint64 +} + +func isFloat(a interface{}) bool { + kind := reflect.TypeOf(a).Kind() + return reflect.Float32 <= kind && kind <= reflect.Float64 +} + +func toInteger(a interface{}) int64 { + if isInteger(a) { + return reflect.ValueOf(a).Int() + } else if isUnsignedInteger(a) { + return int64(reflect.ValueOf(a).Uint()) + } else if isFloat(a) { + return int64(reflect.ValueOf(a).Float()) + } else { + panic(fmt.Sprintf("Expected a number! Got <%T> %#v", a, a)) + } +} + +func toUnsignedInteger(a interface{}) uint64 { + if isInteger(a) { + return uint64(reflect.ValueOf(a).Int()) + } else if isUnsignedInteger(a) { + return reflect.ValueOf(a).Uint() + } else if isFloat(a) { + return uint64(reflect.ValueOf(a).Float()) + } else { + panic(fmt.Sprintf("Expected a number! Got <%T> %#v", a, a)) + } +} + +func toFloat(a interface{}) float64 { + if isInteger(a) { + return float64(reflect.ValueOf(a).Int()) + } else if isUnsignedInteger(a) { + return float64(reflect.ValueOf(a).Uint()) + } else if isFloat(a) { + return reflect.ValueOf(a).Float() + } else { + panic(fmt.Sprintf("Expected a number! 
Got <%T> %#v", a, a)) + } +} + +func isError(a interface{}) bool { + _, ok := a.(error) + return ok +} + +func isChan(a interface{}) bool { + if isNil(a) { + return false + } + return reflect.TypeOf(a).Kind() == reflect.Chan +} + +func isMap(a interface{}) bool { + if a == nil { + return false + } + return reflect.TypeOf(a).Kind() == reflect.Map +} + +func isArrayOrSlice(a interface{}) bool { + if a == nil { + return false + } + switch reflect.TypeOf(a).Kind() { + case reflect.Array, reflect.Slice: + return true + default: + return false + } +} + +func isString(a interface{}) bool { + if a == nil { + return false + } + return reflect.TypeOf(a).Kind() == reflect.String +} + +func toString(a interface{}) (string, bool) { + aString, isString := a.(string) + if isString { + return aString, true + } + + aBytes, isBytes := a.([]byte) + if isBytes { + return string(aBytes), true + } + + aStringer, isStringer := a.(fmt.Stringer) + if isStringer { + return aStringer.String(), true + } + + return "", false +} + +func lengthOf(a interface{}) (int, bool) { + if a == nil { + return 0, false + } + switch reflect.TypeOf(a).Kind() { + case reflect.Map, reflect.Array, reflect.String, reflect.Chan, reflect.Slice: + return reflect.ValueOf(a).Len(), true + default: + return 0, false + } +} + +func isNil(a interface{}) bool { + if a == nil { + return true + } + + switch reflect.TypeOf(a).Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return reflect.ValueOf(a).IsNil() + } + + return false +} diff --git a/vendor/github.com/onsi/gomega/matchers/with_transform.go b/vendor/github.com/onsi/gomega/matchers/with_transform.go new file mode 100644 index 00000000..8e58d8a0 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/with_transform.go @@ -0,0 +1,72 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/internal/oraclematcher" + "github.com/onsi/gomega/types" +) + +type WithTransformMatcher struct { + // input + Transform interface{} // must be a function of one parameter that returns one value + Matcher types.GomegaMatcher + + // cached value + transformArgType reflect.Type + + // state + transformedValue interface{} +} + +func NewWithTransformMatcher(transform interface{}, matcher types.GomegaMatcher) *WithTransformMatcher { + if transform == nil { + panic("transform function cannot be nil") + } + txType := reflect.TypeOf(transform) + if txType.NumIn() != 1 { + panic("transform function must have 1 argument") + } + if txType.NumOut() != 1 { + panic("transform function must have 1 return value") + } + + return &WithTransformMatcher{ + Transform: transform, + Matcher: matcher, + transformArgType: reflect.TypeOf(transform).In(0), + } +} + +func (m *WithTransformMatcher) Match(actual interface{}) (bool, error) { + // return error if actual's type is incompatible with Transform function's argument type + actualType := reflect.TypeOf(actual) + if !actualType.AssignableTo(m.transformArgType) { + return false, fmt.Errorf("Transform function expects '%s' but we have '%s'", m.transformArgType, actualType) + } + + // call the Transform function with `actual` + fn := reflect.ValueOf(m.Transform) + result := fn.Call([]reflect.Value{reflect.ValueOf(actual)}) + m.transformedValue = result[0].Interface() // expect exactly one value + + return m.Matcher.Match(m.transformedValue) +} + +func (m *WithTransformMatcher) FailureMessage(_ interface{}) (message string) { + return 
m.Matcher.FailureMessage(m.transformedValue) +} + +func (m *WithTransformMatcher) NegatedFailureMessage(_ interface{}) (message string) { + return m.Matcher.NegatedFailureMessage(m.transformedValue) +} + +func (m *WithTransformMatcher) MatchMayChangeInTheFuture(_ interface{}) bool { + // TODO: Maybe this should always just return true? (Only an issue for non-deterministic transformers.) + // + // Querying the next matcher is fine if the transformer always will return the same value. + // But if the transformer is non-deterministic and returns a different value each time, then there + // is no point in querying the next matcher, since it can only comment on the last transformed value. + return oraclematcher.MatchMayChangeInTheFuture(m.Matcher, m.transformedValue) +} diff --git a/vendor/github.com/onsi/gomega/types/types.go b/vendor/github.com/onsi/gomega/types/types.go new file mode 100644 index 00000000..1c632ade --- /dev/null +++ b/vendor/github.com/onsi/gomega/types/types.go @@ -0,0 +1,17 @@ +package types + +type GomegaFailHandler func(message string, callerSkip ...int) + +//A simple *testing.T interface wrapper +type GomegaTestingT interface { + Errorf(format string, args ...interface{}) +} + +//All Gomega matchers must implement the GomegaMatcher interface +// +//For details on writing custom matchers, check out: http://onsi.github.io/gomega/#adding_your_own_matchers +type GomegaMatcher interface { + Match(actual interface{}) (success bool, err error) + FailureMessage(actual interface{}) (message string) + NegatedFailureMessage(actual interface{}) (message string) +}
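The GomegaMatcher interface vendored above is the full contract a custom matcher has to satisfy: Match, FailureMessage and NegatedFailureMessage. As an illustration only (not part of this diff), a minimal custom matcher could look like the sketch below; the beEmptyStringMatcher name and its messages are hypothetical, while format.Object and format.Message are the same helpers the vendored matchers already use.

package matchers_example

import (
	"fmt"

	"github.com/onsi/gomega/format"
)

// beEmptyStringMatcher is a hypothetical matcher that implements the three
// methods of types.GomegaMatcher shown in types.go above.
type beEmptyStringMatcher struct{}

func (m *beEmptyStringMatcher) Match(actual interface{}) (success bool, err error) {
	s, ok := actual.(string)
	if !ok {
		// Follow the convention used by the vendored matchers: return an
		// error when the actual value has an unexpected type.
		return false, fmt.Errorf("beEmptyStringMatcher expects a string. Got:\n%s", format.Object(actual, 1))
	}
	return s == "", nil
}

func (m *beEmptyStringMatcher) FailureMessage(actual interface{}) (message string) {
	return format.Message(actual, "to be the empty string")
}

func (m *beEmptyStringMatcher) NegatedFailureMessage(actual interface{}) (message string) {
	return format.Message(actual, "not to be the empty string")
}

Because it satisfies types.GomegaMatcher, such a matcher can be passed to Gomega assertions the same way the vendored ones are, for example Expect(value).To(&beEmptyStringMatcher{}) in a test that imports gomega.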