From 4ec5108e015b6271cad9a6a2ff93932d7f7e898a Mon Sep 17 00:00:00 2001 From: Andrew Sisley Date: Thu, 9 Feb 2023 11:15:13 -0500 Subject: [PATCH 01/12] Fix broken test It is only half broken, in that this update has no effect on the production code under test, but the incorrect index causes a panic in the new system, whereas the old one silently ignored the value. --- tests/integration/explain/default/delete_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/explain/default/delete_test.go b/tests/integration/explain/default/delete_test.go index 9006f8ffaa..3e2321abbf 100644 --- a/tests/integration/explain/default/delete_test.go +++ b/tests/integration/explain/default/delete_test.go @@ -161,8 +161,8 @@ func TestExplainDeletionUsingMultiAndSingleIDs_Success(t *testing.T) { }, Updates: map[int]map[int][]string{ - 0: { - 2: { + 2: { + 0: { `{ "age": 28, "verified": false From ff9162e290e385dc67c61cec3cf62b083d61bb2d Mon Sep 17 00:00:00 2001 From: Andrew Sisley Date: Wed, 8 Feb 2023 15:16:12 -0500 Subject: [PATCH 02/12] Create new integration test framework Creates a new integration test framework, and migrates shared functions to its files from the old framework. The Init code is unchanged, as is the change detector code - they have just moved. Is not in use until the next commit - I thought it might be easier to review if split into two. It provides the same features as the old system, but without implicit/ridged order of operations, and hopefully simpler code. Bonus fix - transactional tests now work with the change detector. --- tests/integration/changeDetector.go | 288 ++++++++++ tests/integration/explain/utils.go | 2 +- tests/integration/testCase.go | 152 ++++++ tests/integration/utils.go | 585 --------------------- tests/integration/utils2.go | 781 ++++++++++++++++++++++++++++ 5 files changed, 1222 insertions(+), 586 deletions(-) create mode 100644 tests/integration/changeDetector.go create mode 100644 tests/integration/testCase.go create mode 100644 tests/integration/utils2.go diff --git a/tests/integration/changeDetector.go b/tests/integration/changeDetector.go new file mode 100644 index 0000000000..9f0dca8d88 --- /dev/null +++ b/tests/integration/changeDetector.go @@ -0,0 +1,288 @@ +// Copyright 2022 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package tests + +import ( + "context" + "fmt" + "io/fs" + "os" + "os/exec" + "path" + "runtime" + "strings" + "testing" +) + +func IsDetectingDbChanges() bool { + return DetectDbChanges +} + +// Returns true if test should pass early +func DetectDbChangesPreTestChecks( + t *testing.T, + collectionNames []string, +) bool { + if previousTestCaseTestName == t.Name() { + // The database format changer currently only supports running the first test + // case, if a second case is detected we return early + return true + } + previousTestCaseTestName = t.Name() + + if areDatabaseFormatChangesDocumented { + // If we are checking that database formatting changes have been made and + // documented, and changes are documented, then the tests can all pass. 
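
Editorial aside on the index fix in the first patch above: reading how the new framework consumes `Updates` later in this series (the conversion in `ExecuteRequestTestCase`), the outer key appears to select the collection and the inner key the document within that collection, which is why `0: { 2: ... }` panicked and `2: { 0: ... }` is correct. A minimal sketch of the corrected literal, with the update body copied from the test; the key-order reading is an inference, not something stated in the patch:

```go
package tests

// updates is an editorial sketch only — not part of the patch. The key order
// (collection index outer, document index inner) is inferred from how the new
// framework converts Updates into UpdateDoc actions later in this series.
var updates = map[int]map[int][]string{
	2: { // collection index 2
		0: { // document 0 of that collection
			`{
				"age": 28,
				"verified": false
			}`,
		},
	},
}
```
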
+ return true + } + + if len(collectionNames) == 0 { + // If the test doesn't specify any collections, then we can't use it to check + // the database format, so we skip it + t.SkipNow() + } + + return false +} + +func detectDbChangesInit(repository string, targetBranch string) { + badgerFile = true + badgerInMemory = false + + if SetupOnly { + // Only the primary test process should perform the setup below + return + } + + tempDir := os.TempDir() + + latestTargetCommitHash := getLatestCommit(repository, targetBranch) + detectDbChangesCodeDir = path.Join(tempDir, "defra", latestTargetCommitHash, "code") + + _, err := os.Stat(detectDbChangesCodeDir) + // Warning - there is a race condition here, where if running multiple packages in + // parallel (as per default) against a new target commit multiple test pacakges will + // try and clone the target branch at the same time (and will fail). + // This could be solved by using a file lock or similar, however running the change + // detector in parallel is significantly slower than running it serially due to machine + // resource constraints, so I am leaving the race condition in and recommending running + // the change detector with the CLI args `-p 1` + if os.IsNotExist(err) { + cloneCmd := exec.Command( + "git", + "clone", + "-b", + targetBranch, + "--single-branch", + repository, + detectDbChangesCodeDir, + ) + cloneCmd.Stdout = os.Stdout + cloneCmd.Stderr = os.Stderr + err := cloneCmd.Run() + if err != nil { + panic(err) + } + } else if err != nil { + panic(err) + } else { + // Cache must be cleaned, or it might not run the test setup! + // Note: this also acts as a race condition if multiple build are running against the + // same target if this happens some tests might be silently skipped if the + // child-setup fails. Currently I think it is worth it for slightly faster build + // times, but feel very free to change this! + goTestCacheCmd := exec.Command("go", "clean", "-testcache") + goTestCacheCmd.Dir = detectDbChangesCodeDir + err = goTestCacheCmd.Run() + if err != nil { + panic(err) + } + } + + areDatabaseFormatChangesDocumented = checkIfDatabaseFormatChangesAreDocumented() +} + +func SetupDatabaseUsingTargetBranch( + ctx context.Context, + t *testing.T, + dbi databaseInfo, + collectionNames []string, +) databaseInfo { + // Close this database instance so it may be re-inited in the child process, + // and this one post-child + dbi.db.Close(ctx) + + currentTestPackage, err := os.Getwd() + if err != nil { + panic(err) + } + + targetTestPackage := detectDbChangesCodeDir + "/tests/integration/" + strings.Split( + currentTestPackage, + "/tests/integration/", + )[1] + + // If we are checking for database changes, and we are not seting up the database, + // then we must be in the main test process, and need to create a new process + // setting up the database for this test using the old branch We should not setup + // the database using the current branch/process + goTestCmd := exec.Command( + "go", + "test", + "./...", + "--run", + fmt.Sprintf("^%s$", t.Name()), + "-v", + ) + + goTestCmd.Dir = targetTestPackage + goTestCmd.Env = os.Environ() + goTestCmd.Env = append( + goTestCmd.Env, + setupOnlyEnvName+"=true", + fileBadgerPathEnvName+"="+dbi.path, + ) + out, err := goTestCmd.Output() + + if err != nil { + // If file is not found - this must be a new test and + // doesn't exist in the target branch, so we pass it + // because the child process tries to run the test, but + // if it doesnt find it, the parent test should pass (not panic). 
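
Aside: the race-condition comment in `detectDbChangesInit` above names a file lock as the alternative to running the change detector with `-p 1`. A minimal sketch of that idea, assuming the same per-commit temp directory layout; the helper name and polling interval are invented and nothing like this exists in the patch:

```go
package tests

import (
	"os"
	"path"
	"time"
)

// acquireCloneLock is a sketch only of the file-lock approach mentioned above.
// It serialises the target-branch clone across test packages sharing a commit dir.
func acquireCloneLock(dir string) (func(), error) {
	lockPath := path.Join(dir, ".clone.lock")
	for {
		f, err := os.OpenFile(lockPath, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0o644)
		if err == nil {
			f.Close()
			// Caller clones the target branch, then calls the release func.
			return func() { os.Remove(lockPath) }, nil
		}
		if !os.IsExist(err) {
			return nil, err
		}
		// Another test package is cloning the same commit; wait for it.
		time.Sleep(100 * time.Millisecond)
	}
}
```
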
+ if strings.Contains(err.Error(), ": no such file or directory") { + t.SkipNow() + } else { + // Only log the output if there is an error different from above, + // logging child test runs confuses the go test runner making it + // think there are no tests in the parent run (it will still + // run everything though)! + log.ErrorE(ctx, string(out), err) + panic(err) + } + } + + refreshedDb, err := newBadgerFileDB(ctx, t, dbi.path) + if err != nil { + panic(err) + } + + _, err = refreshedDb.db.GetCollectionByName(ctx, collectionNames[0]) + if err != nil { + if err.Error() == "datastore: key not found" { + // If collection is not found - this must be a new test and + // doesn't exist in the target branch, so we pass it + t.SkipNow() + } else { + panic(err) + } + } + return refreshedDb +} + +func checkIfDatabaseFormatChangesAreDocumented() bool { + previousDbChangeFiles, targetDirFound := getDatabaseFormatDocumentation( + detectDbChangesCodeDir, + false, + ) + if !targetDirFound { + panic("Documentation directory not found") + } + + previousDbChanges := make(map[string]struct{}, len(previousDbChangeFiles)) + for _, f := range previousDbChangeFiles { + // Note: we assume flat directory for now - sub directories are not expanded + previousDbChanges[f.Name()] = struct{}{} + } + + _, thisFilePath, _, _ := runtime.Caller(0) + currentDbChanges, currentDirFound := getDatabaseFormatDocumentation(thisFilePath, true) + if !currentDirFound { + panic("Documentation directory not found") + } + + for _, f := range currentDbChanges { + if _, isChangeOld := previousDbChanges[f.Name()]; !isChangeOld { + // If there is a new file in the directory then the change + // has been documented and the test should pass + return true + } + } + + return false +} + +func getDatabaseFormatDocumentation(startPath string, allowDescend bool) ([]fs.DirEntry, bool) { + startInfo, err := os.Stat(startPath) + if err != nil { + panic(err) + } + + var currentDirectory string + if startInfo.IsDir() { + currentDirectory = startPath + } else { + currentDirectory = path.Dir(startPath) + } + + for { + directoryContents, err := os.ReadDir(currentDirectory) + if err != nil { + panic(err) + } + + for _, directoryItem := range directoryContents { + directoryItemPath := path.Join(currentDirectory, directoryItem.Name()) + if directoryItem.Name() == documentationDirectoryName { + probableFormatChangeDirectoryContents, err := os.ReadDir(directoryItemPath) + if err != nil { + panic(err) + } + for _, possibleDocumentationItem := range probableFormatChangeDirectoryContents { + if path.Ext(possibleDocumentationItem.Name()) == ".md" { + // If the directory's name matches the expected, and contains .md files + // we assume it is the documentation directory + return probableFormatChangeDirectoryContents, true + } + } + } else { + if directoryItem.IsDir() { + childContents, directoryFound := getDatabaseFormatDocumentation(directoryItemPath, false) + if directoryFound { + return childContents, true + } + } + } + } + + if allowDescend { + // If not found in this directory, continue down the path + currentDirectory = path.Dir(currentDirectory) + + if currentDirectory == "." || currentDirectory == "/" { + panic("Database documentation directory not found") + } + } else { + return []fs.DirEntry{}, false + } + } +} + +func getLatestCommit(repoName string, branchName string) string { + cmd := exec.Command("git", "ls-remote", repoName, "refs/heads/"+branchName) + result, err := cmd.Output() + if err != nil { + panic(err) + } + + // This is a tab, not a space! 
+ seperator := "\t" + return strings.Split(string(result), seperator)[0] +} diff --git a/tests/integration/explain/utils.go b/tests/integration/explain/utils.go index 5966e8a776..1e73ca9f9b 100644 --- a/tests/integration/explain/utils.go +++ b/tests/integration/explain/utils.go @@ -112,7 +112,7 @@ func ExecuteExplainRequestTestCase( collectionNames []string, explainTest ExplainRequestTestCase, ) { - if testUtils.DetectDbChanges && testUtils.DetectDbChangesPreTestChecks(t, collectionNames, false) { + if testUtils.DetectDbChanges && testUtils.DetectDbChangesPreTestChecks(t, collectionNames) { return } diff --git a/tests/integration/testCase.go b/tests/integration/testCase.go new file mode 100644 index 0000000000..8e7e36c605 --- /dev/null +++ b/tests/integration/testCase.go @@ -0,0 +1,152 @@ +// Copyright 2022 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package tests + +// TestCase contains the details of the test case to execute. +type TestCase struct { + // Test description, optional. + Description string + + // Actions contains the set of actions and their expected results that + // this test should execute. They will execute in the order that they + // are provided. + Actions []any +} + +// SetupComplete is a flag to explicitly notify the change detector at which point +// setup is complete so that it may split actions across database code-versions. +// +// If a SetupComplete action is not provided the change detector will split before +// the first item that is neither a SchemaUpdate, CreateDoc or UpdateDoc action. +type SetupComplete struct{} + +// SchemaUpdate is an action that will update the database schema. +type SchemaUpdate struct { + // The schema update. + Schema string + + // Any error expected from the action. Optional. + // + // String can be a partial, and the test will pass if an error is returned that + // contains this string. + ExpectedError string +} + +// CreateDoc will attempt to create the given document in the given collection +// using the collection api. +type CreateDoc struct { + // The collection in which this document should be created. + CollectionId int + + // The document to create, in JSON string format. + Doc string + + // Any error expected from the action. Optional. + // + // String can be a partial, and the test will pass if an error is returned that + // contains this string. + ExpectedError string +} + +// UpdateDoc will attempt to update the given document in the given collection +// using the collection api. +type UpdateDoc struct { + // The collection in which this document exists. + CollectionId int + + // The index-identifier of the document within the collection. This is based on + // the order in which it was created, not the ordering of the document within the + // database. + DocId int + + // The document update, in JSON string format. Will only update the properties + // provided. + Doc string + + // Any error expected from the action. Optional. + // + // String can be a partial, and the test will pass if an error is returned that + // contains this string. + ExpectedError string +} + +// Request represents a standard Defra (GQL) request. +type Request struct { + // The request to execute. 
+ Request string + + // The expected (data) results of the issued request. + Results []map[string]any + + // Any error expected from the action. Optional. + // + // String can be a partial, and the test will pass if an error is returned that + // contains this string. + ExpectedError string +} + +// TransactionRequest2 represents a transactional request. +// +// A new transaction will be created for the first TransactionRequest2 of any given +// TransactionId. TransactionRequest2s will be submitted to the database in the order +// in which they are recieved (interleaving amoungst other actions if provided), however +// they will not be commited until a TransactionCommit of matching TransactionId is +// provided. +type TransactionRequest2 struct { + // Used to identify the transaction for this to run against. + TransactionId int + + // The request to run against the transaction. + Request string + + // The expected (data) results of the issued request. + Results []map[string]any + + // Any error expected from the action. Optional. + // + // String can be a partial, and the test will pass if an error is returned that + // contains this string. + ExpectedError string +} + +// TransactionCommit represents a commit request for a transaction of the given id. +type TransactionCommit struct { + // Used to identify the transaction to commit. + TransactionId int + + // Any error expected from the action. Optional. + // + // String can be a partial, and the test will pass if an error is returned that + // contains this string. + ExpectedError string +} + +// SubscriptionRequest2 represents a subscription request. +// +// The subscription will remain active until shortly after all actions have been processed. +// The results of the subscription will then be asserted upon. +type SubscriptionRequest2 struct { + // The subscription request to submit. + Request string + + // If set to true, the request should yield no results and should instead timeout. + // The timeout is duration is that of subscriptionTimeout (1 second). + ExpectedTimout bool + + // The expected (data) results yielded through the subscription across its lifetime. + Results []map[string]any + + // Any error expected from the action. Optional. + // + // String can be a partial, and the test will pass if an error is returned that + // contains this string. 
+ ExpectedError string +} diff --git a/tests/integration/utils.go b/tests/integration/utils.go index a030ba46f8..216745c5ac 100644 --- a/tests/integration/utils.go +++ b/tests/integration/utils.go @@ -12,52 +12,18 @@ package tests import ( "context" - "fmt" - "io/fs" - "os" - "os/exec" - "path" - "runtime" - "strings" "testing" "time" - badger "github.com/dgraph-io/badger/v3" - ds "github.com/ipfs/go-datastore" "github.com/sourcenetwork/immutable" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore" - badgerds "github.com/sourcenetwork/defradb/datastore/badger/v3" - "github.com/sourcenetwork/defradb/datastore/memory" - "github.com/sourcenetwork/defradb/db" - "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/logging" ) -const ( - memoryBadgerEnvName = "DEFRA_BADGER_MEMORY" - fileBadgerEnvName = "DEFRA_BADGER_FILE" - fileBadgerPathEnvName = "DEFRA_BADGER_FILE_PATH" - inMemoryEnvName = "DEFRA_IN_MEMORY" - setupOnlyEnvName = "DEFRA_SETUP_ONLY" - detectDbChangesEnvName = "DEFRA_DETECT_DATABASE_CHANGES" - repositoryEnvName = "DEFRA_CODE_REPOSITORY" - targetBranchEnvName = "DEFRA_TARGET_BRANCH" - documentationDirectoryName = "data_format_changes" -) - -var ( - log = logging.MustNewLogger("defra.tests.integration") - badgerInMemory bool - badgerFile bool - inMemoryStore bool -) - -const subscriptionTimeout = 1 * time.Second - // Represents a subscription request. type SubscriptionRequest struct { Request string @@ -108,214 +74,6 @@ type RequestTestCase struct { ExpectedError string } -type databaseInfo struct { - name string - path string - db client.DB - rootstore ds.Batching -} - -func (dbi databaseInfo) Name() string { - return dbi.name -} - -func (dbi databaseInfo) Rootstore() ds.Batching { - return dbi.rootstore -} - -func (dbi databaseInfo) DB() client.DB { - return dbi.db -} - -var databaseDir string - -/* -If this is set to true the integration test suite will instead of it's normal profile do -the following: - -On [package] Init: - - Get the (local) latest commit from the target/parent branch // code assumes - git fetch has been done - - Check to see if a clone of that commit/branch is available in the temp dir, and - if not clone the target branch - - Check to see if there are any new .md files in the current branch's data_format_changes - dir (vs the target branch) - -For each test: - - If new documentation detected, pass the test and exit - - Create a new (test/auto-deleted) temp dir for defra to live/run in - - Run the test setup (add initial schema, docs, updates) using the target branch (test is skipped - if test does not exist in target and is new to this branch) - - Run the test request and assert results (as per normal tests) using the current branch -*/ -var DetectDbChanges bool -var SetupOnly bool - -var detectDbChangesCodeDir string -var areDatabaseFormatChangesDocumented bool -var previousTestCaseTestName string - -func init() { - // We use environment variables instead of flags `go test ./...` throws for all packages - // that don't have the flag defined - badgerFileValue, _ := os.LookupEnv(fileBadgerEnvName) - badgerInMemoryValue, _ := os.LookupEnv(memoryBadgerEnvName) - databaseDir, _ = os.LookupEnv(fileBadgerPathEnvName) - detectDbChangesValue, _ := os.LookupEnv(detectDbChangesEnvName) - inMemoryStoreValue, _ := os.LookupEnv(inMemoryEnvName) - 
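
For contrast with the `RequestTestCase` fields being removed from the old framework here, a small example of how a test reads against the new action-based `TestCase` introduced in `testCase.go`. The schema, documents, request and results are invented for illustration; only the action types and the `ExecuteTestCase` signature come from this patch:

```go
package tests

import "testing"

// TestUserUpdateExample is illustrative only — contents are invented.
func TestUserUpdateExample(t *testing.T) {
	ExecuteTestCase(t, []string{"Users"}, TestCase{
		Description: "Create then update a single document",
		Actions: []any{
			SchemaUpdate{
				Schema: `
					type Users {
						name: String
						age: Int
					}
				`,
			},
			CreateDoc{CollectionId: 0, Doc: `{"name": "John", "age": 21}`},
			UpdateDoc{CollectionId: 0, DocId: 0, Doc: `{"age": 22}`},
			Request{
				Request: `query { Users { name } }`,
				Results: []map[string]any{
					{"name": "John"},
				},
			},
		},
	})
}
```
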
repositoryValue, repositorySpecified := os.LookupEnv(repositoryEnvName) - setupOnlyValue, _ := os.LookupEnv(setupOnlyEnvName) - targetBranchValue, targetBranchSpecified := os.LookupEnv(targetBranchEnvName) - - badgerFile = getBool(badgerFileValue) - badgerInMemory = getBool(badgerInMemoryValue) - inMemoryStore = getBool(inMemoryStoreValue) - DetectDbChanges = getBool(detectDbChangesValue) - SetupOnly = getBool(setupOnlyValue) - - if !repositorySpecified { - repositoryValue = "git@github.com:sourcenetwork/defradb.git" - } - - if !targetBranchSpecified { - targetBranchValue = "develop" - } - - // default is to run against all - if !badgerInMemory && !badgerFile && !inMemoryStore && !DetectDbChanges { - badgerInMemory = true - // Testing against the file system is off by default - badgerFile = false - inMemoryStore = true - } - - if DetectDbChanges { - detectDbChangesInit(repositoryValue, targetBranchValue) - } -} - -func getBool(val string) bool { - switch strings.ToLower(val) { - case "true": - return true - default: - return false - } -} - -func IsDetectingDbChanges() bool { - return DetectDbChanges -} - -// AssertPanicAndSkipChangeDetection asserts that the code of function actually panics, -// also ensures the change detection is skipped so no false fails happen. -// -// Usage: AssertPanicAndSkipChangeDetection(t, func() { executeTestCase(t, test) }) -func AssertPanicAndSkipChangeDetection(t *testing.T, f assert.PanicTestFunc) bool { - if IsDetectingDbChanges() { - // The `assert.Panics` call will falsely fail if this test is executed during - // a detect changes test run - t.Skip() - } - return assert.Panics(t, f, "expected a panic, but none found.") -} - -func NewBadgerMemoryDB(ctx context.Context, dbopts ...db.Option) (databaseInfo, error) { - opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)} - rootstore, err := badgerds.NewDatastore("", &opts) - if err != nil { - return databaseInfo{}, err - } - - dbopts = append(dbopts, db.WithUpdateEvents()) - - db, err := db.NewDB(ctx, rootstore, dbopts...) 
- if err != nil { - return databaseInfo{}, err - } - - return databaseInfo{ - name: "badger-in-memory", - db: db, - rootstore: rootstore, - }, nil -} - -func NewInMemoryDB(ctx context.Context) (databaseInfo, error) { - rootstore := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, rootstore, db.WithUpdateEvents()) - if err != nil { - return databaseInfo{}, err - } - - return databaseInfo{ - name: "defra-memory-datastore", - db: db, - rootstore: rootstore, - }, nil -} - -func NewBadgerFileDB(ctx context.Context, t testing.TB) (databaseInfo, error) { - var path string - if databaseDir == "" { - path = t.TempDir() - } else { - path = databaseDir - } - - return newBadgerFileDB(ctx, t, path) -} - -func newBadgerFileDB(ctx context.Context, t testing.TB, path string) (databaseInfo, error) { - opts := badgerds.Options{Options: badger.DefaultOptions(path)} - rootstore, err := badgerds.NewDatastore(path, &opts) - if err != nil { - return databaseInfo{}, err - } - - db, err := db.NewDB(ctx, rootstore, db.WithUpdateEvents()) - if err != nil { - return databaseInfo{}, err - } - - return databaseInfo{ - name: "badger-file-system", - path: path, - db: db, - rootstore: rootstore, - }, nil -} - -func GetDatabases(ctx context.Context, t *testing.T) ([]databaseInfo, error) { - databases := []databaseInfo{} - - if badgerInMemory { - badgerIMDatabase, err := NewBadgerMemoryDB(ctx) - if err != nil { - return nil, err - } - databases = append(databases, badgerIMDatabase) - } - - if badgerFile { - badgerIMDatabase, err := NewBadgerFileDB(ctx, t) - if err != nil { - return nil, err - } - databases = append(databases, badgerIMDatabase) - } - - if inMemoryStore { - inMemoryDatabase, err := NewInMemoryDB(ctx) - if err != nil { - return nil, err - } - databases = append(databases, inMemoryDatabase) - } - - return databases, nil -} - func ExecuteRequestTestCase( t *testing.T, schema string, @@ -503,97 +261,6 @@ func ExecuteRequestTestCase( } } -func detectDbChangesInit(repository string, targetBranch string) { - badgerFile = true - badgerInMemory = false - - if SetupOnly { - // Only the primary test process should perform the setup below - return - } - - tempDir := os.TempDir() - - latestTargetCommitHash := getLatestCommit(repository, targetBranch) - detectDbChangesCodeDir = path.Join(tempDir, "defra", latestTargetCommitHash, "code") - - _, err := os.Stat(detectDbChangesCodeDir) - // Warning - there is a race condition here, where if running multiple packages in - // parallel (as per default) against a new target commit multiple test pacakges will - // try and clone the target branch at the same time (and will fail). - // This could be solved by using a file lock or similar, however running the change - // detector in parallel is significantly slower than running it serially due to machine - // resource constraints, so I am leaving the race condition in and recommending running - // the change detector with the CLI args `-p 1` - if os.IsNotExist(err) { - cloneCmd := exec.Command( - "git", - "clone", - "-b", - targetBranch, - "--single-branch", - repository, - detectDbChangesCodeDir, - ) - cloneCmd.Stdout = os.Stdout - cloneCmd.Stderr = os.Stderr - err := cloneCmd.Run() - if err != nil { - panic(err) - } - } else if err != nil { - panic(err) - } else { - // Cache must be cleaned, or it might not run the test setup! - // Note: this also acts as a race condition if multiple build are running against the - // same target if this happens some tests might be silently skipped if the - // child-setup fails. 
Currently I think it is worth it for slightly faster build - // times, but feel very free to change this! - goTestCacheCmd := exec.Command("go", "clean", "-testcache") - goTestCacheCmd.Dir = detectDbChangesCodeDir - err = goTestCacheCmd.Run() - if err != nil { - panic(err) - } - } - - areDatabaseFormatChangesDocumented = checkIfDatabaseFormatChangesAreDocumented() -} - -// Returns true if test should pass early -func DetectDbChangesPreTestChecks( - t *testing.T, - collectionNames []string, - isTransactional bool, -) bool { - if previousTestCaseTestName == t.Name() { - // The database format changer currently only supports running the first test - // case, if a second case is detected we return early - return true - } - previousTestCaseTestName = t.Name() - - if areDatabaseFormatChangesDocumented { - // If we are checking that database formatting changes have been made and - // documented, and changes are documented, then the tests can all pass. - return true - } - - if isTransactional { - // Transactional requests are not yet supported by the database change - // detector, so we skip the test - t.SkipNow() - } - - if len(collectionNames) == 0 { - // If the test doesn't specify any collections, then we can't use it to check - // the database format, so we skip it - t.SkipNow() - } - - return false -} - func SetupDatabase( ctx context.Context, t *testing.T, @@ -658,255 +325,3 @@ func SetupDatabase( } } } - -func SetupDatabaseUsingTargetBranch( - ctx context.Context, - t *testing.T, - dbi databaseInfo, - collectionNames []string, -) databaseInfo { - // Close this database instance so it may be re-inited in the child process, - // and this one post-child - dbi.db.Close(ctx) - - currentTestPackage, err := os.Getwd() - if err != nil { - panic(err) - } - - targetTestPackage := detectDbChangesCodeDir + "/tests/integration/" + strings.Split( - currentTestPackage, - "/tests/integration/", - )[1] - - // If we are checking for database changes, and we are not seting up the database, - // then we must be in the main test process, and need to create a new process - // setting up the database for this test using the old branch We should not setup - // the database using the current branch/process - goTestCmd := exec.Command( - "go", - "test", - "./...", - "--run", - fmt.Sprintf("^%s$", t.Name()), - "-v", - ) - - goTestCmd.Dir = targetTestPackage - goTestCmd.Env = os.Environ() - goTestCmd.Env = append( - goTestCmd.Env, - setupOnlyEnvName+"=true", - fileBadgerPathEnvName+"="+dbi.path, - ) - out, err := goTestCmd.Output() - - if err != nil { - // If file is not found - this must be a new test and - // doesn't exist in the target branch, so we pass it - // because the child process tries to run the test, but - // if it doesnt find it, the parent test should pass (not panic). - if strings.Contains(err.Error(), ": no such file or directory") { - t.SkipNow() - } else { - // Only log the output if there is an error different from above, - // logging child test runs confuses the go test runner making it - // think there are no tests in the parent run (it will still - // run everything though)! 
- log.ErrorE(ctx, string(out), err) - panic(err) - } - } - - refreshedDb, err := newBadgerFileDB(ctx, t, dbi.path) - if err != nil { - panic(err) - } - - _, err = refreshedDb.db.GetCollectionByName(ctx, collectionNames[0]) - if err != nil { - if err.Error() == "datastore: key not found" { - // If collection is not found - this must be a new test and - // doesn't exist in the target branch, so we pass it - t.SkipNow() - } else { - panic(err) - } - } - return refreshedDb -} - -func assertRequestResults( - ctx context.Context, - t *testing.T, - description string, - result *client.GQLResult, - expectedResults []map[string]any, - expectedError string, -) bool { - if AssertErrors(t, description, result.Errors, expectedError) { - return true - } - - // Note: if result.Data == nil this panics (the panic seems useful while testing). - resultantData := result.Data.([]map[string]any) - - log.Info(ctx, "", logging.NewKV("RequestResults", result.Data)) - - // compare results - assert.Equal(t, len(expectedResults), len(resultantData), description) - if len(expectedResults) == 0 { - assert.Equal(t, expectedResults, resultantData) - } - for i, result := range resultantData { - if len(expectedResults) > i { - assert.Equal(t, expectedResults[i], result, description) - } - } - - return false -} - -// Asserts as to whether an error has been raised as expected (or not). If an expected -// error has been raised it will return true, returns false in all other cases. -func AssertError(t *testing.T, description string, err error, expectedError string) bool { - if err == nil { - return false - } - - if expectedError == "" { - assert.NoError(t, err, description) - return false - } else { - if !strings.Contains(err.Error(), expectedError) { - assert.ErrorIs(t, err, errors.New(expectedError)) - return false - } - return true - } -} - -// Asserts as to whether an error has been raised as expected (or not). If an expected -// error has been raised it will return true, returns false in all other cases. 
-func AssertErrors( - t *testing.T, - description string, - errs []any, - expectedError string, -) bool { - if expectedError == "" { - assert.Empty(t, errs, description) - } else { - for _, e := range errs { - // This is always a string at the moment, add support for other types as and when needed - errorString := e.(string) - if !strings.Contains(errorString, expectedError) { - // We use ErrorIs for clearer failures (is a error comparision even if it is just a string) - assert.ErrorIs(t, errors.New(errorString), errors.New(expectedError)) - continue - } - return true - } - } - return false -} - -func checkIfDatabaseFormatChangesAreDocumented() bool { - previousDbChangeFiles, targetDirFound := getDatabaseFormatDocumentation( - detectDbChangesCodeDir, - false, - ) - if !targetDirFound { - panic("Documentation directory not found") - } - - previousDbChanges := make(map[string]struct{}, len(previousDbChangeFiles)) - for _, f := range previousDbChangeFiles { - // Note: we assume flat directory for now - sub directories are not expanded - previousDbChanges[f.Name()] = struct{}{} - } - - _, thisFilePath, _, _ := runtime.Caller(0) - currentDbChanges, currentDirFound := getDatabaseFormatDocumentation(thisFilePath, true) - if !currentDirFound { - panic("Documentation directory not found") - } - - for _, f := range currentDbChanges { - if _, isChangeOld := previousDbChanges[f.Name()]; !isChangeOld { - // If there is a new file in the directory then the change - // has been documented and the test should pass - return true - } - } - - return false -} - -func getDatabaseFormatDocumentation(startPath string, allowDescend bool) ([]fs.DirEntry, bool) { - startInfo, err := os.Stat(startPath) - if err != nil { - panic(err) - } - - var currentDirectory string - if startInfo.IsDir() { - currentDirectory = startPath - } else { - currentDirectory = path.Dir(startPath) - } - - for { - directoryContents, err := os.ReadDir(currentDirectory) - if err != nil { - panic(err) - } - - for _, directoryItem := range directoryContents { - directoryItemPath := path.Join(currentDirectory, directoryItem.Name()) - if directoryItem.Name() == documentationDirectoryName { - probableFormatChangeDirectoryContents, err := os.ReadDir(directoryItemPath) - if err != nil { - panic(err) - } - for _, possibleDocumentationItem := range probableFormatChangeDirectoryContents { - if path.Ext(possibleDocumentationItem.Name()) == ".md" { - // If the directory's name matches the expected, and contains .md files - // we assume it is the documentation directory - return probableFormatChangeDirectoryContents, true - } - } - } else { - if directoryItem.IsDir() { - childContents, directoryFound := getDatabaseFormatDocumentation(directoryItemPath, false) - if directoryFound { - return childContents, true - } - } - } - } - - if allowDescend { - // If not found in this directory, continue down the path - currentDirectory = path.Dir(currentDirectory) - - if currentDirectory == "." || currentDirectory == "/" { - panic("Database documentation directory not found") - } - } else { - return []fs.DirEntry{}, false - } - } -} - -func getLatestCommit(repoName string, branchName string) string { - cmd := exec.Command("git", "ls-remote", repoName, "refs/heads/"+branchName) - result, err := cmd.Output() - if err != nil { - panic(err) - } - - // This is a tab, not a space! 
- seperator := "\t" - return strings.Split(string(result), seperator)[0] -} diff --git a/tests/integration/utils2.go b/tests/integration/utils2.go new file mode 100644 index 0000000000..a7fe355293 --- /dev/null +++ b/tests/integration/utils2.go @@ -0,0 +1,781 @@ +// Copyright 2022 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package tests + +import ( + "context" + "os" + "strings" + "syscall" + "testing" + "time" + + badger "github.com/dgraph-io/badger/v3" + ds "github.com/ipfs/go-datastore" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/datastore" + badgerds "github.com/sourcenetwork/defradb/datastore/badger/v3" + "github.com/sourcenetwork/defradb/datastore/memory" + "github.com/sourcenetwork/defradb/db" + "github.com/sourcenetwork/defradb/errors" + "github.com/sourcenetwork/defradb/logging" +) + +const ( + memoryBadgerEnvName = "DEFRA_BADGER_MEMORY" + fileBadgerEnvName = "DEFRA_BADGER_FILE" + fileBadgerPathEnvName = "DEFRA_BADGER_FILE_PATH" + inMemoryEnvName = "DEFRA_IN_MEMORY" + setupOnlyEnvName = "DEFRA_SETUP_ONLY" + detectDbChangesEnvName = "DEFRA_DETECT_DATABASE_CHANGES" + repositoryEnvName = "DEFRA_CODE_REPOSITORY" + targetBranchEnvName = "DEFRA_TARGET_BRANCH" + documentationDirectoryName = "data_format_changes" +) + +// The integration tests open many files. This increases the limits on the number of open files of +// the process to fix this issue. This is done by default in Go 1.19. 
+func init() { + var lim syscall.Rlimit + if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &lim); err == nil && lim.Cur != lim.Max { + lim.Cur = lim.Max + err = syscall.Setrlimit(syscall.RLIMIT_NOFILE, &lim) + if err != nil { + log.ErrorE(context.Background(), "error setting rlimit", err) + } + } +} + +var ( + log = logging.MustNewLogger("defra.tests.integration") + badgerInMemory bool + badgerFile bool + inMemoryStore bool +) + +const subscriptionTimeout = 1 * time.Second + +type databaseInfo struct { + name string + path string + db client.DB + rootstore ds.Batching +} + +func (dbi databaseInfo) Name() string { + return dbi.name +} + +func (dbi databaseInfo) Rootstore() ds.Batching { + return dbi.rootstore +} + +func (dbi databaseInfo) DB() client.DB { + return dbi.db +} + +var databaseDir string + +/* +If this is set to true the integration test suite will instead of it's normal profile do +the following: + +On [package] Init: + - Get the (local) latest commit from the target/parent branch // code assumes + git fetch has been done + - Check to see if a clone of that commit/branch is available in the temp dir, and + if not clone the target branch + - Check to see if there are any new .md files in the current branch's data_format_changes + dir (vs the target branch) + +For each test: + - If new documentation detected, pass the test and exit + - Create a new (test/auto-deleted) temp dir for defra to live/run in + - Run the test setup (add initial schema, docs, updates) using the target branch (test is skipped + if test does not exist in target and is new to this branch) + - Run the test request and assert results (as per normal tests) using the current branch +*/ +var DetectDbChanges bool +var SetupOnly bool + +var detectDbChangesCodeDir string +var areDatabaseFormatChangesDocumented bool +var previousTestCaseTestName string + +func init() { + // We use environment variables instead of flags `go test ./...` throws for all packages + // that don't have the flag defined + badgerFileValue, _ := os.LookupEnv(fileBadgerEnvName) + badgerInMemoryValue, _ := os.LookupEnv(memoryBadgerEnvName) + databaseDir, _ = os.LookupEnv(fileBadgerPathEnvName) + detectDbChangesValue, _ := os.LookupEnv(detectDbChangesEnvName) + inMemoryStoreValue, _ := os.LookupEnv(inMemoryEnvName) + repositoryValue, repositorySpecified := os.LookupEnv(repositoryEnvName) + setupOnlyValue, _ := os.LookupEnv(setupOnlyEnvName) + targetBranchValue, targetBranchSpecified := os.LookupEnv(targetBranchEnvName) + + badgerFile = getBool(badgerFileValue) + badgerInMemory = getBool(badgerInMemoryValue) + inMemoryStore = getBool(inMemoryStoreValue) + DetectDbChanges = getBool(detectDbChangesValue) + SetupOnly = getBool(setupOnlyValue) + + if !repositorySpecified { + repositoryValue = "git@github.com:sourcenetwork/defradb.git" + } + + if !targetBranchSpecified { + targetBranchValue = "develop" + } + + // default is to run against all + if !badgerInMemory && !badgerFile && !inMemoryStore && !DetectDbChanges { + badgerInMemory = true + // Testing against the file system is off by default + badgerFile = false + inMemoryStore = true + } + + if DetectDbChanges { + detectDbChangesInit(repositoryValue, targetBranchValue) + } +} + +func getBool(val string) bool { + switch strings.ToLower(val) { + case "true": + return true + default: + return false + } +} + +// AssertPanicAndSkipChangeDetection asserts that the code of function actually panics, +// +// also ensures the change detection is skipped so no false fails happen. 
+// +// Usage: AssertPanicAndSkipChangeDetection(t, func() { executeTestCase(t, test) }) +func AssertPanicAndSkipChangeDetection(t *testing.T, f assert.PanicTestFunc) bool { + if IsDetectingDbChanges() { + // The `assert.Panics` call will falsely fail if this test is executed during + // a detect changes test run + t.Skip() + } + return assert.Panics(t, f, "expected a panic, but none found.") +} + +func NewBadgerMemoryDB(ctx context.Context, dbopts ...db.Option) (databaseInfo, error) { + opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)} + rootstore, err := badgerds.NewDatastore("", &opts) + if err != nil { + return databaseInfo{}, err + } + + dbopts = append(dbopts, db.WithUpdateEvents()) + + db, err := db.NewDB(ctx, rootstore, dbopts...) + if err != nil { + return databaseInfo{}, err + } + + return databaseInfo{ + name: "badger-in-memory", + db: db, + rootstore: rootstore, + }, nil +} + +func NewInMemoryDB(ctx context.Context) (databaseInfo, error) { + rootstore := memory.NewDatastore(ctx) + db, err := db.NewDB(ctx, rootstore, db.WithUpdateEvents()) + if err != nil { + return databaseInfo{}, err + } + + return databaseInfo{ + name: "defra-memory-datastore", + db: db, + rootstore: rootstore, + }, nil +} + +func NewBadgerFileDB(ctx context.Context, t testing.TB) (databaseInfo, error) { + var path string + if databaseDir == "" { + path = t.TempDir() + } else { + path = databaseDir + } + + return newBadgerFileDB(ctx, t, path) +} + +func newBadgerFileDB(ctx context.Context, t testing.TB, path string) (databaseInfo, error) { + opts := badgerds.Options{Options: badger.DefaultOptions(path)} + rootstore, err := badgerds.NewDatastore(path, &opts) + if err != nil { + return databaseInfo{}, err + } + + db, err := db.NewDB(ctx, rootstore, db.WithUpdateEvents()) + if err != nil { + return databaseInfo{}, err + } + + return databaseInfo{ + name: "badger-file-system", + path: path, + db: db, + rootstore: rootstore, + }, nil +} + +func GetDatabases(ctx context.Context, t *testing.T) ([]databaseInfo, error) { + databases := []databaseInfo{} + + if badgerInMemory { + badgerIMDatabase, err := NewBadgerMemoryDB(ctx) + if err != nil { + return nil, err + } + databases = append(databases, badgerIMDatabase) + } + + if badgerFile { + badgerIMDatabase, err := NewBadgerFileDB(ctx, t) + if err != nil { + return nil, err + } + databases = append(databases, badgerIMDatabase) + } + + if inMemoryStore { + inMemoryDatabase, err := NewInMemoryDB(ctx) + if err != nil { + return nil, err + } + databases = append(databases, inMemoryDatabase) + } + + return databases, nil +} + +// ExecuteTestCase executes the given TestCase against the configured database +// instances. +// +// Will also attempt to detect incompatible changes in the persisted data if +// configured to do so (the CI will do so, but disabled by default as it is slow). +func ExecuteTestCase( + t *testing.T, + collectionNames []string, + testCase TestCase, +) { + if DetectDbChanges && DetectDbChangesPreTestChecks(t, collectionNames) { + return + } + + ctx := context.Background() + dbs, err := GetDatabases(ctx, t) + require.Nil(t, err) + // Assert that this is not empty to protect against accidental mis-configurations, + // otherwise an empty set would silently pass all the tests. 
+ require.NotEmpty(t, dbs) + + for _, dbi := range dbs { + executeTestCase(ctx, t, collectionNames, testCase, dbi) + dbi.db.Close(ctx) + } +} + +func executeTestCase( + ctx context.Context, + t *testing.T, + collectionNames []string, + testCase TestCase, + dbi databaseInfo, +) { + var done bool + log.Info(ctx, testCase.Description, logging.NewKV("Database", dbi.name)) + + if DetectDbChanges && !SetupOnly { + // Setup the database using the target branch, and then refresh the current instance + dbi = SetupDatabaseUsingTargetBranch(ctx, t, dbi, collectionNames) + } + + startActionIndex, endActionIndex := getActionRange(testCase) + collections := []client.Collection{} + documents := [][]*client.Document{} + txns := []datastore.Txn{} + allActionsDone := make(chan struct{}) + resultsChans := []subscriptionResult{} + + for i := startActionIndex; i <= endActionIndex; i++ { + switch action := testCase.Actions[i].(type) { + case SchemaUpdate: + if updateSchema(ctx, t, dbi.db, testCase, action) { + return + } + // If the schema was updated we need to refresh the collection definitions. + collections = getCollections(ctx, t, dbi.db, collectionNames) + + case CreateDoc: + if documents, done = createDoc(ctx, t, testCase, collections, documents, action); done { + return + } + + case UpdateDoc: + if updateDoc(ctx, t, testCase, collections, documents, action) { + return + } + + case TransactionRequest2: + if txns, done = executeTransactionRequest(ctx, t, dbi.db, txns, testCase, action); done { + return + } + + case TransactionCommit: + if commitTransaction(ctx, t, txns, testCase, action) { + return + } + + case SubscriptionRequest2: + var resultsChan subscriptionResult + resultsChan, done = executeSubscriptionRequest(ctx, t, allActionsDone, dbi.db, testCase, action) + if done { + return + } + resultsChans = append(resultsChans, resultsChan) + + case Request: + if executeRequest(ctx, t, dbi.db, testCase, action) { + return + } + + case SetupComplete: + // no-op, just continue. + + default: + t.Fatalf("Unknown action type %T", action) + } + } + + if len(resultsChans) > 0 { + // Notify any active subscriptions that all requests have been sent. + close(allActionsDone) + } + for _, rChans := range resultsChans { + select { + case subscriptionAssert := <-rChans.subscriptionAssert: + // We want to assert back in the main thread so failures get recorded properly + subscriptionAssert() + + // a safety in case the stream hangs or no results are expected. + case <-time.After(subscriptionTimeout): + if rChans.expectedTimeout { + continue + } + assert.Fail(t, "timeout occured while waiting for data stream", testCase.Description) + } + } +} + +// getActionRange returns the index of the first action to be run, and the last. +// +// Not all processes will run all actions - if this is a change detector run they +// will be split. +// +// If a SetupComplete action is provided, the actions will be split there, if not +// they will be split at the first non SchemaUpdate/CreateDoc/UpdateDoc action. 
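
As the comment above describes, the split point can also be pinned explicitly. A sketch (schema and document invented) of a test that uses `SetupComplete` so that, when the change detector is active, everything before it runs in the setup phase against the target branch and everything after it runs against the current branch:

```go
package tests

import "testing"

// TestChangeDetectorSplitExample is a sketch with invented contents.
func TestChangeDetectorSplitExample(t *testing.T) {
	ExecuteTestCase(t, []string{"Users"}, TestCase{
		Actions: []any{
			SchemaUpdate{
				Schema: `
					type Users {
						name: String
					}
				`,
			},
			CreateDoc{CollectionId: 0, Doc: `{"name": "Shahzad"}`},
			// Explicit split point for the change detector.
			SetupComplete{},
			Request{
				Request: `query { Users { name } }`,
				Results: []map[string]any{{"name": "Shahzad"}},
			},
		},
	})
}
```
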
+func getActionRange(testCase TestCase) (int, int) { + startIndex := 0 + endIndex := len(testCase.Actions) - 1 + + if !DetectDbChanges { + return startIndex, endIndex + } + + setupCompleteIndex := -1 + firstNonSetupIndex := -1 + +ActionLoop: + for i := range testCase.Actions { + switch testCase.Actions[i].(type) { + case SetupComplete: + setupCompleteIndex = i + // We dont care about anything else if this has been explicitly provided + break ActionLoop + + case SchemaUpdate, CreateDoc, UpdateDoc: + continue + + default: + firstNonSetupIndex = i + break ActionLoop + } + } + + if SetupOnly { + if setupCompleteIndex > -1 { + endIndex = setupCompleteIndex + } else if firstNonSetupIndex > -1 { + // -1 to exclude this index + endIndex = firstNonSetupIndex - 1 + } + } else { + if setupCompleteIndex > -1 { + // +1 to exclude the SetupComplete action + startIndex = setupCompleteIndex + 1 + } else if firstNonSetupIndex > -1 { + // We must not set this to -1 :) + startIndex = firstNonSetupIndex + } + } + + return startIndex, endIndex +} + +// getCollections returns all the collections of the given name, preserving order. +// +// If a given collection is not present in the database the value at the corresponding +// result-index will be nil. +func getCollections( + ctx context.Context, + t *testing.T, + db client.DB, + collectionNames []string, +) []client.Collection { + collections := make([]client.Collection, len(collectionNames)) + + allCollections, err := db.GetAllCollections(ctx) + require.Nil(t, err) + + for i, collectionName := range collectionNames { + for _, collection := range allCollections { + if collection.Name() == collectionName { + collections[i] = collection + break + } + } + } + return collections +} + +// updateSchema updates the schema using the given details. +func updateSchema( + ctx context.Context, + t *testing.T, + db client.DB, + testCase TestCase, + action SchemaUpdate, +) bool { + err := db.AddSchema(ctx, action.Schema) + return AssertError(t, testCase.Description, err, action.ExpectedError) +} + +// createDoc creates a document using the collection api and caches it in the +// given documents slice. +func createDoc( + ctx context.Context, + t *testing.T, + testCase TestCase, + collections []client.Collection, + documents [][]*client.Document, + action CreateDoc, +) ([][]*client.Document, bool) { + doc, err := client.NewDocFromJSON([]byte(action.Doc)) + if AssertError(t, testCase.Description, err, action.ExpectedError) { + return nil, true + } + + err = collections[action.CollectionId].Save(ctx, doc) + if AssertError(t, testCase.Description, err, action.ExpectedError) { + return nil, true + } + + if action.CollectionId >= len(documents) { + // Expand the slice if required, so that the document can be accessed by collection index + documents = append(documents, make([][]*client.Document, action.CollectionId-len(documents)+1)...) + } + documents[action.CollectionId] = append(documents[action.CollectionId], doc) + + return documents, false +} + +// updateDoc updates a document using the collection api. 
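
Since `DocId` is the creation-order index cached by `createDoc` above rather than anything stored in the database, a second document created in the same collection is addressed as index 1. A fragment to illustrate, with invented contents:

```go
package tests

// exampleDocIdActions is illustrative only: DocId follows creation order.
var exampleDocIdActions = []any{
	CreateDoc{CollectionId: 0, Doc: `{"name": "John"}`}, // cached as DocId 0
	CreateDoc{CollectionId: 0, Doc: `{"name": "Fred"}`}, // cached as DocId 1
	UpdateDoc{CollectionId: 0, DocId: 1, Doc: `{"name": "Freddie"}`},
}
```
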
+func updateDoc( + ctx context.Context, + t *testing.T, + testCase TestCase, + collections []client.Collection, + documents [][]*client.Document, + action UpdateDoc, +) bool { + doc := documents[action.CollectionId][action.DocId] + + err := doc.SetWithJSON([]byte(action.Doc)) + if AssertError(t, testCase.Description, err, action.ExpectedError) { + return true + } + + err = collections[action.CollectionId].Save(ctx, doc) + return AssertError(t, testCase.Description, err, action.ExpectedError) +} + +// executeTransactionRequest executes the given transactional request. +// +// It will create and cache a new transaction if it is the first of the given +// TransactionId. If an error is returned the transaction will be discarded before +// this function returns. +func executeTransactionRequest( + ctx context.Context, + t *testing.T, + db client.DB, + txns []datastore.Txn, + testCase TestCase, + action TransactionRequest2, +) ([]datastore.Txn, bool) { + if action.TransactionId >= len(txns) { + // Extend the txn slice so this txn can fit and be accessed by TransactionId + txns = append(txns, make([]datastore.Txn, action.TransactionId-len(txns)+1)...) + } + + if txns[action.TransactionId] == nil { + // Create a new transaction if one does not already exist. + txn, err := db.NewTxn(ctx, false) + if AssertError(t, testCase.Description, err, action.ExpectedError) { + txn.Discard(ctx) + return nil, true + } + + txns[action.TransactionId] = txn + } + + result := db.ExecTransactionalRequest(ctx, action.Request, txns[action.TransactionId]) + done := assertRequestResults( + ctx, + t, + testCase.Description, + &result.GQL, + action.Results, + action.ExpectedError, + ) + + if done { + // Make sure to discard the transaction before exit, else an unwanted error + // may surface later (e.g. on database close). + txns[action.TransactionId].Discard(ctx) + return nil, true + } + + return txns, false +} + +// commitTransaction commits the given transaction. +// +// Will panic if the given transaction does not exist. Discards the transaction if +// an error is returned on commit. +func commitTransaction( + ctx context.Context, + t *testing.T, + txns []datastore.Txn, + testCase TestCase, + action TransactionCommit, +) bool { + err := txns[action.TransactionId].Commit(ctx) + if err != nil { + txns[action.TransactionId].Discard(ctx) + } + + return AssertError(t, testCase.Description, err, action.ExpectedError) +} + +// executeRequest executes the given request. +func executeRequest( + ctx context.Context, + t *testing.T, + db client.DB, + testCase TestCase, + action Request, +) bool { + result := db.ExecRequest(ctx, action.Request) + return assertRequestResults( + ctx, + t, + testCase.Description, + &result.GQL, + action.Results, + action.ExpectedError, + ) +} + +// subscriptionResult wraps details required to assert that the +// subscription recieves all expected results whilst it remains +// active. +type subscriptionResult struct { + // If true, this subscription expects to timeout. + expectedTimeout bool + + // A channel that will recieve a function that asserts that + // the subscription recieved all its expected results and no more. + // It should be called from the main test routine to ensure that + // failures are recorded properly. It will only yield once, once + // the subscription has terminated. + subscriptionAssert chan func() +} + +// executeSubscriptionRequest executes the given subscription request, returning +// a channel that will recieve a single event once the subscription has been completed. 
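
`executeTransactionRequest` and `commitTransaction` below give transactional tests first-class actions (and, per the commit message, change-detector support). A sketch of the shape such a test takes — the schema and request strings are invented placeholders; the point is the shared `TransactionId` and the explicit commit, not the exact GQL:

```go
package tests

import "testing"

// TestTransactionCommitExample is a sketch with invented contents.
func TestTransactionCommitExample(t *testing.T) {
	ExecuteTestCase(t, []string{"Users"}, TestCase{
		Actions: []any{
			SchemaUpdate{
				Schema: `
					type Users {
						name: String
					}
				`,
			},
			// Runs on transaction 0, which is created on first use.
			TransactionRequest2{
				TransactionId: 0,
				Request:       `mutation { create_Users(data: "{\"name\": \"John\"}") { name } }`,
				Results:       []map[string]any{{"name": "John"}},
			},
			// Nothing is visible to non-transactional requests until this commits.
			TransactionCommit{TransactionId: 0},
			Request{
				Request: `query { Users { name } }`,
				Results: []map[string]any{{"name": "John"}},
			},
		},
	})
}
```
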
+func executeSubscriptionRequest( + ctx context.Context, + t *testing.T, + allActionsDone chan struct{}, + db client.DB, + testCase TestCase, + action SubscriptionRequest2, +) (subscriptionResult, bool) { + resultChan := subscriptionResult{ + expectedTimeout: action.ExpectedTimout, + subscriptionAssert: make(chan func()), + } + + result := db.ExecRequest(ctx, action.Request) + if AssertErrors(t, testCase.Description, result.GQL.Errors, action.ExpectedError) { + return subscriptionResult{}, true + } + + go func() { + data := []map[string]any{} + errs := []any{} + + stream := result.Pub.Stream() + for { + select { + case s := <-stream: + sResult, _ := s.(client.GQLResult) + sData, _ := sResult.Data.([]map[string]any) + errs = append(errs, sResult.Errors...) + data = append(data, sData...) + + case <-allActionsDone: + // Once all other actions have been completed, sleep. + // This is a lazy way to allow the subscription to recieve + // the events generated, and to ensure that no more than are + // expected are recieved. It should probably be done in a better + // way than this at somepoint, but is good enough for now. + time.Sleep(subscriptionTimeout) + + finalResult := &client.GQLResult{ + Data: data, + Errors: errs, + } + + resultChan.subscriptionAssert <- func() { + // This assert should be executed from the main test routine + // so that failures will be properly handled. + assertRequestResults( + ctx, + t, + testCase.Description, + finalResult, + action.Results, + action.ExpectedError, + ) + } + + return + } + } + }() + + return resultChan, false +} + +// Asserts as to whether an error has been raised as expected (or not). If an expected +// error has been raised it will return true, returns false in all other cases. +func AssertError(t *testing.T, description string, err error, expectedError string) bool { + if err == nil { + return false + } + + if expectedError == "" { + assert.NoError(t, err, description) + return false + } else { + if !strings.Contains(err.Error(), expectedError) { + assert.ErrorIs(t, err, errors.New(expectedError)) + return false + } + return true + } +} + +// Asserts as to whether an error has been raised as expected (or not). If an expected +// error has been raised it will return true, returns false in all other cases. +func AssertErrors( + t *testing.T, + description string, + errs []any, + expectedError string, +) bool { + if expectedError == "" { + assert.Empty(t, errs, description) + } else { + for _, e := range errs { + // This is always a string at the moment, add support for other types as and when needed + errorString := e.(string) + if !strings.Contains(errorString, expectedError) { + // We use ErrorIs for clearer failures (is a error comparision even if it is just a string) + assert.ErrorIs(t, errors.New(errorString), errors.New(expectedError)) + continue + } + return true + } + } + return false +} + +func assertRequestResults( + ctx context.Context, + t *testing.T, + description string, + result *client.GQLResult, + expectedResults []map[string]any, + expectedError string, +) bool { + if AssertErrors(t, description, result.Errors, expectedError) { + return true + } + + if expectedResults == nil && result.Data == nil { + return true + } + + // Note: if result.Data == nil this panics (the panic seems useful while testing). 
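
The goroutine in `executeSubscriptionRequest` collects everything the subscription publishes until `allActionsDone` closes, then waits one `subscriptionTimeout` before handing an assertion back to the main routine. In practice a subscription action sits alongside the mutations expected to trigger it; a sketch with invented request strings (set `ExpectedTimout: true` instead of `Results` to assert that nothing arrives):

```go
package tests

import "testing"

// TestSubscriptionExample is a sketch with invented contents.
func TestSubscriptionExample(t *testing.T) {
	ExecuteTestCase(t, []string{"Users"}, TestCase{
		Actions: []any{
			SchemaUpdate{
				Schema: `
					type Users {
						name: String
					}
				`,
			},
			// Opened first; its stream is read until all other actions are done.
			SubscriptionRequest2{
				Request: `subscription { Users { name } }`,
				Results: []map[string]any{{"name": "John"}},
			},
			// The event this mutation generates is what the subscription should see.
			Request{
				Request: `mutation { create_Users(data: "{\"name\": \"John\"}") { name } }`,
				Results: []map[string]any{{"name": "John"}},
			},
		},
	})
}
```
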
+ resultantData := result.Data.([]map[string]any) + + log.Info(ctx, "", logging.NewKV("RequestResults", result.Data)) + + // compare results + assert.Equal(t, len(expectedResults), len(resultantData), description) + if len(expectedResults) == 0 { + assert.Equal(t, expectedResults, resultantData) + } + for i, result := range resultantData { + if len(expectedResults) > i { + assert.Equal(t, expectedResults[i], result, description) + } + } + + return false +} From d6ef44c719c061a9f4d04651d0f894ce5c6eb483 Mon Sep 17 00:00:00 2001 From: Andrew Sisley Date: Wed, 8 Feb 2023 19:53:20 -0500 Subject: [PATCH 03/12] Make old test framework to use new framework Changes the old test framework so that it is essentially syntax sugar on top of the new framework. This is temporary, and is done to allow us to introduce (and review) the new framework without having to migrate all our existing tests to the new framework. The old framework should be removed as soon as is convienent, and can be done so bit by bit. It also ensures that the new framework contains all the functionality of the old framework. Explain and collection tests use a different framework. Those frameworks have not been migrated here, but their tests should be migrated at some point - it should be fairly straight forward to extend the new test framework to do so. New integration tests should be written using the new framework only. This includes collection tests and explain tests. --- tests/integration/utils.go | 249 ++++++++++++------------------------- 1 file changed, 79 insertions(+), 170 deletions(-) diff --git a/tests/integration/utils.go b/tests/integration/utils.go index 216745c5ac..3afd13c183 100644 --- a/tests/integration/utils.go +++ b/tests/integration/utils.go @@ -13,15 +13,10 @@ package tests import ( "context" "testing" - "time" "github.com/sourcenetwork/immutable" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/logging" ) // Represents a subscription request. @@ -80,187 +75,101 @@ func ExecuteRequestTestCase( collectionNames []string, test RequestTestCase, ) { - isTransactional := len(test.TransactionalRequests) > 0 - - if DetectDbChanges && DetectDbChangesPreTestChecks(t, collectionNames, isTransactional) { - return - } - - // Must have a non-empty request. 
- if !isTransactional && test.Request == "" { - assert.Fail(t, "Test must have a non-empty request.", test.Description) + actions := []any{ + SchemaUpdate{ + Schema: schema, + }, } - ctx := context.Background() - dbs, err := GetDatabases(ctx, t) - if AssertError(t, test.Description, err, test.ExpectedError) { - return - } - require.NotEmpty(t, dbs) - - for _, dbi := range dbs { - log.Info(ctx, test.Description, logging.NewKV("Database", dbi.name)) - - if DetectDbChanges { - if SetupOnly { - SetupDatabase( - ctx, - t, - dbi, - schema, - collectionNames, - test.Description, - test.ExpectedError, - test.Docs, - immutable.Some(test.Updates), - ) - dbi.db.Close(ctx) - return - } - - dbi = SetupDatabaseUsingTargetBranch(ctx, t, dbi, collectionNames) - } else { - SetupDatabase( - ctx, - t, - dbi, - schema, - collectionNames, - test.Description, - test.ExpectedError, - test.Docs, - immutable.Some(test.Updates), + for collectionIndex, docs := range test.Docs { + for _, doc := range docs { + actions = append( + actions, + CreateDoc{ + CollectionId: collectionIndex, + Doc: doc, + }, ) } + } - // Create the transactions before executing the requests. - transactions := make([]datastore.Txn, 0, len(test.TransactionalRequests)) - erroredRequests := make([]bool, len(test.TransactionalRequests)) - for i, tq := range test.TransactionalRequests { - if len(transactions) < tq.TransactionId { - continue - } - - txn, err := dbi.db.NewTxn(ctx, false) - if err != nil { - if AssertError(t, test.Description, err, tq.ExpectedError) { - erroredRequests[i] = true - } - } - defer txn.Discard(ctx) - if len(transactions) <= tq.TransactionId { - transactions = transactions[:tq.TransactionId+1] - } - transactions[tq.TransactionId] = txn - } - - for i, tq := range test.TransactionalRequests { - if erroredRequests[i] { - continue - } - result := dbi.db.ExecTransactionalRequest(ctx, tq.Request, transactions[tq.TransactionId]) - if assertRequestResults(ctx, t, test.Description, &result.GQL, tq.Results, tq.ExpectedError) { - erroredRequests[i] = true + for collectionIndex, docUpdates := range test.Updates { + for docIndex, docs := range docUpdates { + for _, doc := range docs { + actions = append( + actions, + UpdateDoc{ + CollectionId: collectionIndex, + DocId: docIndex, + Doc: doc, + }, + ) } } + } - txnIndexesCommited := map[int]struct{}{} - for i, tq := range test.TransactionalRequests { - if erroredRequests[i] { - continue - } - if _, alreadyCommited := txnIndexesCommited[tq.TransactionId]; alreadyCommited { - continue - } - txnIndexesCommited[tq.TransactionId] = struct{}{} - - err := transactions[tq.TransactionId].Commit(ctx) - if AssertError(t, test.Description, err, tq.ExpectedError) { - erroredRequests[i] = true - } - } + for _, request := range test.TransactionalRequests { + actions = append( + actions, + TransactionRequest2(request), + ) + } - for i, tq := range test.TransactionalRequests { - if tq.ExpectedError != "" && !erroredRequests[i] { - assert.Fail(t, "Expected an error however none was raised.", test.Description) - } + // The old test framework commited all the transactions at the end + // so we can just lump these here, they must however be commited in + // the order in which they were first recieved. + txnIndexesCommited := map[int]struct{}{} + for _, request := range test.TransactionalRequests { + if _, alreadyCommited := txnIndexesCommited[request.TransactionId]; alreadyCommited { + // Only commit each transaction once. 
+ continue } - // We run the core request after the explicitly transactional ones to permit tests to actually - // call the request on the commited result of the transactional requests. - if !isTransactional || (isTransactional && test.Request != "") { - result := dbi.db.ExecRequest(ctx, test.Request) - if result.Pub != nil { - for _, q := range test.PostSubscriptionRequests { - dbi.db.ExecRequest(ctx, q.Request) - data := []map[string]any{} - errs := []any{} - if len(q.Results) > 1 { - for range q.Results { - select { - case s := <-result.Pub.Stream(): - sResult, _ := s.(client.GQLResult) - sData, _ := sResult.Data.([]map[string]any) - errs = append(errs, sResult.Errors...) - data = append(data, sData...) - // a safety in case the stream hangs. - case <-time.After(subscriptionTimeout): - assert.Fail(t, "timeout occured while waiting for data stream", test.Description) - } - } - } else { - select { - case s := <-result.Pub.Stream(): - sResult, _ := s.(client.GQLResult) - sData, _ := sResult.Data.([]map[string]any) - errs = append(errs, sResult.Errors...) - data = append(data, sData...) - // a safety in case the stream hangs or no results are expected. - case <-time.After(subscriptionTimeout): - if q.ExpectedTimout { - continue - } - assert.Fail(t, "timeout occured while waiting for data stream", test.Description) - } - } - gqlResult := &client.GQLResult{ - Data: data, - Errors: errs, - } - if assertRequestResults( - ctx, - t, - test.Description, - gqlResult, - q.Results, - q.ExpectedError, - ) { - continue - } - } - result.Pub.Unsubscribe() - } else { - if assertRequestResults( - ctx, - t, - test.Description, - &result.GQL, - test.Results, - test.ExpectedError, - ) { - continue - } + txnIndexesCommited[request.TransactionId] = struct{}{} + actions = append( + actions, + TransactionCommit{ + TransactionId: request.TransactionId, + ExpectedError: request.ExpectedError, + }, + ) + } - if test.ExpectedError != "" { - assert.Fail(t, "Expected an error however none was raised.", test.Description) - } - } - } + if test.Request != "" { + actions = append( + actions, + Request{ + ExpectedError: test.ExpectedError, + Request: test.Request, + Results: test.Results, + }, + ) + } - dbi.db.Close(ctx) + for _, request := range test.PostSubscriptionRequests { + actions = append( + actions, + SubscriptionRequest2{ + ExpectedError: request.ExpectedError, + Request: request.Request, + Results: request.Results, + ExpectedTimout: request.ExpectedTimout, + }, + ) } + + ExecuteTestCase( + t, + collectionNames, + TestCase{ + Description: test.Description, + Actions: actions, + }, + ) } +// SetupDatabase is persisted for the sake of the explain tests as they use a different +// test executor that calls this function. func SetupDatabase( ctx context.Context, t *testing.T, From 458d3c618054b0da8845c3230df277f1f0de1231 Mon Sep 17 00:00:00 2001 From: Andrew Sisley Date: Wed, 15 Feb 2023 11:32:42 -0500 Subject: [PATCH 04/12] PR FIXUP - ID not Id --- tests/integration/testCase.go | 4 ++-- tests/integration/utils.go | 4 ++-- tests/integration/utils2.go | 12 ++++++------ 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/tests/integration/testCase.go b/tests/integration/testCase.go index 8e7e36c605..822c6a5f42 100644 --- a/tests/integration/testCase.go +++ b/tests/integration/testCase.go @@ -44,7 +44,7 @@ type SchemaUpdate struct { // using the collection api. type CreateDoc struct { // The collection in which this document should be created. 
- CollectionId int + CollectionID int // The document to create, in JSON string format. Doc string @@ -60,7 +60,7 @@ type CreateDoc struct { // using the collection api. type UpdateDoc struct { // The collection in which this document exists. - CollectionId int + CollectionID int // The index-identifier of the document within the collection. This is based on // the order in which it was created, not the ordering of the document within the diff --git a/tests/integration/utils.go b/tests/integration/utils.go index 3afd13c183..008b54df6a 100644 --- a/tests/integration/utils.go +++ b/tests/integration/utils.go @@ -86,7 +86,7 @@ func ExecuteRequestTestCase( actions = append( actions, CreateDoc{ - CollectionId: collectionIndex, + CollectionID: collectionIndex, Doc: doc, }, ) @@ -99,7 +99,7 @@ func ExecuteRequestTestCase( actions = append( actions, UpdateDoc{ - CollectionId: collectionIndex, + CollectionID: collectionIndex, DocId: docIndex, Doc: doc, }, diff --git a/tests/integration/utils2.go b/tests/integration/utils2.go index a7fe355293..4a32b5d19c 100644 --- a/tests/integration/utils2.go +++ b/tests/integration/utils2.go @@ -498,16 +498,16 @@ func createDoc( return nil, true } - err = collections[action.CollectionId].Save(ctx, doc) + err = collections[action.CollectionID].Save(ctx, doc) if AssertError(t, testCase.Description, err, action.ExpectedError) { return nil, true } - if action.CollectionId >= len(documents) { + if action.CollectionID >= len(documents) { // Expand the slice if required, so that the document can be accessed by collection index - documents = append(documents, make([][]*client.Document, action.CollectionId-len(documents)+1)...) + documents = append(documents, make([][]*client.Document, action.CollectionID-len(documents)+1)...) } - documents[action.CollectionId] = append(documents[action.CollectionId], doc) + documents[action.CollectionID] = append(documents[action.CollectionID], doc) return documents, false } @@ -521,14 +521,14 @@ func updateDoc( documents [][]*client.Document, action UpdateDoc, ) bool { - doc := documents[action.CollectionId][action.DocId] + doc := documents[action.CollectionID][action.DocId] err := doc.SetWithJSON([]byte(action.Doc)) if AssertError(t, testCase.Description, err, action.ExpectedError) { return true } - err = collections[action.CollectionId].Save(ctx, doc) + err = collections[action.CollectionID].Save(ctx, doc) return AssertError(t, testCase.Description, err, action.ExpectedError) } From 839e25a6baa409077261bd1d70fe5e15c0808427 Mon Sep 17 00:00:00 2001 From: Andrew Sisley Date: Wed, 15 Feb 2023 11:36:32 -0500 Subject: [PATCH 05/12] PR FIXUP - Fix spellings --- tests/integration/testCase.go | 4 ++-- tests/integration/utils.go | 8 ++++---- tests/integration/utils2.go | 14 +++++++------- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/tests/integration/testCase.go b/tests/integration/testCase.go index 822c6a5f42..f8096dd30c 100644 --- a/tests/integration/testCase.go +++ b/tests/integration/testCase.go @@ -97,7 +97,7 @@ type Request struct { // // A new transaction will be created for the first TransactionRequest2 of any given // TransactionId. TransactionRequest2s will be submitted to the database in the order -// in which they are recieved (interleaving amoungst other actions if provided), however +// in which they are recieved (interleaving amongst other actions if provided), however // they will not be commited until a TransactionCommit of matching TransactionId is // provided. 
type TransactionRequest2 struct { @@ -139,7 +139,7 @@ type SubscriptionRequest2 struct { // If set to true, the request should yield no results and should instead timeout. // The timeout is duration is that of subscriptionTimeout (1 second). - ExpectedTimout bool + ExpectedTimeout bool // The expected (data) results yielded through the subscription across its lifetime. Results []map[string]any diff --git a/tests/integration/utils.go b/tests/integration/utils.go index 008b54df6a..372628b8e8 100644 --- a/tests/integration/utils.go +++ b/tests/integration/utils.go @@ -150,10 +150,10 @@ func ExecuteRequestTestCase( actions = append( actions, SubscriptionRequest2{ - ExpectedError: request.ExpectedError, - Request: request.Request, - Results: request.Results, - ExpectedTimout: request.ExpectedTimout, + ExpectedError: request.ExpectedError, + Request: request.Request, + Results: request.Results, + ExpectedTimeout: request.ExpectedTimout, }, ) } diff --git a/tests/integration/utils2.go b/tests/integration/utils2.go index 4a32b5d19c..a2a0a18528 100644 --- a/tests/integration/utils2.go +++ b/tests/integration/utils2.go @@ -88,7 +88,7 @@ func (dbi databaseInfo) DB() client.DB { var databaseDir string /* -If this is set to true the integration test suite will instead of it's normal profile do +If this is set to true the integration test suite will instead of its normal profile do the following: On [package] Init: @@ -445,7 +445,7 @@ ActionLoop: return startIndex, endIndex } -// getCollections returns all the collections of the given name, preserving order. +// getCollections returns all the collections of the given names, preserving order. // // If a given collection is not present in the database the value at the corresponding // result-index will be nil. @@ -620,14 +620,14 @@ func executeRequest( } // subscriptionResult wraps details required to assert that the -// subscription recieves all expected results whilst it remains +// subscription receives all expected results whilst it remains // active. type subscriptionResult struct { // If true, this subscription expects to timeout. expectedTimeout bool - // A channel that will recieve a function that asserts that - // the subscription recieved all its expected results and no more. + // A channel that will receive a function that asserts that + // the subscription received all its expected results and no more. // It should be called from the main test routine to ensure that // failures are recorded properly. It will only yield once, once // the subscription has terminated. @@ -635,7 +635,7 @@ type subscriptionResult struct { } // executeSubscriptionRequest executes the given subscription request, returning -// a channel that will recieve a single event once the subscription has been completed. +// a channel that will receive a single event once the subscription has been completed. 
func executeSubscriptionRequest( ctx context.Context, t *testing.T, @@ -645,7 +645,7 @@ func executeSubscriptionRequest( action SubscriptionRequest2, ) (subscriptionResult, bool) { resultChan := subscriptionResult{ - expectedTimeout: action.ExpectedTimout, + expectedTimeout: action.ExpectedTimeout, subscriptionAssert: make(chan func()), } From aefa8be9b214831d8dfe2af514a9fa1c51eb719a Mon Sep 17 00:00:00 2001 From: Andrew Sisley Date: Wed, 15 Feb 2023 11:43:19 -0500 Subject: [PATCH 06/12] PR FIXUP - Drop variable value from comment --- tests/integration/testCase.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/testCase.go b/tests/integration/testCase.go index f8096dd30c..7f7582f8f1 100644 --- a/tests/integration/testCase.go +++ b/tests/integration/testCase.go @@ -138,7 +138,7 @@ type SubscriptionRequest2 struct { Request string // If set to true, the request should yield no results and should instead timeout. - // The timeout is duration is that of subscriptionTimeout (1 second). + // The timeout duration is that of subscriptionTimeout. ExpectedTimeout bool // The expected (data) results yielded through the subscription across its lifetime. From 1632631aa23af20f2d652133546470db4f8f2ef1 Mon Sep 17 00:00:00 2001 From: Andrew Sisley Date: Wed, 15 Feb 2023 11:45:20 -0500 Subject: [PATCH 07/12] PR FIXUP - DocID not DocId --- tests/integration/testCase.go | 2 +- tests/integration/utils.go | 2 +- tests/integration/utils2.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/integration/testCase.go b/tests/integration/testCase.go index 7f7582f8f1..c9af54f781 100644 --- a/tests/integration/testCase.go +++ b/tests/integration/testCase.go @@ -65,7 +65,7 @@ type UpdateDoc struct { // The index-identifier of the document within the collection. This is based on // the order in which it was created, not the ordering of the document within the // database. - DocId int + DocID int // The document update, in JSON string format. Will only update the properties // provided. diff --git a/tests/integration/utils.go b/tests/integration/utils.go index 372628b8e8..878ce4b162 100644 --- a/tests/integration/utils.go +++ b/tests/integration/utils.go @@ -100,7 +100,7 @@ func ExecuteRequestTestCase( actions, UpdateDoc{ CollectionID: collectionIndex, - DocId: docIndex, + DocID: docIndex, Doc: doc, }, ) diff --git a/tests/integration/utils2.go b/tests/integration/utils2.go index a2a0a18528..005ebff45e 100644 --- a/tests/integration/utils2.go +++ b/tests/integration/utils2.go @@ -521,7 +521,7 @@ func updateDoc( documents [][]*client.Document, action UpdateDoc, ) bool { - doc := documents[action.CollectionID][action.DocId] + doc := documents[action.CollectionID][action.DocID] err := doc.SetWithJSON([]byte(action.Doc)) if AssertError(t, testCase.Description, err, action.ExpectedError) { From 50eb0bdcab2f3f795052832a5f7dd07474fadd40 Mon Sep 17 00:00:00 2001 From: Andrew Sisley Date: Wed, 15 Feb 2023 12:15:31 -0500 Subject: [PATCH 08/12] PR FIXUP - Remove file camel casing Are numbers upper case? 
--- tests/integration/{changeDetector.go => change_detector.go} | 0 tests/integration/{testCase.go => test_case.go} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename tests/integration/{changeDetector.go => change_detector.go} (100%) rename tests/integration/{testCase.go => test_case.go} (100%) diff --git a/tests/integration/changeDetector.go b/tests/integration/change_detector.go similarity index 100% rename from tests/integration/changeDetector.go rename to tests/integration/change_detector.go diff --git a/tests/integration/testCase.go b/tests/integration/test_case.go similarity index 100% rename from tests/integration/testCase.go rename to tests/integration/test_case.go From 64b490ac4191a9362b0838eea1e55f7f034575c2 Mon Sep 17 00:00:00 2001 From: Andrew Sisley Date: Wed, 15 Feb 2023 13:56:46 -0500 Subject: [PATCH 09/12] PR FIXUP - Fix subscription tests Sorry about the late change, there was a bug in the subscription mapping from the old system that resulted in these silently passing. It proved inconvienent to fix in the mapping as the old system did not specify the results for the triggering mutations which are required in the new system. I decided it would be better to just convert the existing tests to the new system. --- .../subscription/subscription_test.go | 208 +++++++++++------- tests/integration/subscription/utils.go | 34 ++- tests/integration/test_case.go | 8 +- tests/integration/utils.go | 27 --- tests/integration/utils2.go | 28 +-- 5 files changed, 160 insertions(+), 145 deletions(-) diff --git a/tests/integration/subscription/subscription_test.go b/tests/integration/subscription/subscription_test.go index 2a1d26953b..578f558cb2 100644 --- a/tests/integration/subscription/subscription_test.go +++ b/tests/integration/subscription/subscription_test.go @@ -17,44 +17,50 @@ import ( ) func TestSubscriptionWithCreateMutations(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ Description: "Subscription with user creations", - Request: `subscription { + Actions: []any{ + testUtils.SubscriptionRequest{ + Request: `subscription { User { _key name age } }`, - PostSubscriptionRequests: []testUtils.SubscriptionRequest{ - { + Results: []map[string]any{ + { + "_key": "bae-0a24cf29-b2c2-5861-9d00-abd6250c475d", + "age": uint64(27), + "name": "John", + }, + { + "_key": "bae-18def051-7f0f-5dc9-8a69-2a5e423f6b55", + "age": uint64(31), + "name": "Addo", + }, + }, + }, + testUtils.Request{ Request: `mutation { create_User(data: "{\"name\": \"John\",\"age\": 27,\"points\": 42.1,\"verified\": true}") { - _key name - age } }`, Results: []map[string]any{ { - "_key": "bae-0a24cf29-b2c2-5861-9d00-abd6250c475d", - "age": uint64(27), "name": "John", }, }, }, - { + testUtils.Request{ Request: `mutation { create_User(data: "{\"name\": \"Addo\",\"age\": 31,\"points\": 42.1,\"verified\": true}") { - _key name - age } }`, Results: []map[string]any{ { - "_key": "bae-18def051-7f0f-5dc9-8a69-2a5e423f6b55", - "age": uint64(31), "name": "Addo", }, }, @@ -62,32 +68,37 @@ func TestSubscriptionWithCreateMutations(t *testing.T) { }, } - executeTestCase(t, test) + execute(t, test) } func TestSubscriptionWithFilterAndOneCreateMutation(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ Description: "Subscription with filter and one user creation", - Request: `subscription { + Actions: []any{ + testUtils.SubscriptionRequest{ + Request: `subscription { User(filter: {age: {_lt: 30}}) { _key name age } }`, - PostSubscriptionRequests: 
[]testUtils.SubscriptionRequest{ - { + Results: []map[string]any{ + { + "_key": "bae-0a24cf29-b2c2-5861-9d00-abd6250c475d", + "age": uint64(27), + "name": "John", + }, + }, + }, + testUtils.Request{ Request: `mutation { create_User(data: "{\"name\": \"John\",\"age\": 27,\"points\": 42.1,\"verified\": true}") { - _key name - age } }`, Results: []map[string]any{ { - "_key": "bae-0a24cf29-b2c2-5861-9d00-abd6250c475d", - "age": uint64(27), "name": "John", }, }, @@ -95,113 +106,120 @@ func TestSubscriptionWithFilterAndOneCreateMutation(t *testing.T) { }, } - executeTestCase(t, test) + execute(t, test) } func TestSubscriptionWithFilterAndOneCreateMutationOutsideFilter(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ Description: "Subscription with filter and one user creation outside of the filter", - Request: `subscription { + Actions: []any{ + testUtils.SubscriptionRequest{ + Request: `subscription { User(filter: {age: {_gt: 30}}) { _key name age } }`, - PostSubscriptionRequests: []testUtils.SubscriptionRequest{ - { + Results: []map[string]any{}, + }, + testUtils.Request{ Request: `mutation { create_User(data: "{\"name\": \"John\",\"age\": 27,\"points\": 42.1,\"verified\": true}") { - _key name - age } }`, - ExpectedTimout: true, + Results: []map[string]any{ + { + "name": "John", + }, + }, }, }, } - executeTestCase(t, test) + execute(t, test) } func TestSubscriptionWithFilterAndCreateMutations(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ Description: "Subscription with filter and user creation in and outside of the filter", - Request: `subscription { + Actions: []any{ + testUtils.SubscriptionRequest{ + Request: `subscription { User(filter: {age: {_lt: 30}}) { _key name age } }`, - PostSubscriptionRequests: []testUtils.SubscriptionRequest{ - { + Results: []map[string]any{ + { + "_key": "bae-0a24cf29-b2c2-5861-9d00-abd6250c475d", + "age": uint64(27), + "name": "John", + }, + }, + }, + testUtils.Request{ Request: `mutation { create_User(data: "{\"name\": \"John\",\"age\": 27,\"points\": 42.1,\"verified\": true}") { - _key name - age } }`, Results: []map[string]any{ { - "_key": "bae-0a24cf29-b2c2-5861-9d00-abd6250c475d", - "age": uint64(27), "name": "John", }, }, }, - { + testUtils.Request{ Request: `mutation { create_User(data: "{\"name\": \"Addo\",\"age\": 31,\"points\": 42.1,\"verified\": true}") { - _key name - age } }`, - ExpectedTimout: true, + Results: []map[string]any{ + { + "name": "Addo", + }, + }, }, }, } - executeTestCase(t, test) + execute(t, test) } func TestSubscriptionWithUpdateMutations(t *testing.T) { - test := testUtils.RequestTestCase{ - Description: "Subscription with user creations", - Request: `subscription { - User { - _key - name - age - points - } - }`, - Docs: map[int][]string{ - 0: { - `{ + test := testUtils.TestCase{ + Description: "Subscription with user creations and single mutation", + Actions: []any{ + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{ "name": "John", "age": 27, "verified": true, "points": 42.1 }`, - `{ + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{ "name": "Addo", "age": 35, "verified": true, "points": 50 }`, }, - }, - PostSubscriptionRequests: []testUtils.SubscriptionRequest{ - { - Request: `mutation { - update_User(filter: {name: {_eq: "John"}}, data: "{\"points\": 45}") { + testUtils.SubscriptionRequest{ + Request: `subscription { + User { _key name age + points } }`, Results: []map[string]any{ @@ -213,46 +231,53 @@ func TestSubscriptionWithUpdateMutations(t 
*testing.T) { }, }, }, + testUtils.Request{ + Request: `mutation { + update_User(filter: {name: {_eq: "John"}}, data: "{\"points\": 45}") { + name + } + }`, + Results: []map[string]any{ + { + "name": "John", + }, + }, + }, }, } - executeTestCase(t, test) + execute(t, test) } func TestSubscriptionWithUpdateAllMutations(t *testing.T) { - test := testUtils.RequestTestCase{ - Description: "Subscription with user creations", - Request: `subscription { - User { - _key - name - age - points - } - }`, - Docs: map[int][]string{ - 0: { - `{ + test := testUtils.TestCase{ + Description: "Subscription with user creations and mutations for all", + Actions: []any{ + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{ "name": "John", "age": 27, "verified": true, "points": 42.1 }`, - `{ + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{ "name": "Addo", "age": 31, "verified": true, "points": 50 }`, }, - }, - PostSubscriptionRequests: []testUtils.SubscriptionRequest{ - { - Request: `mutation { - update_User(data: "{\"points\": 55}") { + testUtils.SubscriptionRequest{ + Request: `subscription { + User { _key name age + points } }`, Results: []map[string]any{ @@ -270,8 +295,23 @@ func TestSubscriptionWithUpdateAllMutations(t *testing.T) { }, }, }, + testUtils.Request{ + Request: `mutation { + update_User(data: "{\"points\": 55}") { + name + } + }`, + Results: []map[string]any{ + { + "name": "John", + }, + { + "name": "Addo", + }, + }, + }, }, } - executeTestCase(t, test) + execute(t, test) } diff --git a/tests/integration/subscription/utils.go b/tests/integration/subscription/utils.go index 68bd6806c1..9c662eb3e9 100644 --- a/tests/integration/subscription/utils.go +++ b/tests/integration/subscription/utils.go @@ -16,15 +16,27 @@ import ( testUtils "github.com/sourcenetwork/defradb/tests/integration" ) -var userSchema = (` - type User { - name: String - age: Int - points: Float - verified: Boolean - } -`) - -func executeTestCase(t *testing.T, test testUtils.RequestTestCase) { - testUtils.ExecuteRequestTestCase(t, userSchema, []string{"User"}, test) +func execute(t *testing.T, test testUtils.TestCase) { + testUtils.ExecuteTestCase( + t, + []string{"User"}, + testUtils.TestCase{ + Description: test.Description, + Actions: append( + []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User { + name: String + age: Int + points: Float + verified: Boolean + } + `, + }, + }, + test.Actions..., + ), + }, + ) } diff --git a/tests/integration/test_case.go b/tests/integration/test_case.go index c9af54f781..b6c589ed36 100644 --- a/tests/integration/test_case.go +++ b/tests/integration/test_case.go @@ -129,18 +129,14 @@ type TransactionCommit struct { ExpectedError string } -// SubscriptionRequest2 represents a subscription request. +// SubscriptionRequest represents a subscription request. // // The subscription will remain active until shortly after all actions have been processed. // The results of the subscription will then be asserted upon. -type SubscriptionRequest2 struct { +type SubscriptionRequest struct { // The subscription request to submit. Request string - // If set to true, the request should yield no results and should instead timeout. - // The timeout duration is that of subscriptionTimeout. - ExpectedTimeout bool - // The expected (data) results yielded through the subscription across its lifetime. 
Results []map[string]any diff --git a/tests/integration/utils.go b/tests/integration/utils.go index 878ce4b162..d507699721 100644 --- a/tests/integration/utils.go +++ b/tests/integration/utils.go @@ -19,18 +19,6 @@ import ( "github.com/sourcenetwork/defradb/client" ) -// Represents a subscription request. -type SubscriptionRequest struct { - Request string - // The expected (data) results of the issued request. - Results []map[string]any - // The expected error resulting from the issued request. - ExpectedError string - // If set to true, the request should yield no results. - // The timeout is duration is that of subscriptionTimeout (1 second) - ExpectedTimout bool -} - // Represents a request assigned to a particular transaction. type TransactionRequest struct { // Used to identify the transaction for this to run against (allows multiple @@ -48,9 +36,6 @@ type RequestTestCase struct { Description string Request string - // A collection of requests to exucute after the subscriber is listening on the stream - PostSubscriptionRequests []SubscriptionRequest - // A collection of requests that are tied to a specific transaction. // These will be executed before `Request` (if specified), in the order that they are listed here. TransactionalRequests []TransactionRequest @@ -146,18 +131,6 @@ func ExecuteRequestTestCase( ) } - for _, request := range test.PostSubscriptionRequests { - actions = append( - actions, - SubscriptionRequest2{ - ExpectedError: request.ExpectedError, - Request: request.Request, - Results: request.Results, - ExpectedTimeout: request.ExpectedTimout, - }, - ) - } - ExecuteTestCase( t, collectionNames, diff --git a/tests/integration/utils2.go b/tests/integration/utils2.go index 005ebff45e..fdc31f0581 100644 --- a/tests/integration/utils2.go +++ b/tests/integration/utils2.go @@ -349,7 +349,7 @@ func executeTestCase( return } - case SubscriptionRequest2: + case SubscriptionRequest: var resultsChan subscriptionResult resultsChan, done = executeSubscriptionRequest(ctx, t, allActionsDone, dbi.db, testCase, action) if done { @@ -371,20 +371,25 @@ func executeTestCase( } if len(resultsChans) > 0 { + // Once all other actions have been completed, sleep. + // This is a lazy way to allow the subscription to recieve + // the events generated, and to ensure that no more than are + // expected are recieved. It should probably be done in a better + // way than this at somepoint, but is good enough for now. + time.Sleep(subscriptionTimeout) + // Notify any active subscriptions that all requests have been sent. close(allActionsDone) } + for _, rChans := range resultsChans { select { case subscriptionAssert := <-rChans.subscriptionAssert: // We want to assert back in the main thread so failures get recorded properly subscriptionAssert() - // a safety in case the stream hangs or no results are expected. + // a safety in case the stream hangs - we don't want the tests to run forever. case <-time.After(subscriptionTimeout): - if rChans.expectedTimeout { - continue - } assert.Fail(t, "timeout occured while waiting for data stream", testCase.Description) } } @@ -623,9 +628,6 @@ func executeRequest( // subscription receives all expected results whilst it remains // active. type subscriptionResult struct { - // If true, this subscription expects to timeout. - expectedTimeout bool - // A channel that will receive a function that asserts that // the subscription received all its expected results and no more. 
// It should be called from the main test routine to ensure that @@ -642,10 +644,9 @@ func executeSubscriptionRequest( allActionsDone chan struct{}, db client.DB, testCase TestCase, - action SubscriptionRequest2, + action SubscriptionRequest, ) (subscriptionResult, bool) { resultChan := subscriptionResult{ - expectedTimeout: action.ExpectedTimeout, subscriptionAssert: make(chan func()), } @@ -668,13 +669,6 @@ func executeSubscriptionRequest( data = append(data, sData...) case <-allActionsDone: - // Once all other actions have been completed, sleep. - // This is a lazy way to allow the subscription to recieve - // the events generated, and to ensure that no more than are - // expected are recieved. It should probably be done in a better - // way than this at somepoint, but is good enough for now. - time.Sleep(subscriptionTimeout) - finalResult := &client.GQLResult{ Data: data, Errors: errs, From e1d2f505555424957e1a0eca9cfee71fa0d795d2 Mon Sep 17 00:00:00 2001 From: Andrew Sisley Date: Wed, 15 Feb 2023 12:32:36 -0500 Subject: [PATCH 10/12] PR FIXUP - Assert expected error raised Also converts one of the transaction tests, as the old system does not specify which action should expect an error, just that one action should raise it. That is not particularly great in my opinion and this commit changes the new system so that as well as tying the error to a particular action (as it did before), actions declared after an action with an expected action will still be executed - the old system would instead exit early, the request in the old version of the converted test was never actually executed. --- .../mutation/simple/mix/with_txn_test.go | 53 ++++++----- tests/integration/mutation/simple/utils.go | 18 ++++ tests/integration/utils2.go | 87 ++++++++++--------- 3 files changed, 96 insertions(+), 62 deletions(-) diff --git a/tests/integration/mutation/simple/mix/with_txn_test.go b/tests/integration/mutation/simple/mix/with_txn_test.go index 6dc384d2cf..afaea855bb 100644 --- a/tests/integration/mutation/simple/mix/with_txn_test.go +++ b/tests/integration/mutation/simple/mix/with_txn_test.go @@ -214,18 +214,17 @@ func TestMutationWithTxnDoesNotUpdateUserGivenDifferentTransactions(t *testing.T } func TestMutationWithTxnDoesNotAllowUpdateInSecondTransactionUser(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ Description: "Update by two different transactions", - Docs: map[int][]string{ - 0: { - `{ + Actions: []any{ + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{ "name": "John", "age": 27 }`, }, - }, - TransactionalRequests: []testUtils.TransactionRequest{ - { + testUtils.TransactionRequest2{ TransactionId: 0, Request: `mutation { update_user(data: "{\"age\": 28}") { @@ -242,7 +241,7 @@ func TestMutationWithTxnDoesNotAllowUpdateInSecondTransactionUser(t *testing.T) }, }, }, - { + testUtils.TransactionRequest2{ TransactionId: 1, Request: `mutation { update_user(data: "{\"age\": 29}") { @@ -258,25 +257,33 @@ func TestMutationWithTxnDoesNotAllowUpdateInSecondTransactionUser(t *testing.T) "age": uint64(29), }, }, + }, + testUtils.TransactionCommit{ + TransactionId: 0, + }, + testUtils.TransactionCommit{ + TransactionId: 1, ExpectedError: "Transaction Conflict. 
Please retry", }, - }, - // Query after transactions have been commited: - Request: `query { - user { - _key - name - age - } - }`, - Results: []map[string]any{ - { - "_key": "bae-88b63198-7d38-5714-a9ff-21ba46374fd1", - "name": "John", - "age": uint64(28), + testUtils.Request{ + // Query after transactions have been commited: + Request: `query { + user { + _key + name + age + } + }`, + Results: []map[string]any{ + { + "_key": "bae-88b63198-7d38-5714-a9ff-21ba46374fd1", + "name": "John", + "age": uint64(28), + }, + }, }, }, } - simpleTests.ExecuteTestCase(t, test) + simpleTests.Execute(t, test) } diff --git a/tests/integration/mutation/simple/utils.go b/tests/integration/mutation/simple/utils.go index 6cd9f53737..422c0c16ae 100644 --- a/tests/integration/mutation/simple/utils.go +++ b/tests/integration/mutation/simple/utils.go @@ -28,3 +28,21 @@ var userSchema = (` func ExecuteTestCase(t *testing.T, test testUtils.RequestTestCase) { testUtils.ExecuteRequestTestCase(t, userSchema, []string{"user"}, test) } + +func Execute(t *testing.T, test testUtils.TestCase) { + testUtils.ExecuteTestCase( + t, + []string{"user"}, + testUtils.TestCase{ + Description: test.Description, + Actions: append( + []any{ + testUtils.SchemaUpdate{ + Schema: userSchema, + }, + }, + test.Actions..., + ), + }, + ) +} diff --git a/tests/integration/utils2.go b/tests/integration/utils2.go index fdc31f0581..25c22dc666 100644 --- a/tests/integration/utils2.go +++ b/tests/integration/utils2.go @@ -323,31 +323,21 @@ func executeTestCase( for i := startActionIndex; i <= endActionIndex; i++ { switch action := testCase.Actions[i].(type) { case SchemaUpdate: - if updateSchema(ctx, t, dbi.db, testCase, action) { - return - } + updateSchema(ctx, t, dbi.db, testCase, action) // If the schema was updated we need to refresh the collection definitions. collections = getCollections(ctx, t, dbi.db, collectionNames) case CreateDoc: - if documents, done = createDoc(ctx, t, testCase, collections, documents, action); done { - return - } + documents = createDoc(ctx, t, testCase, collections, documents, action) case UpdateDoc: - if updateDoc(ctx, t, testCase, collections, documents, action) { - return - } + updateDoc(ctx, t, testCase, collections, documents, action) case TransactionRequest2: - if txns, done = executeTransactionRequest(ctx, t, dbi.db, txns, testCase, action); done { - return - } + txns = executeTransactionRequest(ctx, t, dbi.db, txns, testCase, action) case TransactionCommit: - if commitTransaction(ctx, t, txns, testCase, action) { - return - } + commitTransaction(ctx, t, txns, testCase, action) case SubscriptionRequest: var resultsChan subscriptionResult @@ -358,9 +348,7 @@ func executeTestCase( resultsChans = append(resultsChans, resultsChan) case Request: - if executeRequest(ctx, t, dbi.db, testCase, action) { - return - } + executeRequest(ctx, t, dbi.db, testCase, action) case SetupComplete: // no-op, just continue. 
@@ -483,9 +471,11 @@ func updateSchema( db client.DB, testCase TestCase, action SchemaUpdate, -) bool { +) { err := db.AddSchema(ctx, action.Schema) - return AssertError(t, testCase.Description, err, action.ExpectedError) + expectedErrorRaised := AssertError(t, testCase.Description, err, action.ExpectedError) + + assertExpectedErrorRaised(t, testCase.Description, action.ExpectedError, expectedErrorRaised) } // createDoc creates a document using the collection api and caches it in the @@ -497,24 +487,27 @@ func createDoc( collections []client.Collection, documents [][]*client.Document, action CreateDoc, -) ([][]*client.Document, bool) { +) [][]*client.Document { doc, err := client.NewDocFromJSON([]byte(action.Doc)) if AssertError(t, testCase.Description, err, action.ExpectedError) { - return nil, true + return nil } err = collections[action.CollectionID].Save(ctx, doc) - if AssertError(t, testCase.Description, err, action.ExpectedError) { - return nil, true + expectedErrorRaised := AssertError(t, testCase.Description, err, action.ExpectedError) + if expectedErrorRaised { + return nil } + assertExpectedErrorRaised(t, testCase.Description, action.ExpectedError, expectedErrorRaised) + if action.CollectionID >= len(documents) { // Expand the slice if required, so that the document can be accessed by collection index documents = append(documents, make([][]*client.Document, action.CollectionID-len(documents)+1)...) } documents[action.CollectionID] = append(documents[action.CollectionID], doc) - return documents, false + return documents } // updateDoc updates a document using the collection api. @@ -525,16 +518,18 @@ func updateDoc( collections []client.Collection, documents [][]*client.Document, action UpdateDoc, -) bool { +) { doc := documents[action.CollectionID][action.DocID] err := doc.SetWithJSON([]byte(action.Doc)) if AssertError(t, testCase.Description, err, action.ExpectedError) { - return true + return } err = collections[action.CollectionID].Save(ctx, doc) - return AssertError(t, testCase.Description, err, action.ExpectedError) + expectedErrorRaised := AssertError(t, testCase.Description, err, action.ExpectedError) + + assertExpectedErrorRaised(t, testCase.Description, action.ExpectedError, expectedErrorRaised) } // executeTransactionRequest executes the given transactional request. @@ -549,7 +544,7 @@ func executeTransactionRequest( txns []datastore.Txn, testCase TestCase, action TransactionRequest2, -) ([]datastore.Txn, bool) { +) []datastore.Txn { if action.TransactionId >= len(txns) { // Extend the txn slice so this txn can fit and be accessed by TransactionId txns = append(txns, make([]datastore.Txn, action.TransactionId-len(txns)+1)...) @@ -560,14 +555,14 @@ func executeTransactionRequest( txn, err := db.NewTxn(ctx, false) if AssertError(t, testCase.Description, err, action.ExpectedError) { txn.Discard(ctx) - return nil, true + return nil } txns[action.TransactionId] = txn } result := db.ExecTransactionalRequest(ctx, action.Request, txns[action.TransactionId]) - done := assertRequestResults( + expectedErrorRaised := assertRequestResults( ctx, t, testCase.Description, @@ -576,14 +571,16 @@ func executeTransactionRequest( action.ExpectedError, ) - if done { + assertExpectedErrorRaised(t, testCase.Description, action.ExpectedError, expectedErrorRaised) + + if expectedErrorRaised { // Make sure to discard the transaction before exit, else an unwanted error // may surface later (e.g. on database close). 
txns[action.TransactionId].Discard(ctx) - return nil, true + return nil } - return txns, false + return txns } // commitTransaction commits the given transaction. @@ -596,13 +593,15 @@ func commitTransaction( txns []datastore.Txn, testCase TestCase, action TransactionCommit, -) bool { +) { err := txns[action.TransactionId].Commit(ctx) if err != nil { txns[action.TransactionId].Discard(ctx) } - return AssertError(t, testCase.Description, err, action.ExpectedError) + expectedErrorRaised := AssertError(t, testCase.Description, err, action.ExpectedError) + + assertExpectedErrorRaised(t, testCase.Description, action.ExpectedError, expectedErrorRaised) } // executeRequest executes the given request. @@ -612,9 +611,9 @@ func executeRequest( db client.DB, testCase TestCase, action Request, -) bool { +) { result := db.ExecRequest(ctx, action.Request) - return assertRequestResults( + expectedErrorRaised := assertRequestResults( ctx, t, testCase.Description, @@ -622,6 +621,8 @@ func executeRequest( action.Results, action.ExpectedError, ) + + assertExpectedErrorRaised(t, testCase.Description, action.ExpectedError, expectedErrorRaised) } // subscriptionResult wraps details required to assert that the @@ -677,7 +678,7 @@ func executeSubscriptionRequest( resultChan.subscriptionAssert <- func() { // This assert should be executed from the main test routine // so that failures will be properly handled. - assertRequestResults( + expectedErrorRaised := assertRequestResults( ctx, t, testCase.Description, @@ -685,6 +686,8 @@ func executeSubscriptionRequest( action.Results, action.ExpectedError, ) + + assertExpectedErrorRaised(t, testCase.Description, action.ExpectedError, expectedErrorRaised) } return @@ -773,3 +776,9 @@ func assertRequestResults( return false } + +func assertExpectedErrorRaised(t *testing.T, description string, expectedError string, wasRaised bool) { + if expectedError != "" && !wasRaised { + assert.Fail(t, "Expected an error however none was raised.", description) + } +} From 2ea5f10cdbf1341ae653375d73208045d94fd873 Mon Sep 17 00:00:00 2001 From: Andrew Sisley Date: Wed, 15 Feb 2023 17:18:15 -0500 Subject: [PATCH 11/12] PR FIXUP - Drop internal subscription struct --- tests/integration/utils2.go | 38 +++++++++++++++---------------------- 1 file changed, 15 insertions(+), 23 deletions(-) diff --git a/tests/integration/utils2.go b/tests/integration/utils2.go index 25c22dc666..1523b16c63 100644 --- a/tests/integration/utils2.go +++ b/tests/integration/utils2.go @@ -318,7 +318,7 @@ func executeTestCase( documents := [][]*client.Document{} txns := []datastore.Txn{} allActionsDone := make(chan struct{}) - resultsChans := []subscriptionResult{} + resultsChans := []chan func(){} for i := startActionIndex; i <= endActionIndex; i++ { switch action := testCase.Actions[i].(type) { @@ -340,7 +340,7 @@ func executeTestCase( commitTransaction(ctx, t, txns, testCase, action) case SubscriptionRequest: - var resultsChan subscriptionResult + var resultsChan chan func() resultsChan, done = executeSubscriptionRequest(ctx, t, allActionsDone, dbi.db, testCase, action) if done { return @@ -370,9 +370,9 @@ func executeTestCase( close(allActionsDone) } - for _, rChans := range resultsChans { + for _, resultsChan := range resultsChans { select { - case subscriptionAssert := <-rChans.subscriptionAssert: + case subscriptionAssert := <-resultsChan: // We want to assert back in the main thread so failures get recorded properly subscriptionAssert() @@ -625,20 +625,14 @@ func executeRequest( assertExpectedErrorRaised(t, 
testCase.Description, action.ExpectedError, expectedErrorRaised) } -// subscriptionResult wraps details required to assert that the -// subscription receives all expected results whilst it remains -// active. -type subscriptionResult struct { - // A channel that will receive a function that asserts that - // the subscription received all its expected results and no more. - // It should be called from the main test routine to ensure that - // failures are recorded properly. It will only yield once, once - // the subscription has terminated. - subscriptionAssert chan func() -} - // executeSubscriptionRequest executes the given subscription request, returning // a channel that will receive a single event once the subscription has been completed. +// +// The returned channel will receive a function that asserts that +// the subscription received all its expected results and no more. +// It should be called from the main test routine to ensure that +// failures are recorded properly. It will only yield once, once +// the subscription has terminated. func executeSubscriptionRequest( ctx context.Context, t *testing.T, @@ -646,14 +640,12 @@ func executeSubscriptionRequest( db client.DB, testCase TestCase, action SubscriptionRequest, -) (subscriptionResult, bool) { - resultChan := subscriptionResult{ - subscriptionAssert: make(chan func()), - } +) (chan func(), bool) { + subscriptionAssert := make(chan func()) result := db.ExecRequest(ctx, action.Request) if AssertErrors(t, testCase.Description, result.GQL.Errors, action.ExpectedError) { - return subscriptionResult{}, true + return nil, true } go func() { @@ -675,7 +667,7 @@ func executeSubscriptionRequest( Errors: errs, } - resultChan.subscriptionAssert <- func() { + subscriptionAssert <- func() { // This assert should be executed from the main test routine // so that failures will be properly handled. expectedErrorRaised := assertRequestResults( @@ -695,7 +687,7 @@ func executeSubscriptionRequest( } }() - return resultChan, false + return subscriptionAssert, false } // Asserts as to whether an error has been raised as expected (or not). 
If an expected From 290d0a2994468b92ee7c9810ceb45ca98773fbc1 Mon Sep 17 00:00:00 2001 From: Andrew Sisley Date: Wed, 15 Feb 2023 18:00:20 -0500 Subject: [PATCH 12/12] PR FIXUP - TransactionID not TransactionId --- .../mutation/simple/mix/with_txn_test.go | 8 ++++---- tests/integration/test_case.go | 4 ++-- tests/integration/utils.go | 9 +++++++-- tests/integration/utils2.go | 16 ++++++++-------- 4 files changed, 21 insertions(+), 16 deletions(-) diff --git a/tests/integration/mutation/simple/mix/with_txn_test.go b/tests/integration/mutation/simple/mix/with_txn_test.go index afaea855bb..ca7b6e995b 100644 --- a/tests/integration/mutation/simple/mix/with_txn_test.go +++ b/tests/integration/mutation/simple/mix/with_txn_test.go @@ -225,7 +225,7 @@ func TestMutationWithTxnDoesNotAllowUpdateInSecondTransactionUser(t *testing.T) }`, }, testUtils.TransactionRequest2{ - TransactionId: 0, + TransactionID: 0, Request: `mutation { update_user(data: "{\"age\": 28}") { _key @@ -242,7 +242,7 @@ func TestMutationWithTxnDoesNotAllowUpdateInSecondTransactionUser(t *testing.T) }, }, testUtils.TransactionRequest2{ - TransactionId: 1, + TransactionID: 1, Request: `mutation { update_user(data: "{\"age\": 29}") { _key @@ -259,10 +259,10 @@ func TestMutationWithTxnDoesNotAllowUpdateInSecondTransactionUser(t *testing.T) }, }, testUtils.TransactionCommit{ - TransactionId: 0, + TransactionID: 0, }, testUtils.TransactionCommit{ - TransactionId: 1, + TransactionID: 1, ExpectedError: "Transaction Conflict. Please retry", }, testUtils.Request{ diff --git a/tests/integration/test_case.go b/tests/integration/test_case.go index b6c589ed36..74970b6ee4 100644 --- a/tests/integration/test_case.go +++ b/tests/integration/test_case.go @@ -102,7 +102,7 @@ type Request struct { // provided. type TransactionRequest2 struct { // Used to identify the transaction for this to run against. - TransactionId int + TransactionID int // The request to run against the transaction. Request string @@ -120,7 +120,7 @@ type TransactionRequest2 struct { // TransactionCommit represents a commit request for a transaction of the given id. type TransactionCommit struct { // Used to identify the transaction to commit. - TransactionId int + TransactionID int // Any error expected from the action. Optional. // diff --git a/tests/integration/utils.go b/tests/integration/utils.go index d507699721..5f6f354fe7 100644 --- a/tests/integration/utils.go +++ b/tests/integration/utils.go @@ -96,7 +96,12 @@ func ExecuteRequestTestCase( for _, request := range test.TransactionalRequests { actions = append( actions, - TransactionRequest2(request), + TransactionRequest2{ + TransactionID: request.TransactionId, + Request: request.Request, + Results: request.Results, + ExpectedError: request.ExpectedError, + }, ) } @@ -114,7 +119,7 @@ func ExecuteRequestTestCase( actions = append( actions, TransactionCommit{ - TransactionId: request.TransactionId, + TransactionID: request.TransactionId, ExpectedError: request.ExpectedError, }, ) diff --git a/tests/integration/utils2.go b/tests/integration/utils2.go index 1523b16c63..61842156ac 100644 --- a/tests/integration/utils2.go +++ b/tests/integration/utils2.go @@ -545,12 +545,12 @@ func executeTransactionRequest( testCase TestCase, action TransactionRequest2, ) []datastore.Txn { - if action.TransactionId >= len(txns) { + if action.TransactionID >= len(txns) { // Extend the txn slice so this txn can fit and be accessed by TransactionId - txns = append(txns, make([]datastore.Txn, action.TransactionId-len(txns)+1)...) 
+ txns = append(txns, make([]datastore.Txn, action.TransactionID-len(txns)+1)...) } - if txns[action.TransactionId] == nil { + if txns[action.TransactionID] == nil { // Create a new transaction if one does not already exist. txn, err := db.NewTxn(ctx, false) if AssertError(t, testCase.Description, err, action.ExpectedError) { @@ -558,10 +558,10 @@ func executeTransactionRequest( return nil } - txns[action.TransactionId] = txn + txns[action.TransactionID] = txn } - result := db.ExecTransactionalRequest(ctx, action.Request, txns[action.TransactionId]) + result := db.ExecTransactionalRequest(ctx, action.Request, txns[action.TransactionID]) expectedErrorRaised := assertRequestResults( ctx, t, @@ -576,7 +576,7 @@ func executeTransactionRequest( if expectedErrorRaised { // Make sure to discard the transaction before exit, else an unwanted error // may surface later (e.g. on database close). - txns[action.TransactionId].Discard(ctx) + txns[action.TransactionID].Discard(ctx) return nil } @@ -594,9 +594,9 @@ func commitTransaction( testCase TestCase, action TransactionCommit, ) { - err := txns[action.TransactionId].Commit(ctx) + err := txns[action.TransactionID].Commit(ctx) if err != nil { - txns[action.TransactionId].Discard(ctx) + txns[action.TransactionID].Discard(ctx) } expectedErrorRaised := AssertError(t, testCase.Description, err, action.ExpectedError)
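
For orientation, below is a minimal sketch of a test written directly against the new action-based framework introduced in this series, using only types and the entry point shown in the patches above (TestCase, SchemaUpdate, CreateDoc, Request, ExecuteTestCase). The package name, import path, schema, document values and expected results are illustrative assumptions and are not taken from the patch series.

package example_test

import (
	"testing"

	testUtils "github.com/sourcenetwork/defradb/tests/integration"
)

// A sketch of a create-then-query test using the new framework.
// Actions are executed in the order in which they are declared.
func TestUserCreateThenQuery(t *testing.T) {
	test := testUtils.TestCase{
		Description: "Create a user document and query it back",
		Actions: []any{
			// Declare the schema first; later actions reference collections by index.
			testUtils.SchemaUpdate{
				Schema: `
					type User {
						name: String
						age: Int
					}
				`,
			},
			// Create a document in collection 0 (User).
			testUtils.CreateDoc{
				CollectionID: 0,
				Doc: `{
					"name": "John",
					"age": 27
				}`,
			},
			// Query the saved state and assert on the results.
			testUtils.Request{
				Request: `query {
					User {
						name
						age
					}
				}`,
				Results: []map[string]any{
					{
						"name": "John",
						"age": uint64(27),
					},
				},
			},
		},
	}

	testUtils.ExecuteTestCase(t, []string{"User"}, test)
}

Transactional and subscription flows slot into the same Actions list: TransactionRequest2 and TransactionCommit actions sharing a TransactionID model the multi-transaction tests converted above, and a SubscriptionRequest declared before the mutations that should trigger it is asserted once all other actions have completed.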