diff --git a/pkg/ccl/backupccl/targets.go b/pkg/ccl/backupccl/targets.go
index 220a9c74ccd0..fcf6fcb825e0 100644
--- a/pkg/ccl/backupccl/targets.go
+++ b/pkg/ccl/backupccl/targets.go
@@ -11,7 +11,6 @@ package backupccl
 import (
 	"context"
 	"fmt"
-	"reflect"
 	"sort"
 	"strings"
 
@@ -21,8 +20,6 @@ import (
 	"github.com/cockroachdb/cockroach/pkg/keys"
 	"github.com/cockroachdb/cockroach/pkg/kv"
 	"github.com/cockroachdb/cockroach/pkg/kv/kvclient"
-	"github.com/cockroachdb/cockroach/pkg/roachpb"
-	"github.com/cockroachdb/cockroach/pkg/security/username"
 	"github.com/cockroachdb/cockroach/pkg/sql"
 	"github.com/cockroachdb/cockroach/pkg/sql/catalog"
 	"github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb"
@@ -37,7 +34,6 @@ import (
 	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
 	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
 	"github.com/cockroachdb/cockroach/pkg/util/hlc"
-	"github.com/cockroachdb/cockroach/pkg/util/timeutil"
 	"github.com/cockroachdb/cockroach/pkg/util/tracing"
 	"github.com/cockroachdb/errors"
 )
@@ -423,137 +419,6 @@ func selectTargets(
 // EntryFiles is a group of sst files of a backup table range
 type EntryFiles []execinfrapb.RestoreFileSpec
 
-// BackupTableEntry wraps information of a table retrieved
-// from backup manifests.
-// exported to cliccl for exporting data directly from backup sst.
-type BackupTableEntry struct {
-	Desc                 catalog.TableDescriptor
-	Span                 roachpb.Span
-	Files                []EntryFiles
-	LastSchemaChangeTime hlc.Timestamp
-}
-
-// MakeBackupTableEntry looks up the descriptor of fullyQualifiedTableName
-// from backupManifests and returns a BackupTableEntry, which contains
-// the table descriptor, the primary index span, and the sst files.
-func MakeBackupTableEntry(
-	ctx context.Context,
-	fullyQualifiedTableName string,
-	backupManifests []backuppb.BackupManifest,
-	endTime hlc.Timestamp,
-	user username.SQLUsername,
-	backupCodec keys.SQLCodec,
-) (BackupTableEntry, error) {
-	var descName []string
-	if descName = strings.Split(fullyQualifiedTableName, "."); len(descName) != 3 {
-		return BackupTableEntry{}, errors.Newf("table name should be specified in format databaseName.schemaName.tableName")
-	}
-
-	if !endTime.IsEmpty() {
-		ind := -1
-		for i, b := range backupManifests {
-			if b.StartTime.Less(endTime) && endTime.LessEq(b.EndTime) {
-				if endTime != b.EndTime && b.MVCCFilter != backuppb.MVCCFilter_All {
-					errorHints := "reading data for requested time requires that BACKUP was created with %q" +
-						" or should specify the time to be an exact backup time, nearest backup time is %s"
-					return BackupTableEntry{}, errors.WithHintf(
-						errors.Newf("unknown read time: %s", timeutil.Unix(0, endTime.WallTime).UTC()),
-						errorHints, backupOptRevisionHistory, timeutil.Unix(0, b.EndTime.WallTime).UTC(),
-					)
-				}
-				ind = i
-				break
-			}
-		}
-		if ind == -1 {
-			return BackupTableEntry{}, errors.Newf("supplied backups do not cover requested time %s", timeutil.Unix(0, endTime.WallTime).UTC())
-		}
-		backupManifests = backupManifests[:ind+1]
-	}
-
-	allDescs, _ := backupinfo.LoadSQLDescsFromBackupsAtTime(backupManifests, endTime)
-	resolver, err := backupresolver.NewDescriptorResolver(allDescs)
-	if err != nil {
-		return BackupTableEntry{}, errors.Wrapf(err, "creating a new resolver for all descriptors")
-	}
-
-	found, _, desc, err := resolver.LookupObject(ctx, tree.ObjectLookupFlags{}, descName[0], descName[1], descName[2])
-	if err != nil {
-		return BackupTableEntry{}, errors.Wrapf(err, "looking up table %s", fullyQualifiedTableName)
-	}
-	if !found {
-		return BackupTableEntry{}, errors.Newf("table %s not found", fullyQualifiedTableName)
-	}
-	tbMutable, ok := desc.(*tabledesc.Mutable)
-	if !ok {
-		return BackupTableEntry{}, errors.Newf("object %s not mutable", fullyQualifiedTableName)
-	}
-	tbDesc, err := catalog.AsTableDescriptor(tbMutable)
-	if err != nil {
-		return BackupTableEntry{}, errors.Wrapf(err, "fetching table %s descriptor", fullyQualifiedTableName)
-	}
-
-	tablePrimaryIndexSpan := tbDesc.PrimaryIndexSpan(backupCodec)
-
-	if err := checkCoverage(ctx, []roachpb.Span{tablePrimaryIndexSpan}, backupManifests); err != nil {
-		return BackupTableEntry{}, errors.Wrapf(err, "making spans for table %s", fullyQualifiedTableName)
-	}
-
-	introducedSpanFrontier, err := createIntroducedSpanFrontier(backupManifests, hlc.Timestamp{})
-	if err != nil {
-		return BackupTableEntry{}, err
-	}
-
-	entry := makeSimpleImportSpans(
-		[]roachpb.Span{tablePrimaryIndexSpan},
-		backupManifests,
-		nil, /*backupLocalityInfo*/
-		introducedSpanFrontier,
-		roachpb.Key{}, /*lowWaterMark*/
-		0,             /* disable merging */
-	)
-	lastSchemaChangeTime := findLastSchemaChangeTime(backupManifests, tbDesc, endTime)
-
-	backupTableEntry := BackupTableEntry{
-		tbDesc,
-		tablePrimaryIndexSpan,
-		make([]EntryFiles, 0),
-		lastSchemaChangeTime,
-	}
-
-	for _, e := range entry {
-		backupTableEntry.Files = append(backupTableEntry.Files, e.Files)
-	}
-
-	return backupTableEntry, nil
-}
-
-func findLastSchemaChangeTime(
-	backupManifests []backuppb.BackupManifest, tbDesc catalog.TableDescriptor, endTime hlc.Timestamp,
-) hlc.Timestamp {
-	lastSchemaChangeTime := endTime
-	for i := len(backupManifests) - 1; i >= 0; i-- {
-		manifest := backupManifests[i]
-		for j := len(manifest.DescriptorChanges) - 1; j >= 0; j-- {
-			rev := manifest.DescriptorChanges[j]
-
-			if endTime.LessEq(rev.Time) {
-				continue
-			}
-
-			if rev.ID == tbDesc.GetID() {
-				d := descbuilder.NewBuilder(rev.Desc).BuildExistingMutable()
-				revDesc, _ := catalog.AsTableDescriptor(d)
-				if !reflect.DeepEqual(revDesc.PublicColumns(), tbDesc.PublicColumns()) {
-					return lastSchemaChangeTime
-				}
-				lastSchemaChangeTime = rev.Time
-			}
-		}
-	}
-	return lastSchemaChangeTime
-}
-
 // checkMultiRegionCompatible checks if the given table is compatible to be
 // restored into the given database according to its multi-region locality.
 // It returns an error describing the incompatibility if not.
diff --git a/pkg/ccl/cliccl/BUILD.bazel b/pkg/ccl/cliccl/BUILD.bazel
index 492060d05179..eff3040a618e 100644
--- a/pkg/ccl/cliccl/BUILD.bazel
+++ b/pkg/ccl/cliccl/BUILD.bazel
@@ -6,7 +6,6 @@ go_library(
     srcs = [
         "cliccl.go",
         "debug.go",
-        "debug_backup.go",
        "demo.go",
        "start.go",
    ],
@@ -14,51 +13,20 @@ go_library(
     visibility = ["//visibility:public"],
     deps = [
         "//pkg/base",
-        "//pkg/blobs",
-        "//pkg/ccl/backupccl",
-        "//pkg/ccl/backupccl/backupbase",
-        "//pkg/ccl/backupccl/backupdest",
-        "//pkg/ccl/backupccl/backupinfo",
-        "//pkg/ccl/backupccl/backuppb",
-        "//pkg/ccl/backupccl/backuputils",
         "//pkg/ccl/baseccl",
         "//pkg/ccl/cliccl/cliflagsccl",
-        "//pkg/ccl/storageccl",
         "//pkg/ccl/storageccl/engineccl/enginepbccl",
         "//pkg/ccl/utilccl",
         "//pkg/ccl/workloadccl/cliccl",
         "//pkg/cli",
         "//pkg/cli/clierrorplus",
         "//pkg/cli/cliflagcfg",
-        "//pkg/cli/cliflags",
-        "//pkg/cli/clisqlexec",
         "//pkg/cli/democluster",
-        "//pkg/cloud",
-        "//pkg/cloud/nodelocal",
-        "//pkg/keys",
-        "//pkg/roachpb",
-        "//pkg/security/username",
-        "//pkg/server",
-        "//pkg/settings/cluster",
-        "//pkg/sql/catalog/colinfo",
-        "//pkg/sql/catalog/descpb",
-        "//pkg/sql/catalog/funcdesc",
-        "//pkg/sql/catalog/tabledesc",
-        "//pkg/sql/row",
-        "//pkg/sql/rowenc",
-        "//pkg/sql/sem/catconstants",
-        "//pkg/sql/sem/eval",
-        "//pkg/sql/sem/tree",
         "//pkg/storage",
         "//pkg/storage/enginepb",
-        "//pkg/util/hlc",
-        "//pkg/util/humanizeutil",
         "//pkg/util/protoutil",
         "//pkg/util/stop",
         "//pkg/util/timeutil",
-        "//pkg/util/timeutil/pgdate",
-        "//pkg/util/uuid",
-        "@com_github_cockroachdb_apd_v3//:apd",
         "@com_github_cockroachdb_errors//:errors",
         "@com_github_cockroachdb_errors//oserror",
         "@com_github_spf13_cobra//:cobra",
@@ -68,28 +36,13 @@ go_library(
 go_test(
     name = "cliccl_test",
     size = "medium",
-    srcs = [
-        "debug_backup_test.go",
-        "main_test.go",
-    ],
+    srcs = ["main_test.go"],
     args = ["-test.timeout=295s"],
-    embed = [":cliccl"],
     deps = [
-        "//pkg/base",
         "//pkg/build",
-        "//pkg/ccl/backupccl/backupbase",
         "//pkg/ccl/utilccl",
-        "//pkg/cli",
-        "//pkg/cli/clisqlexec",
         "//pkg/server",
-        "//pkg/testutils",
         "//pkg/testutils/serverutils",
-        "//pkg/testutils/sqlutils",
-        "//pkg/util/hlc",
-        "//pkg/util/leaktest",
-        "//pkg/util/log",
-        "//pkg/util/timeutil",
-        "@com_github_stretchr_testify//require",
     ],
 )
diff --git a/pkg/ccl/cliccl/debug_backup.go b/pkg/ccl/cliccl/debug_backup.go
deleted file mode 100644
index ada7b714b5cc..000000000000
--- a/pkg/ccl/cliccl/debug_backup.go
+++ /dev/null
@@ -1,851 +0,0 @@
-// Copyright 2017 The Cockroach Authors.
-//
-// Licensed as a CockroachDB Enterprise file under the Cockroach Community
-// License (the "License"); you may not use this file except in compliance with
-// the License. You may obtain a copy of the License at
-//
-// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
-
-package cliccl
-
-import (
-	"bytes"
-	"context"
-	"encoding/csv"
-	gohex "encoding/hex"
-	"encoding/json"
-	"fmt"
-	"net/url"
-	"os"
-	"path/filepath"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/cockroachdb/apd/v3"
-	"github.com/cockroachdb/cockroach/pkg/base"
-	"github.com/cockroachdb/cockroach/pkg/blobs"
-	"github.com/cockroachdb/cockroach/pkg/ccl/backupccl"
-	"github.com/cockroachdb/cockroach/pkg/ccl/backupccl/backupbase"
-	"github.com/cockroachdb/cockroach/pkg/ccl/backupccl/backupdest"
-	"github.com/cockroachdb/cockroach/pkg/ccl/backupccl/backupinfo"
-	"github.com/cockroachdb/cockroach/pkg/ccl/backupccl/backuppb"
-	"github.com/cockroachdb/cockroach/pkg/ccl/backupccl/backuputils"
-	"github.com/cockroachdb/cockroach/pkg/ccl/storageccl"
-	"github.com/cockroachdb/cockroach/pkg/cli"
-	"github.com/cockroachdb/cockroach/pkg/cli/clierrorplus"
-	"github.com/cockroachdb/cockroach/pkg/cli/cliflags"
-	"github.com/cockroachdb/cockroach/pkg/cli/clisqlexec"
-	"github.com/cockroachdb/cockroach/pkg/cloud"
-	"github.com/cockroachdb/cockroach/pkg/cloud/nodelocal"
-	"github.com/cockroachdb/cockroach/pkg/keys"
-	"github.com/cockroachdb/cockroach/pkg/roachpb"
-	"github.com/cockroachdb/cockroach/pkg/security/username"
-	"github.com/cockroachdb/cockroach/pkg/server"
-	"github.com/cockroachdb/cockroach/pkg/settings/cluster"
-	"github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo"
-	"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
-	"github.com/cockroachdb/cockroach/pkg/sql/catalog/funcdesc"
-	"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
-	"github.com/cockroachdb/cockroach/pkg/sql/row"
-	"github.com/cockroachdb/cockroach/pkg/sql/rowenc"
-	"github.com/cockroachdb/cockroach/pkg/sql/sem/catconstants"
-	"github.com/cockroachdb/cockroach/pkg/sql/sem/eval"
-	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
-	"github.com/cockroachdb/cockroach/pkg/storage"
-	"github.com/cockroachdb/cockroach/pkg/util/hlc"
-	"github.com/cockroachdb/cockroach/pkg/util/humanizeutil"
-	"github.com/cockroachdb/cockroach/pkg/util/timeutil"
-	"github.com/cockroachdb/cockroach/pkg/util/timeutil/pgdate"
-	"github.com/cockroachdb/cockroach/pkg/util/uuid"
-	"github.com/cockroachdb/errors"
-	"github.com/spf13/cobra"
-)
-
-const (
-	backupOptRevisionHistory = "revision_history"
-)
-
-type key struct {
-	rawByte []byte
-	typ     string
-}
-
-func (k *key) String() string {
-	return string(k.rawByte)
-}
-
-func (k *key) Type() string {
-	return k.typ
-}
-
-func (k *key) setType(v string) (string, error) {
-	i := strings.IndexByte(v, ':')
-	if i == -1 {
-		return "", errors.Newf("no format specified in start key %s", v)
-	}
-	k.typ = v[:i]
-	return v[i+1:], nil
-}
-
-func (k *key) Set(v string) error {
-	v, err := k.setType(v)
-	if err != nil {
-		return err
-	}
-	switch k.typ {
-	case "hex":
-		b, err := gohex.DecodeString(v)
-		if err != nil {
-			return err
-		}
-		k.rawByte = b
-	case "raw":
-		s, err := strconv.Unquote(`"` + v + `"`)
-		if err != nil {
-			return errors.Wrapf(err, "invalid argument %q", v)
-		}
-		k.rawByte = []byte(s)
-	case "bytekey":
-		s, err := strconv.Unquote(`"` + v + `"`)
-		if err != nil {
-			return errors.Wrapf(err, "invalid argument %q", v)
-		}
-		k.rawByte = []byte(s)
-	}
-	return nil
-}
-
-// debugBackupArgs captures the parameters of the `debug backup` command.
-var debugBackupArgs struct {
-	externalIODir string
-
-	exportTableName string
-	readTime        string
-	destination     string
-	format          string
-	nullas          string
-	maxRows         int
-	startKey        key
-	withRevisions   bool
-
-	rowCount int
-}
-
-// setDebugContextDefault sets the default values in debugBackupArgs.
-// This function is called in every test that exercises debug backup
-// command-line parsing.
-func setDebugContextDefault() {
-	debugBackupArgs.externalIODir = ""
-	debugBackupArgs.exportTableName = ""
-	debugBackupArgs.readTime = ""
-	debugBackupArgs.destination = ""
-	debugBackupArgs.format = "csv"
-	debugBackupArgs.nullas = "null"
-	debugBackupArgs.maxRows = 0
-	debugBackupArgs.startKey = key{}
-	debugBackupArgs.rowCount = 0
-	debugBackupArgs.withRevisions = false
-}
-
-func init() {
-
-	showCmd := &cobra.Command{
-		Use:   "show <backup_path>",
-		Short: "show backup summary",
-		Long:  "Shows summary of meta information about a SQL backup.",
-		Args:  cobra.ExactArgs(1),
-		RunE:  clierrorplus.MaybeDecorateError(runShowCmd),
-	}
-
-	listBackupsCmd := &cobra.Command{
-		Use:   "list-backups <collection_path>",
-		Short: "show backups in collection",
-		Long:  "Shows full backup paths in a backup collection.",
-		Args:  cobra.ExactArgs(1),
-		RunE:  clierrorplus.MaybeDecorateError(runListBackupsCmd),
-	}
-
-	listIncrementalCmd := &cobra.Command{
-		Use:   "list-incremental <backup_path>",
-		Short: "show incremental backups",
-		Long:  "Shows incremental chain of a SQL backup.",
-		Args:  cobra.ExactArgs(1),
-		RunE:  clierrorplus.MaybeDecorateError(runListIncrementalCmd),
-	}
-
-	exportDataCmd := &cobra.Command{
-		Use:   "export <backup_path>",
-		Short: "export table data from a backup",
-		Long:  "export table data from a backup, requires specifying --table to export data from",
-		Args:  cobra.MinimumNArgs(1),
-		RunE:  clierrorplus.MaybeDecorateError(runExportDataCmd),
-	}
-
-	backupCmds := &cobra.Command{
-		Use:   "backup [command]",
-		Short: "debug backups",
-		Long:  "Shows information about a SQL backup.",
-		RunE: func(cmd *cobra.Command, args []string) error {
-			return cli.UsageAndErr(cmd, args)
-		},
-		// The debug backups command is hidden from the help
-		// to signal that it isn't yet a stable interface.
-		Hidden: true,
-	}
-
-	backupFlags := backupCmds.Flags()
-	backupFlags.StringVarP(
-		&debugBackupArgs.externalIODir,
-		cliflags.ExternalIODir.Name,
-		cliflags.ExternalIODir.Shorthand,
-		"", /*value*/
-		cliflags.ExternalIODir.Usage())
-
-	exportDataCmd.Flags().StringVarP(
-		&debugBackupArgs.exportTableName,
-		cliflags.ExportTableTarget.Name,
-		cliflags.ExportTableTarget.Shorthand,
-		"", /*value*/
-		cliflags.ExportTableTarget.Usage())
-
-	exportDataCmd.Flags().StringVarP(
-		&debugBackupArgs.readTime,
-		cliflags.ReadTime.Name,
-		cliflags.ReadTime.Shorthand,
-		"", /*value*/
-		cliflags.ReadTime.Usage())
-
-	exportDataCmd.Flags().StringVarP(
-		&debugBackupArgs.destination,
-		cliflags.ExportDestination.Name,
-		cliflags.ExportDestination.Shorthand,
-		"", /*value*/
-		cliflags.ExportDestination.Usage())
-
-	exportDataCmd.Flags().StringVarP(
-		&debugBackupArgs.format,
-		cliflags.ExportTableFormat.Name,
-		cliflags.ExportTableFormat.Shorthand,
-		"csv", /*value*/
-		cliflags.ExportTableFormat.Usage())
-
-	exportDataCmd.Flags().StringVarP(
-		&debugBackupArgs.nullas,
-		cliflags.ExportCSVNullas.Name,
-		cliflags.ExportCSVNullas.Shorthand,
-		"null", /*value*/
-		cliflags.ExportCSVNullas.Usage())
-
-	exportDataCmd.Flags().IntVar(
-		&debugBackupArgs.maxRows,
-		cliflags.MaxRows.Name,
-		0,
-		cliflags.MaxRows.Usage())
-
-	exportDataCmd.Flags().Var(
-		&debugBackupArgs.startKey,
-		cliflags.StartKey.Name,
-		cliflags.StartKey.Usage())
-
-	exportDataCmd.Flags().BoolVar(
-		&debugBackupArgs.withRevisions,
-		cliflags.ExportRevisions.Name,
-		false, /*value*/
-		cliflags.ExportRevisions.Usage())
-
-	exportDataCmd.Flags().StringVarP(
-		&debugBackupArgs.readTime,
-		cliflags.ExportRevisionsUpTo.Name,
-		cliflags.ExportRevisionsUpTo.Shorthand,
-		"", /*value*/
-		cliflags.ExportRevisionsUpTo.Usage())
-
-	backupSubCmds := []*cobra.Command{
-		showCmd,
-		listBackupsCmd,
-		listIncrementalCmd,
-		exportDataCmd,
-	}
-
-	for _, cmd := range backupSubCmds {
-		backupCmds.AddCommand(cmd)
-		cmd.Flags().AddFlagSet(backupFlags)
-	}
-	cli.DebugCmd.AddCommand(backupCmds)
-}
-
-func newBlobFactory(ctx context.Context, dialing roachpb.NodeID) (blobs.BlobClient, error) {
-	if dialing != 0 {
-		return nil, errors.Errorf("accessing node %d during nodelocal access is unsupported for CLI inspection; only local access is supported with nodelocal://self", dialing)
-	}
-	if debugBackupArgs.externalIODir == "" {
-		debugBackupArgs.externalIODir = filepath.Join(server.DefaultStorePath, "extern")
-	}
-	return blobs.NewLocalClient(debugBackupArgs.externalIODir)
-}
-
-func externalStorageFromURIFactory(
-	ctx context.Context, uri string, user username.SQLUsername, opts ...cloud.ExternalStorageOption,
-) (cloud.ExternalStorage, error) {
-	defaultSettings := &cluster.Settings{}
-	defaultSettings.SV.Init(ctx, nil /* opaque */)
-	return cloud.ExternalStorageFromURI(ctx, uri, base.ExternalIODirConfig{},
-		defaultSettings, newBlobFactory, user,
-		nil, /* ie */
-		nil, /* cf */
-		nil, /* kvDB */
-		nil, /* limiters */
-		opts...)
-}
-
-func getManifestFromURI(ctx context.Context, path string) (backuppb.BackupManifest, error) {
-
-	if !strings.Contains(path, "://") {
-		path = nodelocal.MakeLocalStorageURI(path)
-	}
-	// This reads the raw backup descriptor (with table descriptors possibly not
-	// upgraded from the old FK representation, or even older formats). If more
-	// fields are added to the output, the table descriptors may need to be
-	// upgraded.
-	backupManifest, _, err := backupinfo.ReadBackupManifestFromURI(ctx, nil /* mem */, path, username.RootUserName(),
-		externalStorageFromURIFactory, nil, nil)
-	if err != nil {
-		return backuppb.BackupManifest{}, err
-	}
-	return backupManifest, nil
-}
-
-func runShowCmd(cmd *cobra.Command, args []string) error {
-
-	path := args[0]
-	ctx := context.Background()
-	desc, err := getManifestFromURI(ctx, path)
-	if err != nil {
-		return errors.Wrapf(err, "fetching backup manifest")
-	}
-
-	var meta = backupMetaDisplayMsg(desc)
-	jsonBytes, err := json.MarshalIndent(meta, "" /*prefix*/, "\t" /*indent*/)
-	if err != nil {
-		return errors.Wrapf(err, "marshall backup manifest")
-	}
-	s := string(jsonBytes)
-	fmt.Println(s)
-	return nil
-}
-
-func runListBackupsCmd(cmd *cobra.Command, args []string) error {
-
-	path := args[0]
-	if !strings.Contains(path, "://") {
-		path = nodelocal.MakeLocalStorageURI(path)
-	}
-	ctx := context.Background()
-	store, err := externalStorageFromURIFactory(ctx, path, username.RootUserName())
-	if err != nil {
-		return errors.Wrapf(err, "connect to external storage")
-	}
-	defer store.Close()
-
-	backupPaths, err := backupdest.ListFullBackupsInCollection(ctx, store)
-	if err != nil {
-		return errors.Wrapf(err, "list full backups in collection")
-	}
-
-	cols := []string{"path"}
-	rows := make([][]string, 0)
-	for _, backupPath := range backupPaths {
-		rows = append(rows, []string{"." + backupPath})
-	}
-	rowSliceIter := clisqlexec.NewRowSliceIter(rows, "l" /*align*/)
-	return cli.PrintQueryOutput(os.Stdout, cols, rowSliceIter)
-}
-
-func runListIncrementalCmd(cmd *cobra.Command, args []string) error {
-	// We now have two default incrementals directories to support.
-	// The "old" method was to simply place all incrementals in the base
-	// directory.
-	// The "new" method is to place all incrementals in a subdirectory
-	// "/incrementals" of the base directory.
-	// In expected operation, backups will only ever be written to one of these
-	// locations, i.e. the "new" method will only be used on fresh full backups.
-	// But since this is a debug command, we will be thorough in searching for
-	// all possible incremental backups.
-	//
-	// The command takes a path in two formats - either directly to a particular
-	// backup, or to the default incrementals subdir.
-	// For example, for the given full backup, both of the following are
-	// supported and produce identical output:
-	// cockroach debug backup list-incremental nodelocal://self/mybackup/2022/02/10-212843.96
-	// cockroach debug backup list-incremental nodelocal://self/mybackup/incrementals/2022/02/10-212843.96
-	//
-	// TODO(bardin): Support custom incrementals directories, which lack a full
-	// backup nearby.
-	path := args[0]
-	if !strings.Contains(path, "://") {
-		path = nodelocal.MakeLocalStorageURI(path)
-	}
-
-	basepath, subdir := backupdest.CollectionAndSubdir(path, "")
-
-	uri, err := url.Parse(basepath)
-	if err != nil {
-		return err
-	}
-
-	ctx := context.Background()
-
-	// Start the list of prior incremental backups with the full backup.
-	priorPaths := []string{backuputils.JoinURLPath(
-		strings.TrimSuffix(
-			uri.Path, string(backuputils.URLSeparator)+backupbase.DefaultIncrementalsSubdir),
-		subdir)}
-
-	// Search for incrementals in the old default location, i.e. the given path.
-	oldIncURI := *uri
-	oldIncURI.Path = backuputils.JoinURLPath(oldIncURI.Path, subdir)
-	baseStore, err := externalStorageFromURIFactory(ctx, oldIncURI.String(), username.RootUserName())
-	if err != nil {
-		return errors.Wrapf(err, "connect to external storage")
-	}
-	defer baseStore.Close()
-
-	oldIncPaths, err := backupdest.FindPriorBackups(ctx, baseStore, backupdest.OmitManifest)
-	if err != nil {
-		return err
-	}
-	for _, path := range oldIncPaths {
-		priorPaths = append(priorPaths, backuputils.JoinURLPath(oldIncURI.Path, path))
-	}
-
-	// Search for incrementals in the new default location, i.e. the "/incrementals" subdir.
-	newIncURI := *uri
-	newIncURI.Path = backuputils.JoinURLPath(newIncURI.Path, backupbase.DefaultIncrementalsSubdir, subdir)
-	incStore, err := externalStorageFromURIFactory(ctx, newIncURI.String(), username.RootUserName())
-	if err != nil {
-		return errors.Wrapf(err, "connect to external storage")
-	}
-	defer incStore.Close()
-
-	newIncPaths, err := backupdest.FindPriorBackups(ctx, incStore, backupdest.OmitManifest)
-	if err != nil {
-		return err
-	}
-	for _, path := range newIncPaths {
-		priorPaths = append(priorPaths, backuputils.JoinURLPath(newIncURI.Path, path))
-	}
-
-	// List and report manifests found in all locations.
-	stores := make([]cloud.ExternalStorage, len(priorPaths))
-	rows := make([][]string, 0)
-	for i, path := range priorPaths {
-		uri.Path = path
-		stores[i], err = externalStorageFromURIFactory(ctx, uri.String(), username.RootUserName())
-		if err != nil {
-			return errors.Wrapf(err, "connect to external storage")
-		}
-		defer stores[i].Close()
-		manifest, _, err := backupinfo.ReadBackupManifestFromStore(ctx, nil /* mem */, stores[i],
-			nil, nil)
-		if err != nil {
-			return err
-		}
-		startTime := manifest.StartTime.GoTime().Format(time.RFC3339)
-		endTime := manifest.EndTime.GoTime().Format(time.RFC3339)
-		if i == 0 {
-			startTime = "-"
-		}
-		newRow := []string{uri.Path, startTime, endTime}
-		rows = append(rows, newRow)
-	}
-	cols := []string{"path", "start time", "end time"}
-	rowSliceIter := clisqlexec.NewRowSliceIter(rows, "lll" /*align*/)
-	return cli.PrintQueryOutput(os.Stdout, cols, rowSliceIter)
-}
-
-func runExportDataCmd(cmd *cobra.Command, args []string) error {
-	if debugBackupArgs.exportTableName == "" {
-		return errors.New("export data requires table name specified by --table flag")
-	}
-	fullyQualifiedTableName := strings.ToLower(debugBackupArgs.exportTableName)
-	manifestPaths := args
-	ctx := context.Background()
-	manifests := make([]backuppb.BackupManifest, 0, len(manifestPaths))
-	for _, path := range manifestPaths {
-		manifest, err := getManifestFromURI(ctx, path)
-		if err != nil {
-			return errors.Wrapf(err, "fetching backup manifests from %s", path)
-		}
-		manifests = append(manifests, manifest)
-	}
-
-	if debugBackupArgs.withRevisions && manifests[0].MVCCFilter != backuppb.MVCCFilter_All {
-		return errors.WithHintf(
-			errors.Newf("invalid flag: %s", cliflags.ExportRevisions.Name),
-			"requires backup created with %q", backupOptRevisionHistory,
-		)
-	}
-
-	endTime, err := evalAsOfTimestamp(debugBackupArgs.readTime, manifests)
-	if err != nil {
-		return errors.Wrapf(err, "eval as of timestamp %s", debugBackupArgs.readTime)
-	}
-
-	codec := keys.TODOSQLCodec
-	entry, err := backupccl.MakeBackupTableEntry(
-		ctx,
-		fullyQualifiedTableName,
-		manifests,
-		endTime,
-		username.RootUserName(),
-		codec,
-	)
-	if err != nil {
-		return errors.Wrapf(err, "fetching entry")
-	}
-
-	if err = showData(ctx, entry, endTime, codec); err != nil {
-		return errors.Wrapf(err, "show data")
-	}
-	return nil
-}
-
-func evalAsOfTimestamp(
-	readTime string, manifests []backuppb.BackupManifest,
-) (hlc.Timestamp, error) {
-	if readTime == "" {
-		return manifests[len(manifests)-1].EndTime, nil
-	}
-	var err error
-	// Attempt to parse as timestamp.
-	if ts, _, err := pgdate.ParseTimestampWithoutTimezone(timeutil.Now(), pgdate.DateStyle{Order: pgdate.Order_MDY}, readTime); err == nil {
-		readTS := hlc.Timestamp{WallTime: ts.UnixNano()}
-		return readTS, nil
-	}
-	// Attempt to parse as a decimal.
-	if dec, _, err := apd.NewFromString(readTime); err == nil {
-		if readTS, err := hlc.DecimalToHLC(dec); err == nil {
-			return readTS, nil
-		}
-	}
-	err = errors.Newf("value %s is neither timestamp nor decimal", readTime)
-	return hlc.Timestamp{}, err
-}
-
-func showData(
-	ctx context.Context, entry backupccl.BackupTableEntry, endTime hlc.Timestamp, codec keys.SQLCodec,
-) error {
-
-	buf := bytes.NewBuffer([]byte{})
-	var writer *csv.Writer
-	if debugBackupArgs.format != "csv" {
-		return errors.Newf("only exporting to csv format is supported")
-	}
-	if debugBackupArgs.destination == "" {
-		writer = csv.NewWriter(os.Stdout)
-	} else {
-		writer = csv.NewWriter(buf)
-	}
-
-	rf, err := makeRowFetcher(ctx, entry, codec)
-	if err != nil {
-		return errors.Wrapf(err, "make row fetcher")
-	}
-	defer rf.Close(ctx)
-
-	if debugBackupArgs.withRevisions {
-		startT := entry.LastSchemaChangeTime.GoTime().UTC()
-		endT := endTime.GoTime().UTC()
-		fmt.Fprintf(os.Stderr, "DETECTED SCHEMA CHANGE AT %s, ONLY SHOWING UPDATES IN RANGE [%s, %s]\n", startT, startT, endT)
-	}
-
-	for _, files := range entry.Files {
-		if err := processEntryFiles(ctx, rf, files, entry.Span, entry.LastSchemaChangeTime, endTime, writer); err != nil {
-			return err
-		}
-		if debugBackupArgs.maxRows != 0 && debugBackupArgs.rowCount >= debugBackupArgs.maxRows {
-			break
-		}
-	}
-
-	if debugBackupArgs.destination != "" {
-		dir, file := filepath.Split(debugBackupArgs.destination)
-		store, err := externalStorageFromURIFactory(ctx, dir, username.RootUserName())
-		if err != nil {
-			return errors.Wrapf(err, "unable to open store to write files: %s", debugBackupArgs.destination)
-		}
-		if err = cloud.WriteFile(ctx, store, file, bytes.NewReader(buf.Bytes())); err != nil {
-			_ = store.Close()
-			return err
-		}
-		return store.Close()
-	}
-	return nil
-}
-
-func makeIters(
-	ctx context.Context, files backupccl.EntryFiles,
-) ([]storage.SimpleMVCCIterator, func() error, error) {
-	iters := make([]storage.SimpleMVCCIterator, len(files))
-	dirStorage := make([]cloud.ExternalStorage, len(files))
-	for i, file := range files {
-		var err error
-		clusterSettings := cluster.MakeClusterSettings()
-		dirStorage[i], err = cloud.MakeExternalStorage(ctx, file.Dir, base.ExternalIODirConfig{},
-			clusterSettings,
-			newBlobFactory,
-			nil, /* ie */
-			nil, /* cf */
-			nil, /* kvDB */
-			nil, /* limiters */
-		)
-		if err != nil {
-			return nil, nil, errors.Wrapf(err, "making external storage")
-		}
-
-		var iterOpts = storage.IterOptions{
-			KeyTypes:   storage.IterKeyTypePointsAndRanges,
-			LowerBound: keys.LocalMax,
-			UpperBound: keys.MaxKey,
-		}
-
-		iters[i], err = storageccl.ExternalSSTReader(ctx, []storageccl.StoreFile{{Store: dirStorage[i], FilePath: file.Path}}, nil, iterOpts)
-
-		if err != nil {
-			return nil, nil, errors.Wrapf(err, "fetching sst reader")
-		}
-	}
-
-	cleanup := func() error {
-		for _, iter := range iters {
-			iter.Close()
-		}
-		for _, dir := range dirStorage {
-			if err := dir.Close(); err != nil {
-				return err
-			}
-		}
-		return nil
-	}
-	return iters, cleanup, nil
-}
-
-func makeRowFetcher(
-	ctx context.Context, entry backupccl.BackupTableEntry, codec keys.SQLCodec,
-) (row.Fetcher, error) {
-	colIDs := entry.Desc.PublicColumnIDs()
-	if debugBackupArgs.withRevisions {
-		colIDs = append(colIDs, colinfo.MVCCTimestampColumnID)
-	}
-
-	var spec descpb.IndexFetchSpec
-	if err := rowenc.InitIndexFetchSpec(&spec, codec, entry.Desc, entry.Desc.GetPrimaryIndex(), colIDs); err != nil {
-		return row.Fetcher{}, err
-	}
-
-	var rf row.Fetcher
-	if err := rf.Init(
-		ctx,
-		row.FetcherInitArgs{
-			WillUseCustomKVBatchFetcher: true,
-			Alloc:                       &tree.DatumAlloc{},
-			Spec:                        &spec,
-		},
-	); err != nil {
-		return rf, err
-	}
-	return rf, nil
-}
-
-func processEntryFiles(
-	ctx context.Context,
-	rf row.Fetcher,
-	files backupccl.EntryFiles,
-	span roachpb.Span,
-	startTime hlc.Timestamp,
-	endTime hlc.Timestamp,
-	writer *csv.Writer,
-) (err error) {
-
-	iters, cleanup, err := makeIters(ctx, files)
-	defer func() {
-		if cleanupErr := cleanup(); err == nil {
-			err = cleanupErr
-		}
-	}()
-	if err != nil {
-		return errors.Wrapf(err, "make iters")
-	}
-
-	iter := storage.MakeMultiIterator(iters)
-	defer iter.Close()
-
-	startKeyMVCC, endKeyMVCC := storage.MVCCKey{Key: span.Key}, storage.MVCCKey{Key: span.EndKey}
-	if len(debugBackupArgs.startKey.rawByte) != 0 {
-		if debugBackupArgs.startKey.typ == "bytekey" {
-			startKeyMVCC.Key = append(startKeyMVCC.Key, debugBackupArgs.startKey.rawByte...)
-		} else {
-			startKeyMVCC.Key = roachpb.Key(debugBackupArgs.startKey.rawByte)
-		}
-	}
-	kvFetcher := row.MakeBackupSSTKVFetcher(startKeyMVCC, endKeyMVCC, iter, startTime, endTime, debugBackupArgs.withRevisions)
-
-	if err := rf.StartScanFrom(ctx, &kvFetcher); err != nil {
-		return errors.Wrapf(err, "row fetcher starts scan")
-	}
-
-	for {
-		datums, err := rf.NextRowDecoded(ctx)
-		if err != nil {
-			return errors.Wrapf(err, "decode row")
-		}
-		if datums == nil {
-			break
-		}
-		rowDisplay := make([]string, datums.Len())
-		for i, datum := range datums {
-
-			if debugBackupArgs.withRevisions && i == datums.Len()-1 {
-				approx, err := eval.DecimalToInexactDTimestamp(datum.(*tree.DDecimal))
-				if err != nil {
-					return errors.Wrapf(err, "convert datum %s to mvcc timestamp", datum)
-				}
-				rowDisplay[i] = approx.UTC().String()
-				break
-			}
-
-			if datum == tree.DNull {
-				rowDisplay[i] = debugBackupArgs.nullas
-			} else {
-				rowDisplay[i] = datum.String()
-			}
-		}
-		if err := writer.Write(rowDisplay); err != nil {
-			return err
-		}
-		writer.Flush()
-
-		if debugBackupArgs.maxRows != 0 {
-			debugBackupArgs.rowCount++
-			if debugBackupArgs.rowCount >= debugBackupArgs.maxRows {
-				break
-			}
-		}
-	}
-	return nil
-}
-
-type backupMetaDisplayMsg backuppb.BackupManifest
-type backupFileDisplayMsg backuppb.BackupManifest_File
-
-func (f backupFileDisplayMsg) MarshalJSON() ([]byte, error) {
-	fileDisplayMsg := struct {
-		Path         string
-		Span         string
-		DataSize     string
-		IndexEntries int64
-		Rows         int64
-	}{
-		Path:         f.Path,
-		Span:         fmt.Sprint(f.Span),
-		DataSize:     string(humanizeutil.IBytes(f.EntryCounts.DataSize)),
-		IndexEntries: f.EntryCounts.IndexEntries,
-		Rows:         f.EntryCounts.Rows,
-	}
-	return json.Marshal(fileDisplayMsg)
-}
-
-func (b backupMetaDisplayMsg) MarshalJSON() ([]byte, error) {
-
-	fileMsg := make([]backupFileDisplayMsg, len(b.Files))
-	for i, file := range b.Files {
-		fileMsg[i] = backupFileDisplayMsg(file)
-	}
-
-	displayMsg := struct {
-		StartTime           string
-		EndTime             string
-		DataSize            string
-		Rows                int64
-		IndexEntries        int64
-		FormatVersion       uint32
-		ClusterID           uuid.UUID
-		NodeID              roachpb.NodeID
-		BuildInfo           string
-		Files               []backupFileDisplayMsg
-		Spans               string
-		DatabaseDescriptors map[descpb.ID]string
-		TableDescriptors    map[descpb.ID]string
-		TypeDescriptors     map[descpb.ID]string
-		SchemaDescriptors   map[descpb.ID]string
-		FunctionDescriptors map[descpb.ID]string
-	}{
-		StartTime:           timeutil.Unix(0, b.StartTime.WallTime).Format(time.RFC3339),
-		EndTime:             timeutil.Unix(0, b.EndTime.WallTime).Format(time.RFC3339),
-		DataSize:            string(humanizeutil.IBytes(b.EntryCounts.DataSize)),
-		Rows:                b.EntryCounts.Rows,
-		IndexEntries:        b.EntryCounts.IndexEntries,
-		FormatVersion:       b.FormatVersion,
-		ClusterID:           b.ClusterID,
-		NodeID:              b.NodeID,
-		BuildInfo:           b.BuildInfo.Short(),
-		Files:               fileMsg,
-		Spans:               fmt.Sprint(b.Spans),
-		DatabaseDescriptors: make(map[descpb.ID]string),
-		TableDescriptors:    make(map[descpb.ID]string),
-		TypeDescriptors:     make(map[descpb.ID]string),
-		SchemaDescriptors:   make(map[descpb.ID]string),
-		FunctionDescriptors: make(map[descpb.ID]string),
-	}
-
-	dbIDToName := make(map[descpb.ID]string)
-	schemaIDToFullyQualifiedName := make(map[descpb.ID]string)
-	schemaIDToFullyQualifiedName[keys.PublicSchemaIDForBackup] = catconstants.PublicSchemaName
-	typeIDToFullyQualifiedName := make(map[descpb.ID]string)
-	tableIDToFullyQualifiedName := make(map[descpb.ID]string)
-	funcIDToFullyQualifiedName := make(map[descpb.ID]string)
-
-	for i := range b.Descriptors {
-		d := &b.Descriptors[i]
-		id := descpb.GetDescriptorID(d)
-		tableDesc, databaseDesc, typeDesc, schemaDesc, functionDesc := descpb.FromDescriptor(d)
-		if databaseDesc != nil {
-			dbIDToName[id] = descpb.GetDescriptorName(d)
-		} else if schemaDesc != nil {
-			dbName := dbIDToName[schemaDesc.GetParentID()]
-			schemaName := descpb.GetDescriptorName(d)
-			schemaIDToFullyQualifiedName[id] = dbName + "." + schemaName
-		} else if typeDesc != nil {
-			parentSchema := schemaIDToFullyQualifiedName[typeDesc.GetParentSchemaID()]
-			if parentSchema == catconstants.PublicSchemaName {
-				parentSchema = dbIDToName[typeDesc.GetParentID()] + "." + parentSchema
-			}
-			typeName := descpb.GetDescriptorName(d)
-			typeIDToFullyQualifiedName[id] = parentSchema + "." + typeName
-		} else if tableDesc != nil {
-			tbDesc := tabledesc.NewBuilder(tableDesc).BuildImmutable()
-			parentSchema := schemaIDToFullyQualifiedName[tbDesc.GetParentSchemaID()]
-			if parentSchema == catconstants.PublicSchemaName {
-				parentSchema = dbIDToName[tableDesc.GetParentID()] + "." + parentSchema
-			}
-			tableName := descpb.GetDescriptorName(d)
-			tableIDToFullyQualifiedName[id] = parentSchema + "." + tableName
-		} else if functionDesc != nil {
-			fnDesc := funcdesc.NewBuilder(functionDesc).BuildImmutable()
-			parentSchema := schemaIDToFullyQualifiedName[fnDesc.GetParentSchemaID()]
-			if parentSchema == catconstants.PublicSchemaName {
-				parentSchema = dbIDToName[functionDesc.GetParentID()] + "." + parentSchema
-			}
-			fnName := descpb.GetDescriptorName(d)
-			funcIDToFullyQualifiedName[id] = parentSchema + "." + fnName
-		}
-	}
-	displayMsg.DatabaseDescriptors = dbIDToName
-	displayMsg.TableDescriptors = tableIDToFullyQualifiedName
-	displayMsg.SchemaDescriptors = schemaIDToFullyQualifiedName
-	displayMsg.TypeDescriptors = typeIDToFullyQualifiedName
-	displayMsg.FunctionDescriptors = funcIDToFullyQualifiedName
-
-	return json.Marshal(displayMsg)
-}
diff --git a/pkg/ccl/cliccl/debug_backup_test.go b/pkg/ccl/cliccl/debug_backup_test.go
deleted file mode 100644
index a8189f6409aa..000000000000
--- a/pkg/ccl/cliccl/debug_backup_test.go
+++ /dev/null
@@ -1,942 +0,0 @@
-// Copyright 2021 The Cockroach Authors.
-//
-// Licensed as a CockroachDB Enterprise file under the Cockroach Community
-// License (the "License"); you may not use this file except in compliance with
-// the License. You may obtain a copy of the License at
-//
-// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
-
-package cliccl
-
-import (
-	"bytes"
-	"context"
-	gojson "encoding/json"
-	"fmt"
-	"os"
-	"strings"
-	"testing"
-	"time"
-
-	"github.com/cockroachdb/cockroach/pkg/base"
-	"github.com/cockroachdb/cockroach/pkg/build"
-	"github.com/cockroachdb/cockroach/pkg/ccl/backupccl/backupbase"
-	"github.com/cockroachdb/cockroach/pkg/cli"
-	"github.com/cockroachdb/cockroach/pkg/cli/clisqlexec"
-	"github.com/cockroachdb/cockroach/pkg/testutils"
-	"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
-	"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
-	"github.com/cockroachdb/cockroach/pkg/util/hlc"
-	"github.com/cockroachdb/cockroach/pkg/util/leaktest"
-	"github.com/cockroachdb/cockroach/pkg/util/log"
-	"github.com/cockroachdb/cockroach/pkg/util/timeutil"
-	"github.com/stretchr/testify/require"
-)
-
-func TestShowSummary(t *testing.T) {
-	defer leaktest.AfterTest(t)()
-	defer log.Scope(t).Close(t)
-
-	c := cli.NewCLITest(cli.TestCLIParams{T: t, NoServer: true})
-	defer c.Cleanup()
-
-	ctx := context.Background()
-	dir, cleanFn := testutils.TempDir(t)
-	defer cleanFn()
-	srv, db, _ := serverutils.StartServer(t, base.TestServerArgs{ExternalIODir: dir, Insecure: true})
-	defer srv.Stopper().Stop(ctx)
-
-	sqlDB := sqlutils.MakeSQLRunner(db)
-	sqlDB.Exec(t, `CREATE DATABASE testDB`)
-	sqlDB.Exec(t, `USE testDB`)
-
-	const dbOnlyBackupPath = "nodelocal://0/dbOnlyFooFolder"
-	ts1 := hlc.Timestamp{WallTime: timeutil.Now().UnixNano()}
-	sqlDB.Exec(t, `BACKUP DATABASE testDB TO $1 AS OF SYSTEM TIME `+ts1.AsOfSystemTime(), dbOnlyBackupPath)
-
-	sqlDB.Exec(t, `CREATE SCHEMA testDB.testschema`)
-	sqlDB.Exec(t, `CREATE TYPE fooType AS ENUM ()`)
-	sqlDB.Exec(t, `CREATE TYPE testDB.testschema.fooType AS ENUM ()`)
-	sqlDB.Exec(t, `CREATE TABLE fooTable (a INT)`)
-	sqlDB.Exec(t, `CREATE TABLE testDB.testschema.fooTable (a INT)`)
-	sqlDB.Exec(t, `INSERT INTO testDB.testschema.fooTable VALUES (123)`)
-	const backupPath = "nodelocal://0/fooFolder"
-	ts2 := hlc.Timestamp{WallTime: timeutil.Now().UnixNano()}
-	sqlDB.Exec(t, `BACKUP DATABASE testDB TO $1 AS OF SYSTEM TIME `+ts2.AsOfSystemTime(), backupPath)
-
-	makeExpander := func(t *testing.T, extra [][2]string) func(string) string {
-		namesAndIDs := sqlDB.QueryStr(t,
-			`
-SELECT
-	replace(replace(ltrim(concat(db.name, '.', schema.name, '.', descriptor.name, '.id'), '.'), '..', '.'), '.', '_'),
-	descriptor.id
-FROM
-	(SELECT * FROM system.namespace) AS descriptor
-	LEFT JOIN (SELECT * FROM system.namespace) AS schema ON schema.id = descriptor."parentSchemaID"
-	LEFT JOIN (SELECT * FROM system.namespace) AS db ON db.id = descriptor."parentID";`)
-		expander := make(map[string]string, len(namesAndIDs))
-		for _, row := range namesAndIDs {
-			expander[row[0]] = row[1]
-		}
-		for _, e := range extra {
-			expander[e[0]] = e[1]
-		}
-		return func(s string) string {
-			return os.Expand(s, func(key string) string {
-				if replace, ok := expander[key]; ok {
-					return replace
-				}
-				return key
-			})
-		}
-	}
-	// Unmarshal the json to make the object order-invariant in terms of their
-	// keys.
-	checkJSONOutputEqual := func(t *testing.T, expected, out string) {
-		var expectedMap, gotMap interface{}
-		require.NoError(t, gojson.Unmarshal([]byte(expected), &expectedMap))
-		require.NoError(t, gojson.Unmarshal([]byte(trimFirstLine(out)), &gotMap))
-		require.EqualValues(t, expectedMap, gotMap)
-	}
-	t.Run("show-summary-without-types-or-tables", func(t *testing.T) {
-		setDebugContextDefault()
-		out, err := c.RunWithCapture(fmt.Sprintf("debug backup show %s --external-io-dir=%s", dbOnlyBackupPath, dir))
-		require.NoError(t, err)
-		expectedOutput :=
-			`{
-	"StartTime": "1970-01-01T00:00:00Z",
-	"EndTime": "${end_time}",
-	"DataSize": "0 B",
-	"Rows": 0,
-	"FunctionDescriptors": {},
-	"IndexEntries": 0,
-	"FormatVersion": 1,
-	"ClusterID": "${cluster_id}",
-	"NodeID": 0,
-	"BuildInfo": "${build_info}",
-	"Files": [],
-	"Spans": "[]",
-	"DatabaseDescriptors": {
-		"${testdb_id}": "testdb"
-	},
-	"TableDescriptors": {},
-	"TypeDescriptors": {},
-	"SchemaDescriptors": {
-		"29": "public",
-		"${testdb_public_id}": "testdb.public"
-	}
-}
-`
-		checkJSONOutputEqual(t, makeExpander(t, [][2]string{
-			{"cluster_id", srv.RPCContext().LogicalClusterID.Get().String()},
-			{"end_time", ts1.GoTime().Format(time.RFC3339)},
-			{"build_info", build.GetInfo().Short()},
-		})(expectedOutput), out)
-	})
-
-	t.Run("show-summary-with-full-information", func(t *testing.T) {
-		setDebugContextDefault()
-		out, err := c.RunWithCapture(fmt.Sprintf("debug backup show %s --external-io-dir=%s", backupPath, dir))
-		require.NoError(t, err)
-
-		var sstFile string
-		rows := sqlDB.Query(t, `select path from [show backup files $1]`, backupPath)
-		defer rows.Close()
-		if !rows.Next() {
-			require.NoError(t, rows.Err())
-			t.Fatal("expected at least 1 row")
-		}
-		err = rows.Scan(&sstFile)
-		require.NoError(t, err)
-
-		expectedOutput := `{
-	"StartTime": "1970-01-01T00:00:00Z",
-	"EndTime": "${end_time}",
-	"DataSize": "21 B",
-	"Rows": 1,
-	"FunctionDescriptors": {},
-	"IndexEntries": 0,
-	"FormatVersion": 1,
-	"ClusterID": "${cluster_id}",
-	"NodeID": 0,
-	"BuildInfo": "${build_info}",
-	"Files": [
-		{
-			"Path": "${sst_file}",
-			"Span": "/Table/${testdb_testschema_footable_id}/{1-2}",
-			"DataSize": "21 B",
-			"IndexEntries": 0,
-			"Rows": 1
-		}
-	],
-	"Spans": "[/Table/${testdb_public_footable_id}/{1-2} /Table/${testdb_testschema_footable_id}/{1-2}]",
-	"DatabaseDescriptors": {
-		"${testdb_id}": "testdb"
-	},
-	"TableDescriptors": {
-		"${testdb_public_footable_id}": "testdb.public.footable",
-		"${testdb_testschema_footable_id}": "testdb.testschema.footable"
-	},
-	"TypeDescriptors": {
-		"${testdb_public_footype_id}": "testdb.public.footype",
-		"${testdb_public__footype_id}": "testdb.public._footype",
-		"${testdb_testschema_footype_id}": "testdb.testschema.footype",
-		"${testdb_testschema__footype_id}": "testdb.testschema._footype"
-	},
-	"SchemaDescriptors": {
-		"29": "public",
-		"${testdb_public_id}": "testdb.public",
-		"${testdb_testschema_id}": "testdb.testschema"
-	}
-}
-`
-		checkJSONOutputEqual(t, makeExpander(t, [][2]string{
-			{"cluster_id", srv.RPCContext().LogicalClusterID.Get().String()},
-			{"end_time", ts2.GoTime().Format(time.RFC3339)},
-			{"build_info", build.GetInfo().Short()},
-			{"sst_file", sstFile},
-		})(expectedOutput), out)
-	})
-}
-
-func TestListBackups(t *testing.T) {
-	defer leaktest.AfterTest(t)()
-	defer log.Scope(t).Close(t)
-
-	c := cli.NewCLITest(cli.TestCLIParams{T: t, NoServer: true})
-	defer c.Cleanup()
-
-	ctx := context.Background()
-	dir, cleanFn := testutils.TempDir(t)
-	defer cleanFn()
-	srv, db, _ := serverutils.StartServer(t, base.TestServerArgs{ExternalIODir: dir, Insecure: true})
-	defer srv.Stopper().Stop(ctx)
-
-	sqlDB := sqlutils.MakeSQLRunner(db)
-	sqlDB.Exec(t, `CREATE DATABASE testDB`)
-	sqlDB.Exec(t, `USE testDB`)
-	const backupPath = "nodelocal://0/fooFolder"
-
-	ts := generateBackupTimestamps(3)
-	sqlDB.Exec(t, fmt.Sprintf(`BACKUP DATABASE testDB INTO $1 AS OF SYSTEM TIME '%s'`, ts[0].AsOfSystemTime()), backupPath)
-	sqlDB.Exec(t, fmt.Sprintf(`BACKUP DATABASE testDB INTO $1 AS OF SYSTEM TIME '%s'`, ts[1].AsOfSystemTime()), backupPath)
-	sqlDB.Exec(t, fmt.Sprintf(`BACKUP DATABASE testDB INTO $1 AS OF SYSTEM TIME '%s'`, ts[2].AsOfSystemTime()), backupPath)
-
-	t.Run("show-backups-with-backups-in-collection", func(t *testing.T) {
-		setDebugContextDefault()
-		out, err := c.RunWithCapture(fmt.Sprintf("debug backup list-backups %s --external-io-dir=%s", backupPath, dir))
-		require.NoError(t, err)
-
-		var buf bytes.Buffer
-		rows := [][]string{
-			{"." + ts[0].GoTime().Format(backupbase.DateBasedIntoFolderName)},
-			{"." + ts[1].GoTime().Format(backupbase.DateBasedIntoFolderName)},
-			{"." + ts[2].GoTime().Format(backupbase.DateBasedIntoFolderName)},
-		}
-		cols := []string{"path"}
-		rowSliceIter := clisqlexec.NewRowSliceIter(rows, "l" /*align*/)
-		if err := cli.PrintQueryOutput(&buf, cols, rowSliceIter); err != nil {
-			t.Fatalf("TestListBackups: PrintQueryOutput: %v", err)
-		}
-		checkExpectedOutput(t, buf.String(), out)
-	})
-}
-
-func TestListIncremental(t *testing.T) {
-	defer leaktest.AfterTest(t)()
-	defer log.Scope(t).Close(t)
-
-	c := cli.NewCLITest(cli.TestCLIParams{T: t, NoServer: true})
-	defer c.Cleanup()
-
-	ctx := context.Background()
-	dir, cleanFn := testutils.TempDir(t)
-	defer cleanFn()
-	srv, db, _ := serverutils.StartServer(t, base.TestServerArgs{ExternalIODir: dir, Insecure: true})
-	defer srv.Stopper().Stop(ctx)
-
-	sqlDB := sqlutils.MakeSQLRunner(db)
-	sqlDB.Exec(t, `CREATE DATABASE testDB`)
-	sqlDB.Exec(t, `USE testDB`)
-	const backupPath = "nodelocal://0/fooFolder"
-	ts := generateBackupTimestamps(3)
-	sqlDB.Exec(t, fmt.Sprintf(`BACKUP DATABASE testDB TO $1 AS OF SYSTEM TIME '%s'`, ts[0].AsOfSystemTime()), backupPath)
-	sqlDB.Exec(t, fmt.Sprintf(`BACKUP DATABASE testDB TO $1 AS OF SYSTEM TIME '%s'`, ts[1].AsOfSystemTime()), backupPath)
-	sqlDB.Exec(t, fmt.Sprintf(`BACKUP DATABASE testDB TO $1 AS OF SYSTEM TIME '%s'`, ts[2].AsOfSystemTime()), backupPath)
-
-	out, err := c.RunWithCapture(fmt.Sprintf("debug backup list-incremental %s --external-io-dir=%s", backupPath, dir))
-	require.NoError(t, err)
-	expectedIncFolder := ts[1].GoTime().Format(backupbase.DateBasedIncFolderName)
-	expectedIncFolder2 := ts[2].GoTime().Format(backupbase.DateBasedIncFolderName)
-
-	var buf bytes.Buffer
-	rows := [][]string{
-		{"/fooFolder", "-", ts[0].GoTime().Format(time.RFC3339)},
-		{"/fooFolder/incrementals" + expectedIncFolder, ts[0].GoTime().Format(time.RFC3339), ts[1].GoTime().Format(time.RFC3339)},
-		{"/fooFolder/incrementals" + expectedIncFolder2, ts[1].GoTime().Format(time.RFC3339), ts[2].GoTime().Format(time.RFC3339)},
-	}
-	cols := []string{"path", "start time", "end time"}
-	rowSliceIter := clisqlexec.NewRowSliceIter(rows, "lll" /*align*/)
-	if err := cli.PrintQueryOutput(&buf, cols, rowSliceIter); err != nil {
-		t.Fatalf("TestListIncremental: PrintQueryOutput: %v", err)
-	}
-	checkExpectedOutput(t, buf.String(), out)
-}
-
-func TestExportData(t *testing.T) {
-	defer leaktest.AfterTest(t)()
-	defer log.Scope(t).Close(t)
-
-	c := cli.NewCLITest(cli.TestCLIParams{T: t, NoServer: true})
-	defer c.Cleanup()
-
-	ctx := context.Background()
-	dir, cleanFn := testutils.TempDir(t)
-	defer cleanFn()
-	srv, db, _ := serverutils.StartServer(t, base.TestServerArgs{ExternalIODir: dir, Insecure: true})
-	defer srv.Stopper().Stop(ctx)
-
-	sqlDB := sqlutils.MakeSQLRunner(db)
-	sqlDB.Exec(t, `CREATE DATABASE testDB`)
-	sqlDB.Exec(t, `USE testDB`)
-	sqlDB.Exec(t, `CREATE SCHEMA testDB.testschema`)
-	sqlDB.Exec(t, `CREATE TABLE fooTable (id INT PRIMARY KEY, value INT, tag STRING)`)
-	sqlDB.Exec(t, `INSERT INTO fooTable VALUES (1, 123, 'cat')`)
-	sqlDB.Exec(t, `CREATE TABLE testDB.testschema.fooTable (id INT PRIMARY KEY, value INT, tag STRING)`)
-	sqlDB.Exec(t, `INSERT INTO testDB.testschema.fooTable VALUES (2, 223, 'dog')`)
-
-	const backupPublicSchemaPath = "nodelocal://0/fooFolder/public"
-	sqlDB.Exec(t, `BACKUP TABLE testDB.public.fooTable TO $1 `, backupPublicSchemaPath)
-
-	const backupTestSchemaPath = "nodelocal://0/fooFolder/test"
-	ts := hlc.Timestamp{WallTime: timeutil.Now().UnixNano()}
-	sqlDB.Exec(t, fmt.Sprintf(`BACKUP TABLE testDB.testschema.fooTable TO $1 AS OF SYSTEM TIME '%s'`, ts.AsOfSystemTime()), backupTestSchemaPath)
-
-	sqlDB.Exec(t, `INSERT INTO testDB.testschema.fooTable VALUES (3, 333, 'mickey mouse')`)
-	ts1 := hlc.Timestamp{WallTime: timeutil.Now().UnixNano()}
-	sqlDB.Exec(t, fmt.Sprintf(`BACKUP TABLE testDB.testschema.fooTable TO $1 AS OF SYSTEM TIME '%s'`, ts1.AsOfSystemTime()), backupTestSchemaPath)
-
-	sqlDB.Exec(t, `INSERT INTO testDB.testschema.fooTable(id) SELECT * FROM generate_series(4,30)`)
-
-	sqlDB.Exec(t, `ALTER TABLE fooTable SPLIT AT VALUES (10), (20)`)
-	var rangeNum int
-	sqlDB.QueryRow(t, `SELECT count(*) from [SHOW RANGES from TABLE fooTable]`).Scan(&rangeNum)
-	require.Equal(t, 3, rangeNum)
-
-	ts2 := hlc.Timestamp{WallTime: timeutil.Now().UnixNano()}
-	sqlDB.Exec(t, fmt.Sprintf(`BACKUP TABLE testDB.testschema.fooTable TO $1 AS OF SYSTEM TIME '%s'`, ts2.AsOfSystemTime()), backupTestSchemaPath)
-
-	testCasesOnError := []struct {
-		name           string
-		tableName      string
-		backupPaths    []string
-		expectedOutput string
-	}{
-		{
-			"show-data-with-not-qualified-name",
-			"fooTable",
-			[]string{backupTestSchemaPath},
-			"ERROR: fetching entry: table name should be specified in format databaseName.schemaName.tableName\n",
-		}, {
-			"show-data-fail-with-not-found-table-of-public-schema",
-			"testDB.public.fooTable",
-			[]string{backupTestSchemaPath},
-			"ERROR: fetching entry: table testdb.public.footable not found\n",
-		}, {
-			"show-data-fail-with-not-found-table-of-user-defined-schema",
-			"testDB.testschema.fooTable",
-			[]string{backupPublicSchemaPath},
-			"ERROR: fetching entry: table testdb.testschema.footable not found\n",
-		}, {
-			"show-data-fail-without-table-specified",
-			"",
-			[]string{backupPublicSchemaPath},
-			"ERROR: export data requires table name specified by --table flag\n",
-		},
-	}
-	for _, tc := range testCasesOnError {
-		t.Run(tc.name, func(t *testing.T) {
-			setDebugContextDefault()
-			out, err := c.RunWithCapture(fmt.Sprintf("debug backup export %s --table=%s --external-io-dir=%s",
-				strings.Join(tc.backupPaths, " "),
-				tc.tableName,
-				dir))
-			require.NoError(t, err)
-			checkExpectedOutput(t, tc.expectedOutput, out)
-		})
-	}
-
-	testCasesDatumOutput := []struct {
-		name           string
-		tableName      string
-		backupPaths    []string
-		expectedDatums string
-		flags          string
-		skip           bool
-	}{
-		{
-			name:           "show-data-with-qualified-table-name-of-user-defined-schema",
-			tableName:      "testDB.testschema.fooTable",
-			backupPaths:    []string{backupTestSchemaPath},
-			expectedDatums: "2,223,'dog'\n",
-		},
-		{
-			name:           "show-data-with-qualified-table-name-of-public-schema",
-			tableName:      "testDB.public.fooTable",
-			backupPaths:    []string{backupPublicSchemaPath},
-			expectedDatums: "1,123,'cat'\n",
-		}, {
-			name:           "show-data-of-incremental-backup",
-			tableName:      "testDB.testschema.fooTable",
-			backupPaths:    []string{backupTestSchemaPath, backupTestSchemaPath + "/incrementals" + ts1.GoTime().Format(backupbase.DateBasedIncFolderName)},
-			expectedDatums: "2,223,'dog'\n3,333,'mickey mouse'\n",
-		}, {
-			name:           "show-data-of-incremental-backup-with-maxRows-flag",
-			tableName:      "testDB.testschema.fooTable",
-			backupPaths:    []string{backupTestSchemaPath, backupTestSchemaPath + "/incrementals" + ts1.GoTime().Format(backupbase.DateBasedIncFolderName), backupTestSchemaPath + "/incrementals" + ts2.GoTime().Format(backupbase.DateBasedIncFolderName)},
-			expectedDatums: "2,223,'dog'\n3,333,'mickey mouse'\n4,null,null\n",
-			flags:          "--max-rows=3",
-		}, {
-			name:           "show-data-of-incremental-backup-with-maxRows-larger-than-total-rows-of-data",
-			tableName:      "testDB.testschema.fooTable",
-			backupPaths:    []string{backupTestSchemaPath, backupTestSchemaPath + "/incrementals" + ts1.GoTime().Format(backupbase.DateBasedIncFolderName), backupTestSchemaPath + "/incrementals" + ts2.GoTime().Format(backupbase.DateBasedIncFolderName)},
-			expectedDatums: "2,223,'dog'\n3,333,'mickey mouse'\n" + generateRows(4, 27),
-			flags:          "--max-rows=300",
-		},
-		{
-			name:           "show-data-of-incremental-backup-with-start-key-specified",
-			tableName:      "testDB.testschema.fooTable",
-			backupPaths:    []string{backupTestSchemaPath, backupTestSchemaPath + "/incrementals" + ts1.GoTime().Format(backupbase.DateBasedIncFolderName), backupTestSchemaPath + "/incrementals" + ts2.GoTime().Format(backupbase.DateBasedIncFolderName)},
-			expectedDatums: generateRows(5, 26),
-			flags:          "--start-key=raw:\\xbf\\x89\\x8c\\x8c",
-			skip:           true,
-		},
-		{
-			name:           "show-data-of-incremental-backup-with-start-key-and-max-rows-specified",
-			tableName:      "testDB.testschema.fooTable",
-			backupPaths:    []string{backupTestSchemaPath, backupTestSchemaPath + "/incrementals" + ts1.GoTime().Format(backupbase.DateBasedIncFolderName), backupTestSchemaPath + "/incrementals" + ts2.GoTime().Format(backupbase.DateBasedIncFolderName)},
-			expectedDatums: generateRows(5, 6),
-			flags:          "--start-key=raw:\\xbf\\x89\\x8c\\x8c --max-rows=6",
-			skip:           true,
-		}, {
-			name:           "show-data-of-incremental-backup-of-multiple-entries-with-start-key-and-max-rows-specified",
-			tableName:      "testDB.testschema.fooTable",
-			backupPaths:    []string{backupTestSchemaPath, backupTestSchemaPath + "/incrementals" + ts1.GoTime().Format(backupbase.DateBasedIncFolderName), backupTestSchemaPath + "/incrementals" + ts2.GoTime().Format(backupbase.DateBasedIncFolderName)},
-			expectedDatums: generateRows(5, 20),
-			flags:          "--start-key=raw:\\xbf\\x89\\x8c\\x8c --max-rows=20",
-			skip:           true,
-		},
-		{
-			name:           "show-data-of-incremental-backup-with-start-key-of-bytekey-format-and-max-rows-specified",
"show-data-of-incremental-backup-with-start-key-of-bytekey-format-and-max-rows-specified", - tableName: "testDB.testschema.fooTable", - backupPaths: []string{backupTestSchemaPath, backupTestSchemaPath + "/incrementals" + ts1.GoTime().Format(backupbase.DateBasedIncFolderName), backupTestSchemaPath + "/incrementals" + ts2.GoTime().Format(backupbase.DateBasedIncFolderName)}, - expectedDatums: generateRows(5, 2), - flags: "--start-key=bytekey:\\x8c\\x8c --max-rows=2", - }, { - name: "show-data-of-incremental-backup-with-start-key-of-hex-format-and-max-rows-specified", - tableName: "testDB.testschema.fooTable", - backupPaths: []string{backupTestSchemaPath, backupTestSchemaPath + "/incrementals" + ts1.GoTime().Format(backupbase.DateBasedIncFolderName), backupTestSchemaPath + "/incrementals" + ts2.GoTime().Format(backupbase.DateBasedIncFolderName)}, - expectedDatums: generateRows(5, 2), - flags: "--start-key=hex:bf898c8c --max-rows=2", - skip: true, - }, - } - - for _, tc := range testCasesDatumOutput { - // TODO(richardjcai): Figure out how to update the start-key to reflect - // the fact that the public schema change bumps up IDs of tables. - // https://github.com/cockroachdb/cockroach/issues/72592 - if tc.skip { - continue - } - t.Run(tc.name, func(t *testing.T) { - setDebugContextDefault() - out, err := c.RunWithCapture(fmt.Sprintf("debug backup export %s --table=%s --external-io-dir=%s %s", - strings.Join(tc.backupPaths, " "), - tc.tableName, - dir, - tc.flags)) - require.NoError(t, err) - checkExpectedOutput(t, tc.expectedDatums, out) - }) - } -} - -func TestExportDataWithMultipleRanges(t *testing.T) { - defer leaktest.AfterTest(t)() - defer log.Scope(t).Close(t) - - c := cli.NewCLITest(cli.TestCLIParams{T: t, NoServer: true}) - defer c.Cleanup() - - ctx := context.Background() - dir, cleanFn := testutils.TempDir(t) - defer cleanFn() - srv, db, _ := serverutils.StartServer(t, base.TestServerArgs{ExternalIODir: dir, Insecure: true}) - defer srv.Stopper().Stop(ctx) - - sqlDB := sqlutils.MakeSQLRunner(db) - // the small test-case will get entirely buffered/merged by small-file merging - // and mean there one only be a single file. 
-	sqlDB.Exec(t, `SET CLUSTER SETTING bulkio.backup.file_size = '1'`)
-	sqlDB.Exec(t, `CREATE DATABASE testDB`)
-	sqlDB.Exec(t, `USE testDB`)
-	sqlDB.Exec(t, `CREATE TABLE fooTable(id int PRIMARY KEY)`)
-	sqlDB.Exec(t, `INSERT INTO fooTable select * from generate_series(1,10)`)
-	sqlDB.Exec(t, `ALTER TABLE fooTable SPLIT AT VALUES (2), (5), (7)`)
-
-	const backupPath = "nodelocal://0/fooFolder"
-	sqlDB.Exec(t, `BACKUP TABLE fooTable TO $1 `, backupPath)
-
-	var rangeNum int
-	sqlDB.QueryRow(t, `SELECT count(*) from [SHOW RANGES from TABLE fooTable]`).Scan(&rangeNum)
-	require.Equal(t, 4, rangeNum)
-	sqlDB.QueryRow(t, `SELECT count(*) from [SHOW BACKUP FILES $1]`, backupPath).Scan(&rangeNum)
-	require.Equal(t, 4, rangeNum)
-
-	sqlDB.Exec(t, `ALTER TABLE fooTable ADD COLUMN active BOOL DEFAULT false`)
-	sqlDB.Exec(t, `INSERT INTO fooTable select * from generate_series(11,15)`)
-	ts := hlc.Timestamp{WallTime: timeutil.Now().UnixNano()}
-	sqlDB.Exec(t, fmt.Sprintf(`BACKUP TABLE fooTable TO $1 AS OF SYSTEM TIME '%s'`, ts.AsOfSystemTime()), backupPath)
-
-	sqlDB.QueryRow(t, `SELECT count(*) from [SHOW RANGES from TABLE fooTable]`).Scan(&rangeNum)
-	require.Equal(t, 1, rangeNum)
-	sqlDB.QueryRow(t, `SELECT count(*) from [SHOW BACKUP FILES $1]`, backupPath).Scan(&rangeNum)
-	require.Equal(t, 5, rangeNum)
-
-	t.Run("export-data-with-multiple-ranges", func(t *testing.T) {
-		setDebugContextDefault()
-		out, err := c.RunWithCapture(fmt.Sprintf("debug backup export %s --table=testDB.public.fooTable --external-io-dir=%s",
-			backupPath,
-			dir))
-		require.NoError(t, err)
-		var expectedOut string
-		for i := 1; i <= 10; i++ {
-			expectedOut = fmt.Sprintf("%s%d\n", expectedOut, i)
-		}
-		checkExpectedOutput(t, expectedOut, out)
-	})
-
-	t.Run("export-data-with-multiple-ranges-in-incremental-backups", func(t *testing.T) {
-		setDebugContextDefault()
-		out, err := c.RunWithCapture(fmt.Sprintf("debug backup export %s %s --table=testDB.public.fooTable --external-io-dir=%s",
-			backupPath, backupPath+"/incrementals"+ts.GoTime().Format(backupbase.DateBasedIncFolderName),
-			dir))
-		require.NoError(t, err)
-		var expectedOut string
-		for i := 1; i <= 15; i++ {
-			expectedOut = fmt.Sprintf("%s%d,false\n", expectedOut, i)
-		}
-		checkExpectedOutput(t, expectedOut, out)
-	})
-}
-
-func TestExportDataAOST(t *testing.T) {
-	defer leaktest.AfterTest(t)()
-	defer log.Scope(t).Close(t)
-
-	c := cli.NewCLITest(cli.TestCLIParams{T: t, NoServer: true})
-	defer c.Cleanup()
-
-	ctx := context.Background()
-	dir, cleanFn := testutils.TempDir(t)
-	defer cleanFn()
-	srv, db, _ := serverutils.StartServer(t,
-		base.TestServerArgs{
-			ExternalIODir: dir,
-			Insecure:      true,
-			// Have to disable testing in MT mode until backups with revision
-			// history are supported for encapsulated tenants. Tracked with
-			// #76378.
- DisableDefaultTestTenant: true})
- defer srv.Stopper().Stop(ctx)
-
- sqlDB := sqlutils.MakeSQLRunner(db)
- sqlDB.Exec(t, `CREATE DATABASE testDB`)
- sqlDB.Exec(t, `USE testDB`)
- sqlDB.Exec(t, `CREATE TABLE fooTable (id INT PRIMARY KEY, value INT, tag STRING)`)
-
- sqlDB.Exec(t, `CREATE SCHEMA fooschema`)
- sqlDB.Exec(t, `CREATE TABLE fooschema.fooTable (id INT PRIMARY KEY, value INT, tag STRING, FAMILY f1 (value, tag))`)
-
- const backupPath = "nodelocal://0/fooFolder"
- const backupPathWithRev = "nodelocal://0/fooFolderRev"
-
- sqlDB.Exec(t, `INSERT INTO fooTable VALUES (1, 123, 'cat')`)
- sqlDB.Exec(t, `INSERT INTO fooschema.fooTable VALUES (1, 123, 'foo cat'),(7, 723, 'cockroach')`)
- ts := hlc.Timestamp{WallTime: timeutil.Now().UnixNano()}
-
- sqlDB.Exec(t, `INSERT INTO fooTable VALUES (2, 223, 'dog')`)
- sqlDB.Exec(t, `DELETE FROM fooschema.fooTable WHERE id=7`)
- ts1 := hlc.Timestamp{WallTime: timeutil.Now().UnixNano()}
- sqlDB.Exec(t, fmt.Sprintf(`BACKUP TO $1 AS OF SYSTEM TIME '%s'`, ts1.AsOfSystemTime()), backupPath)
- sqlDB.Exec(t, fmt.Sprintf(`BACKUP TO $1 AS OF SYSTEM TIME '%s' WITH revision_history`, ts1.AsOfSystemTime()), backupPathWithRev)
-
- sqlDB.Exec(t, `INSERT INTO fooTable VALUES (3, 323, 'mickey mouse')`)
- sqlDB.Exec(t, `INSERT INTO fooschema.fooTable VALUES (3, 323, 'foo mickey mouse')`)
- ts2BeforeSchemaChange := hlc.Timestamp{WallTime: timeutil.Now().UnixNano()}
- sqlDB.Exec(t, `ALTER TABLE fooTable ADD COLUMN active BOOL`)
- ts2 := hlc.Timestamp{WallTime: timeutil.Now().UnixNano()}
- sqlDB.Exec(t, fmt.Sprintf(`BACKUP TO $1 AS OF SYSTEM TIME '%s'`, ts2.AsOfSystemTime()), backupPath)
- sqlDB.Exec(t, fmt.Sprintf(`BACKUP TO $1 AS OF SYSTEM TIME '%s' WITH revision_history`, ts2.AsOfSystemTime()), backupPathWithRev)
-
- sqlDB.Exec(t, `DELETE FROM fooTable WHERE id=3`)
- ts3AfterDeletion := hlc.Timestamp{WallTime: timeutil.Now().UnixNano()}
- sqlDB.Exec(t, `UPDATE fooTable SET active=(TRUE) WHERE id = 1`)
- ts3 := hlc.Timestamp{WallTime: timeutil.Now().UnixNano()}
- sqlDB.Exec(t, fmt.Sprintf(`BACKUP TO $1 AS OF SYSTEM TIME '%s'`, ts3.AsOfSystemTime()), backupPath)
- sqlDB.Exec(t, fmt.Sprintf(`BACKUP TO $1 AS OF SYSTEM TIME '%s' WITH revision_history`, ts3.AsOfSystemTime()), backupPathWithRev)
-
- t.Run("show-data-as-of-an-uncovered-timestamp", func(t *testing.T) {
- setDebugContextDefault()
- tsNotCovered := hlc.Timestamp{WallTime: timeutil.Now().UnixNano()}
- out, err := c.RunWithCapture(fmt.Sprintf("debug backup export %s --table=%s --as-of=%s --external-io-dir=%s",
- backupPath,
- "testDB.public.fooTable",
- tsNotCovered.AsOfSystemTime(),
- dir))
- require.NoError(t, err)
- expectedError := fmt.Sprintf(
- "ERROR: fetching entry: supplied backups do not cover requested time %s\n",
- timeutil.Unix(0, tsNotCovered.WallTime).UTC())
- checkExpectedOutput(t, expectedError, out)
- })
-
- t.Run("show-data-as-of-non-backup-ts-should-return-error", func(t *testing.T) {
- setDebugContextDefault()
- out, err := c.RunWithCapture(fmt.Sprintf("debug backup export %s --table=%s --as-of=%s --external-io-dir=%s",
- backupPath,
- "testDB.public.fooTable",
- ts.AsOfSystemTime(),
- dir))
- require.NoError(t, err)
- expectedError := fmt.Sprintf(
- "ERROR: fetching entry: unknown read time: %s\n"+
- "HINT: reading data for requested time requires that BACKUP was created with %q "+
- "or should specify the time to be an exact backup time, nearest backup time is %s\n",
- timeutil.Unix(0, ts.WallTime).UTC(),
- backupOptRevisionHistory,
- timeutil.Unix(0, ts1.WallTime).UTC())
- checkExpectedOutput(t, expectedError, out)
- })
-
- testCases := []struct {
- name string
- tableName string
- backupPaths []string
- asof string
- expectedData string
- }{
- {
- name: "show-data-of-public-schema-without-as-of-time",
- tableName: "testDB.public.fooTable",
- backupPaths: []string{
- backupPath,
- backupPath + "/incrementals" + ts2.GoTime().Format(backupbase.DateBasedIncFolderName),
- backupPath + "/incrementals" + ts3.GoTime().Format(backupbase.DateBasedIncFolderName)},
- expectedData: "1,123,'cat',true\n2,223,'dog',null\n",
- },
- {
- name: "show-data-as-of-a-single-full-backup-timestamp",
- tableName: "testDB.public.fooTable",
- backupPaths: []string{backupPath},
- asof: ts1.AsOfSystemTime(),
- expectedData: "1,123,'cat'\n2,223,'dog'\n",
- },
- {
- name: "show-data-of-public-schema-as-of-the-first-backup-timestamp-should-work-in-a-chain-of-incremental-backups",
- tableName: "testDB.public.fooTable",
- backupPaths: []string{
- backupPath,
- backupPath + "/incrementals" + ts2.GoTime().Format(backupbase.DateBasedIncFolderName),
- backupPath + "/incrementals" + ts3.GoTime().Format(backupbase.DateBasedIncFolderName)},
- asof: ts1.AsOfSystemTime(),
- expectedData: "1,123,'cat'\n2,223,'dog'\n",
- },
- {
- name: "show-data-of-public-schema-as-of-the-second-backup-timestamp-should-work-in-a-chain-of-incremental-backups",
- tableName: "testDB.public.fooTable",
- backupPaths: []string{
- backupPath,
- backupPath + "/incrementals" + ts2.GoTime().Format(backupbase.DateBasedIncFolderName),
- backupPath + "/incrementals" + ts3.GoTime().Format(backupbase.DateBasedIncFolderName),
- },
- asof: ts2.AsOfSystemTime(),
- expectedData: "1,123,'cat',null\n2,223,'dog',null\n3,323,'mickey mouse',null\n",
- },
- {
- name: "show-data-of-foo-schema-as-of-the-second-backup-timestamp-should-work-in-a-chain-of-incremental-backups",
- tableName: "testDB.fooschema.fooTable",
- backupPaths: []string{
- backupPath,
- backupPath + "/incrementals" + ts2.GoTime().Format(backupbase.DateBasedIncFolderName),
- backupPath + "/incrementals" + ts3.GoTime().Format(backupbase.DateBasedIncFolderName),
- },
- asof: ts2.AsOfSystemTime(),
- expectedData: "1,123,'foo cat'\n3,323,'foo mickey mouse'\n",
- },
- {
- name: "show-data-of-public-schema-as-of-the-third-backup-timestamp-should-work-in-a-chain-of-incremental-backups",
- tableName: "testDB.public.fooTable",
- backupPaths: []string{
- backupPath,
- backupPath + "/incrementals" + ts2.GoTime().Format(backupbase.DateBasedIncFolderName),
- backupPath + "/incrementals" + ts3.GoTime().Format(backupbase.DateBasedIncFolderName),
- },
- asof: ts3.AsOfSystemTime(),
- expectedData: "1,123,'cat',true\n2,223,'dog',null\n",
- },
- {
- name: "show-data-with-rev-history-as-of-time-after-first-insertion-should-work-in-a-single-full-backup",
- tableName: "testDB.fooschema.fooTable",
- backupPaths: []string{backupPathWithRev},
- asof: ts.AsOfSystemTime(),
- expectedData: "1,123,'foo cat'\n7,723,'cockroach'\n",
- },
- {
- name: "show-data-with-rev-history-as-of-time-after-deletion-should-work-in-a-single-full-backup",
- tableName: "testDB.fooschema.fooTable",
- backupPaths: []string{backupPathWithRev},
- asof: ts1.AsOfSystemTime(),
- expectedData: "1,123,'foo cat'\n",
- },
- {
- name: "show-data-with-rev-history-as-of-time-after-first-insertion-should-work-in-a-chain-of-backups",
- tableName: "testDB.fooschema.fooTable",
- backupPaths: []string{
- backupPathWithRev,
- backupPathWithRev + "/incrementals" + ts2.GoTime().Format(backupbase.DateBasedIncFolderName),
backupPathWithRev + "/incrementals" + ts3.GoTime().Format(backupbase.DateBasedIncFolderName)}, - asof: ts.AsOfSystemTime(), - expectedData: "1,123,'foo cat'\n7,723,'cockroach'\n", - }, - { - name: "show-data-with-rev-history-as-of-time-after-deteletion-should-work-in-a-chain-of-backups", - tableName: "testDB.fooschema.fooTable", - backupPaths: []string{ - backupPathWithRev, - backupPathWithRev + "/incrementals" + ts2.GoTime().Format(backupbase.DateBasedIncFolderName), - backupPathWithRev + "/incrementals" + ts3.GoTime().Format(backupbase.DateBasedIncFolderName)}, - asof: ts1.AsOfSystemTime(), - expectedData: "1,123,'foo cat'\n", - }, - { - name: "show-data-with-rev-history-as-of-time-before-schema-changes-should-work-in-a-chain-of-backups", - tableName: "testDB.public.fooTable", - backupPaths: []string{ - backupPathWithRev, - backupPathWithRev + "/incrementals" + ts2.GoTime().Format(backupbase.DateBasedIncFolderName), - backupPathWithRev + "/incrementals" + ts3.GoTime().Format(backupbase.DateBasedIncFolderName)}, - asof: ts2BeforeSchemaChange.AsOfSystemTime(), - expectedData: "1,123,'cat'\n2,223,'dog'\n3,323,'mickey mouse'\n", - }, - { - name: "show-data-with-rev-history-history-as-of-time-after-schema-changes-should-work-in-a-chain-of-backups", - tableName: "testDB.public.fooTable", - backupPaths: []string{ - backupPathWithRev, - backupPathWithRev + "/incrementals" + ts2.GoTime().Format(backupbase.DateBasedIncFolderName), - backupPathWithRev + "/incrementals" + ts3.GoTime().Format(backupbase.DateBasedIncFolderName)}, - asof: ts2.AsOfSystemTime(), - expectedData: "1,123,'cat',null\n2,223,'dog',null\n3,323,'mickey mouse',null\n", - }, - { - name: "show-data-with-rev-history-as-of-time-after-deletion-should-work-in-a-chain-of-backups", - tableName: "testDB.public.fooTable", - backupPaths: []string{ - backupPathWithRev, - backupPathWithRev + "/incrementals" + ts2.GoTime().Format(backupbase.DateBasedIncFolderName), - backupPathWithRev + "/incrementals" + ts3.GoTime().Format(backupbase.DateBasedIncFolderName)}, - asof: ts3AfterDeletion.AsOfSystemTime(), - expectedData: "1,123,'cat',null\n2,223,'dog',null\n", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - setDebugContextDefault() - out, err := c.RunWithCapture(fmt.Sprintf("debug backup export %s --table=%s --as-of=%s --external-io-dir=%s ", - strings.Join(tc.backupPaths, " "), - tc.tableName, - tc.asof, - dir)) - require.NoError(t, err) - checkExpectedOutput(t, tc.expectedData, out) - }) - } -} - -func TestExportDataWithRevisions(t *testing.T) { - defer leaktest.AfterTest(t)() - defer log.Scope(t).Close(t) - - c := cli.NewCLITest(cli.TestCLIParams{T: t, NoServer: true}) - defer c.Cleanup() - - ctx := context.Background() - dir, cleanFn := testutils.TempDir(t) - defer cleanFn() - srv, db, _ := serverutils.StartServer(t, - base.TestServerArgs{ - ExternalIODir: dir, - Insecure: true, - // Have to disable testing in MT mode until backups with revision - // history are supported for encapsulated tenants. Tracked with - // #76378. 
- DisableDefaultTestTenant: true})
- defer srv.Stopper().Stop(ctx)
-
- sqlDB := sqlutils.MakeSQLRunner(db)
- sqlDB.Exec(t, `CREATE DATABASE testDB`)
- sqlDB.Exec(t, `USE testDB`)
- sqlDB.Exec(t, `CREATE TABLE fooTable (id INT PRIMARY KEY, value INT, tag STRING)`)
-
- const backupPath = "nodelocal://0/fooFolder"
- const backupPathWithRev = "nodelocal://0/fooFolderRev"
-
- sqlDB.Exec(t, `INSERT INTO fooTable VALUES (1, 123, 'cat')`)
- var tsInsert time.Time
- sqlDB.QueryRow(t, `SELECT crdb_internal.approximate_timestamp(crdb_internal_mvcc_timestamp) from fooTable where id=1`).Scan(&tsInsert)
-
- sqlDB.Exec(t, `INSERT INTO fooTable VALUES (2, 223, 'dog')`)
- var tsInsert2 time.Time
- sqlDB.QueryRow(t, `SELECT crdb_internal.approximate_timestamp(crdb_internal_mvcc_timestamp) from fooTable where id=2`).Scan(&tsInsert2)
-
- ts1 := hlc.Timestamp{WallTime: timeutil.Now().UnixNano()}
- sqlDB.Exec(t, fmt.Sprintf(`BACKUP TO $1 AS OF SYSTEM TIME '%s'`, ts1.AsOfSystemTime()), backupPath)
- sqlDB.Exec(t, fmt.Sprintf(`BACKUP TO $1 AS OF SYSTEM TIME '%s' WITH revision_history`, ts1.AsOfSystemTime()), backupPathWithRev)
-
- sqlDB.Exec(t, `ALTER TABLE fooTable ADD COLUMN active BOOL`)
- sqlDB.Exec(t, `INSERT INTO fooTable VALUES (3, 323, 'mickey mouse', true)`)
- var tsInsert3 time.Time
- sqlDB.QueryRow(t, `SELECT crdb_internal.approximate_timestamp(crdb_internal_mvcc_timestamp) from fooTable where id=3`).Scan(&tsInsert3)
- ts2 := hlc.Timestamp{WallTime: timeutil.Now().UnixNano()}
- sqlDB.Exec(t, fmt.Sprintf(`BACKUP TO $1 AS OF SYSTEM TIME '%s' WITH revision_history`, ts2.AsOfSystemTime()), backupPathWithRev)
-
- sqlDB.Exec(t, `SET sql_safe_updates=false`)
- sqlDB.Exec(t, `ALTER TABLE fooTable DROP COLUMN value`)
- var tsDropColumn time.Time
- sqlDB.QueryRow(t, `SELECT crdb_internal.approximate_timestamp(crdb_internal_mvcc_timestamp) from fooTable where id=3`).Scan(&tsDropColumn)
-
- sqlDB.Exec(t, `UPDATE fooTable SET tag=('lion') WHERE id = 1`)
- var tsUpdate time.Time
- sqlDB.QueryRow(t, `SELECT crdb_internal.approximate_timestamp(crdb_internal_mvcc_timestamp) from fooTable where id=1`).Scan(&tsUpdate)
-
- ts3 := hlc.Timestamp{WallTime: timeutil.Now().UnixNano()}
- sqlDB.Exec(t, fmt.Sprintf(`BACKUP TO $1 AS OF SYSTEM TIME '%s' WITH revision_history`, ts3.AsOfSystemTime()), backupPathWithRev)
-
- sqlDB.Exec(t, `CREATE INDEX extra ON fooTable (id)`)
- ts4 := hlc.Timestamp{WallTime: timeutil.Now().UnixNano()}
- sqlDB.Exec(t, fmt.Sprintf(`BACKUP TO $1 AS OF SYSTEM TIME '%s' WITH revision_history`, ts4.AsOfSystemTime()), backupPathWithRev)
-
- t.Run("show-data-revisions-of-backup-without-revision-history", func(t *testing.T) {
- setDebugContextDefault()
- out, err := c.RunWithCapture(fmt.Sprintf("debug backup export %s --table=%s --with-revisions --external-io-dir=%s",
- backupPath,
- "testDB.public.fooTable",
- dir))
- require.NoError(t, err)
- expectedError := "ERROR: invalid flag: with-revisions\nHINT: requires backup created with \"revision_history\"\n"
- checkExpectedOutput(t, expectedError, out)
- })
-
- testCases := []struct {
- name string
- tableName string
- backupPaths []string
- expectedData string
- upToTimestamp string
- }{
- {
- "show-data-revisions-of-a-single-full-backup",
- "testDB.public.fooTable",
- []string{
- backupPathWithRev,
- },
- fmt.Sprintf("1,123,'cat',%s\n2,223,'dog',%s\n", tsInsert.UTC(), tsInsert2.UTC()),
- "",
- },
- {
- "show-data-revisions-after-adding-a-column",
- "testDB.public.fooTable",
- []string{
- backupPathWithRev,
- backupPathWithRev + "/incrementals" + ts2.GoTime().Format(backupbase.DateBasedIncFolderName),
- backupPathWithRev + "/incrementals" + ts3.GoTime().Format(backupbase.DateBasedIncFolderName),
- },
- fmt.Sprintf("3,323,'mickey mouse',true,%s\n", tsInsert3.UTC()),
- ts2.AsOfSystemTime(),
- },
- {
- "show-data-revisions-after-dropping-a-column-and-updating-value",
- "testDB.public.fooTable",
- []string{
- backupPathWithRev,
- backupPathWithRev + "/incrementals" + ts2.GoTime().Format(backupbase.DateBasedIncFolderName),
- backupPathWithRev + "/incrementals" + ts3.GoTime().Format(backupbase.DateBasedIncFolderName),
- },
- fmt.Sprintf("1,'lion',null,%s\n1,'cat',null,%s\n2,'dog',null,%s\n3,'mickey mouse',true,%s\n",
- tsUpdate.UTC(), tsDropColumn.UTC(), tsDropColumn.UTC(), tsDropColumn.UTC()),
- ts3.AsOfSystemTime(),
- }, {
- "show-data-revisions-after-adding-an-index",
- "testDB.public.fooTable",
- []string{
- backupPathWithRev,
- backupPathWithRev + "/incrementals" + ts2.GoTime().Format(backupbase.DateBasedIncFolderName),
- backupPathWithRev + "/incrementals" + ts3.GoTime().Format(backupbase.DateBasedIncFolderName),
- backupPathWithRev + "/incrementals" + ts4.GoTime().Format(backupbase.DateBasedIncFolderName),
- },
- fmt.Sprintf("1,'lion',null,%s\n1,'cat',null,%s\n2,'dog',null,%s\n3,'mickey mouse',true,%s\n",
- tsUpdate.UTC(), tsDropColumn.UTC(), tsDropColumn.UTC(), tsDropColumn.UTC()),
- ts4.AsOfSystemTime(),
- },
- }
-
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- setDebugContextDefault()
- out, err := c.RunWithCapture(fmt.Sprintf("debug backup export %s --table=%s --with-revisions --up-to=%s --external-io-dir=%s",
- strings.Join(tc.backupPaths, " "),
- tc.tableName,
- tc.upToTimestamp,
- dir))
- require.NoError(t, err)
- checkExpectedOutput(t, tc.expectedData, out)
- })
- }
-}
-
-func trimFirstLine(out string) string {
- endOfCmd := strings.Index(out, "\n")
- return out[endOfCmd+1:]
-}
-
-func checkExpectedOutput(t *testing.T, expected string, out string) {
- require.Equal(t, expected, trimFirstLine(out))
-}
-
-// generateBackupTimestamps creates n timestamps spaced at least
-// 10 milliseconds apart, for use in incremental backup tests:
-// incremental backup collections are stored in a sub-directory
-// structure that assumes backups are taken at least 10 milliseconds apart.
-func generateBackupTimestamps(n int) []hlc.Timestamp {
- timestamps := make([]hlc.Timestamp, 0, n)
- for i := 0; i < n; i++ {
- timestamps = append(timestamps, hlc.Timestamp{WallTime: timeutil.Now().UnixNano()})
- time.Sleep(10 * time.Millisecond)
- }
- return timestamps
-}
-
-// generateRows generates rows of the pattern "%d,null,null\n",
-// used to verify that --max-rows captures the correct number of rows.
-func generateRows(start int, rowCount int) string {
- var res string
- for i := 0; i < rowCount; i++ {
- res += fmt.Sprintf("%d,null,null\n", start+i)
- }
- return res
-}
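
For context on the two --start-key encodings exercised in the datum-output test cases above (bytekey:\x8c\x8c and hex:bf898c8c), the following is a minimal, self-contained Go sketch of how such a flag value could be decoded into raw key bytes. parseStartKey is a hypothetical helper named for illustration only; the actual flag parsing lives inside the debug backup export implementation and may differ.

package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

// parseStartKey illustrates the two --start-key formats used in the tests
// above: "hex:<hex-encoded bytes>" and "bytekey:<raw bytes>". This is a
// sketch, not the real CLI parsing.
func parseStartKey(flag string) ([]byte, error) {
	switch {
	case strings.HasPrefix(flag, "hex:"):
		// Hex form: each byte is written as two hex digits.
		return hex.DecodeString(strings.TrimPrefix(flag, "hex:"))
	case strings.HasPrefix(flag, "bytekey:"):
		// Bytekey form: the bytes are passed through as-is; escape
		// sequences such as \x8c are assumed to already have been
		// resolved into raw bytes before reaching this helper.
		return []byte(strings.TrimPrefix(flag, "bytekey:")), nil
	default:
		return nil, fmt.Errorf("unsupported start-key format: %q", flag)
	}
}

func main() {
	key, err := parseStartKey("hex:bf898c8c")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", key) // prints bf898c8c
}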