diff --git a/pkg/ccl/backupccl/backup_test.go b/pkg/ccl/backupccl/backup_test.go index 64e4498a4e1d..26138e6cd8dc 100644 --- a/pkg/ccl/backupccl/backup_test.go +++ b/pkg/ccl/backupccl/backup_test.go @@ -50,6 +50,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" @@ -1570,7 +1571,7 @@ func TestBackupRestoreResume(t *testing.T) { t.Fatal(err) } createAndWaitForJob( - t, sqlDB, []descpb.ID{backupTableDesc.ID}, + t, sqlDB, []descpb.ID{backupTableDesc.GetID()}, jobspb.BackupDetails{ EndTime: tc.Servers[0].Clock().Now(), URI: "nodelocal://0/backup", @@ -1619,7 +1620,7 @@ func TestBackupRestoreResume(t *testing.T) { t, sqlDB, []descpb.ID{restoreTableID}, jobspb.RestoreDetails{ DescriptorRewrites: map[descpb.ID]*jobspb.RestoreDetails_DescriptorRewrite{ - backupTableDesc.ID: { + backupTableDesc.GetID(): { ParentID: descpb.ID(restoreDatabaseID), ID: restoreTableID, }, @@ -5211,17 +5212,17 @@ func TestBackupRestoreSequenceOwnership(t *testing.T) { tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "d", "t") seqDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "d", "seq") - require.True(t, seqDesc.SequenceOpts.HasOwner(), "no sequence owner after restore") - require.Equal(t, tableDesc.ID, seqDesc.SequenceOpts.SequenceOwner.OwnerTableID, + require.True(t, seqDesc.GetSequenceOpts().HasOwner(), "no sequence owner after restore") + require.Equal(t, tableDesc.GetID(), seqDesc.GetSequenceOpts().SequenceOwner.OwnerTableID, "unexpected table is sequence owner after restore", ) - require.Equal(t, tableDesc.GetColumns()[0].ID, 
seqDesc.SequenceOpts.SequenceOwner.OwnerColumnID, + require.Equal(t, tableDesc.GetPublicColumns()[0].ID, seqDesc.GetSequenceOpts().SequenceOwner.OwnerColumnID, "unexpected column is sequence owner after restore", ) - require.Equal(t, 1, len(tableDesc.GetColumns()[0].OwnsSequenceIds), + require.Equal(t, 1, len(tableDesc.GetPublicColumns()[0].OwnsSequenceIds), "unexpected number of sequences owned by d.t after restore", ) - require.Equal(t, seqDesc.ID, tableDesc.GetColumns()[0].OwnsSequenceIds[0], + require.Equal(t, seqDesc.GetID(), tableDesc.GetPublicColumns()[0].OwnsSequenceIds[0], "unexpected ID of sequence owned by table d.t after restore", ) }) @@ -5242,7 +5243,7 @@ func TestBackupRestoreSequenceOwnership(t *testing.T) { newDB.Exec(t, `RESTORE TABLE seq FROM $1 WITH skip_missing_sequence_owners`, backupLoc) seqDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "d", "seq") - require.False(t, seqDesc.SequenceOpts.HasOwner(), "unexpected owner of restored sequence.") + require.False(t, seqDesc.GetSequenceOpts().HasOwner(), "unexpected owner of restored sequence.") }) // When just the table is restored by itself, the ownership dependency is @@ -5268,7 +5269,7 @@ func TestBackupRestoreSequenceOwnership(t *testing.T) { tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "d", "t") - require.Equal(t, 0, len(tableDesc.GetColumns()[0].OwnsSequenceIds), + require.Equal(t, 0, len(tableDesc.GetPublicColumns()[0].OwnsSequenceIds), "expected restored table to own 0 sequences", ) @@ -5277,7 +5278,7 @@ func TestBackupRestoreSequenceOwnership(t *testing.T) { newDB.Exec(t, `RESTORE TABLE seq FROM $1 WITH skip_missing_sequence_owners`, backupLoc) seqDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "d", "seq") - require.False(t, seqDesc.SequenceOpts.HasOwner(), "unexpected sequence owner after restore") + require.False(t, seqDesc.GetSequenceOpts().HasOwner(), "unexpected sequence owner after restore") }) // Ownership 
dependencies should be preserved and remapped when restoring @@ -5295,17 +5296,17 @@ func TestBackupRestoreSequenceOwnership(t *testing.T) { tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "restore_db", "t") seqDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "restore_db", "seq") - require.True(t, seqDesc.SequenceOpts.HasOwner(), "no sequence owner after restore") - require.Equal(t, tableDesc.ID, seqDesc.SequenceOpts.SequenceOwner.OwnerTableID, + require.True(t, seqDesc.GetSequenceOpts().HasOwner(), "no sequence owner after restore") + require.Equal(t, tableDesc.GetID(), seqDesc.GetSequenceOpts().SequenceOwner.OwnerTableID, "unexpected table is sequence owner after restore", ) - require.Equal(t, tableDesc.GetColumns()[0].ID, seqDesc.SequenceOpts.SequenceOwner.OwnerColumnID, + require.Equal(t, tableDesc.GetPublicColumns()[0].ID, seqDesc.GetSequenceOpts().SequenceOwner.OwnerColumnID, "unexpected column is sequence owner after restore", ) - require.Equal(t, 1, len(tableDesc.GetColumns()[0].OwnsSequenceIds), + require.Equal(t, 1, len(tableDesc.GetPublicColumns()[0].OwnsSequenceIds), "unexpected number of sequences owned by d.t after restore", ) - require.Equal(t, seqDesc.ID, tableDesc.GetColumns()[0].OwnsSequenceIds[0], + require.Equal(t, seqDesc.GetID(), tableDesc.GetPublicColumns()[0].OwnsSequenceIds[0], "unexpected ID of sequence owned by table d.t after restore", ) }) @@ -5344,7 +5345,7 @@ func TestBackupRestoreSequenceOwnership(t *testing.T) { newDB.Exec(t, `RESTORE DATABASE d2 FROM $1 WITH skip_missing_sequence_owners`, backupLocD2D3) tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "d2", "t") - require.Equal(t, 0, len(tableDesc.GetColumns()[0].OwnsSequenceIds), + require.Equal(t, 0, len(tableDesc.GetPublicColumns()[0].OwnsSequenceIds), "expected restored table to own no sequences.", ) @@ -5354,22 +5355,22 @@ func TestBackupRestoreSequenceOwnership(t *testing.T) { newDB.Exec(t, `RESTORE 
DATABASE d3 FROM $1 WITH skip_missing_sequence_owners`, backupLocD2D3) seqDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "d3", "seq") - require.False(t, seqDesc.SequenceOpts.HasOwner(), "unexpected sequence owner after restore") + require.False(t, seqDesc.GetSequenceOpts().HasOwner(), "unexpected sequence owner after restore") // Sequence dependencies inside the database should still be preserved. sd := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "d3", "seq2") td := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "d3", "t") - require.True(t, sd.SequenceOpts.HasOwner(), "no owner found for seq2") - require.Equal(t, td.ID, sd.SequenceOpts.SequenceOwner.OwnerTableID, + require.True(t, sd.GetSequenceOpts().HasOwner(), "no owner found for seq2") + require.Equal(t, td.GetID(), sd.GetSequenceOpts().SequenceOwner.OwnerTableID, "unexpected table owner for sequence seq2 after restore", ) - require.Equal(t, td.GetColumns()[0].ID, sd.SequenceOpts.SequenceOwner.OwnerColumnID, + require.Equal(t, td.GetPublicColumns()[0].ID, sd.GetSequenceOpts().SequenceOwner.OwnerColumnID, "unexpected column owner for sequence seq2 after restore") - require.Equal(t, 1, len(td.GetColumns()[0].OwnsSequenceIds), + require.Equal(t, 1, len(td.GetPublicColumns()[0].OwnsSequenceIds), "unexpected number of sequences owned by d3.t after restore", ) - require.Equal(t, sd.ID, td.GetColumns()[0].OwnsSequenceIds[0], + require.Equal(t, sd.GetID(), td.GetPublicColumns()[0].OwnsSequenceIds[0], "unexpected ID of sequences owned by d3.t", ) }) @@ -5389,17 +5390,17 @@ func TestBackupRestoreSequenceOwnership(t *testing.T) { tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "d2", "t") seqDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "d3", "seq") - require.True(t, seqDesc.SequenceOpts.HasOwner(), "no sequence owner after restore") - require.Equal(t, tableDesc.ID, 
seqDesc.SequenceOpts.SequenceOwner.OwnerTableID, + require.True(t, seqDesc.GetSequenceOpts().HasOwner(), "no sequence owner after restore") + require.Equal(t, tableDesc.GetID(), seqDesc.GetSequenceOpts().SequenceOwner.OwnerTableID, "unexpected table is sequence owner after restore", ) - require.Equal(t, tableDesc.GetColumns()[0].ID, seqDesc.SequenceOpts.SequenceOwner.OwnerColumnID, + require.Equal(t, tableDesc.GetPublicColumns()[0].ID, seqDesc.GetSequenceOpts().SequenceOwner.OwnerColumnID, "unexpected column is sequence owner after restore", ) - require.Equal(t, 1, len(tableDesc.GetColumns()[0].OwnsSequenceIds), + require.Equal(t, 1, len(tableDesc.GetPublicColumns()[0].OwnsSequenceIds), "unexpected number of sequences owned by d.t after restore", ) - require.Equal(t, seqDesc.ID, tableDesc.GetColumns()[0].OwnsSequenceIds[0], + require.Equal(t, seqDesc.GetID(), tableDesc.GetPublicColumns()[0].OwnsSequenceIds[0], "unexpected ID of sequence owned by table d.t after restore", ) @@ -5408,30 +5409,30 @@ func TestBackupRestoreSequenceOwnership(t *testing.T) { sd := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "d2", "seq") sdSeq2 := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "d3", "seq2") - require.True(t, sd.SequenceOpts.HasOwner(), "no sequence owner after restore") - require.True(t, sdSeq2.SequenceOpts.HasOwner(), "no sequence owner after restore") + require.True(t, sd.GetSequenceOpts().HasOwner(), "no sequence owner after restore") + require.True(t, sdSeq2.GetSequenceOpts().HasOwner(), "no sequence owner after restore") - require.Equal(t, td.ID, sd.SequenceOpts.SequenceOwner.OwnerTableID, + require.Equal(t, td.GetID(), sd.GetSequenceOpts().SequenceOwner.OwnerTableID, "unexpected table is sequence owner of d3.seq after restore", ) - require.Equal(t, td.ID, sdSeq2.SequenceOpts.SequenceOwner.OwnerTableID, + require.Equal(t, td.GetID(), sdSeq2.GetSequenceOpts().SequenceOwner.OwnerTableID, "unexpected table is sequence owner of d3.seq2 
after restore", ) - require.Equal(t, td.GetColumns()[0].ID, sd.SequenceOpts.SequenceOwner.OwnerColumnID, + require.Equal(t, td.GetPublicColumns()[0].ID, sd.GetSequenceOpts().SequenceOwner.OwnerColumnID, "unexpected column is sequence owner of d2.seq after restore", ) - require.Equal(t, td.GetColumns()[0].ID, sdSeq2.SequenceOpts.SequenceOwner.OwnerColumnID, + require.Equal(t, td.GetPublicColumns()[0].ID, sdSeq2.GetSequenceOpts().SequenceOwner.OwnerColumnID, "unexpected column is sequence owner of d3.seq2 after restore", ) - require.Equal(t, 2, len(td.GetColumns()[0].OwnsSequenceIds), + require.Equal(t, 2, len(td.GetPublicColumns()[0].OwnsSequenceIds), "unexpected number of sequences owned by d3.t after restore", ) - require.Equal(t, sd.ID, td.GetColumns()[0].OwnsSequenceIds[0], + require.Equal(t, sd.GetID(), td.GetPublicColumns()[0].OwnsSequenceIds[0], "unexpected ID of sequence owned by table d3.t after restore", ) - require.Equal(t, sdSeq2.ID, td.GetColumns()[0].OwnsSequenceIds[1], + require.Equal(t, sdSeq2.GetID(), td.GetPublicColumns()[0].OwnsSequenceIds[1], "unexpected ID of sequence owned by table d3.t after restore", ) }) @@ -5885,14 +5886,13 @@ func getMockIndexDesc(indexID descpb.IndexID) descpb.IndexDescriptor { func getMockTableDesc( tableID descpb.ID, pkIndex descpb.IndexDescriptor, indexes []descpb.IndexDescriptor, -) tabledesc.Immutable { +) catalog.TableDescriptor { mockTableDescriptor := descpb.TableDescriptor{ ID: tableID, PrimaryIndex: pkIndex, Indexes: indexes, } - mockImmutableTableDesc := tabledesc.MakeImmutable(mockTableDescriptor) - return mockImmutableTableDesc + return tabledesc.NewImmutable(mockTableDescriptor) } // Unit tests for the getLogicallyMergedTableSpans() method. 
@@ -5986,7 +5986,7 @@ func TestLogicallyMergedTableSpans(t *testing.T) { for _, test := range testCases { t.Run(test.name, func(t *testing.T) { tableDesc := getMockTableDesc(test.tableID, test.pkIndex, test.indexes) - spans, err := getLogicallyMergedTableSpans(&tableDesc, unusedMap, codec, + spans, err := getLogicallyMergedTableSpans(tableDesc, unusedMap, codec, hlc.Timestamp{}, test.checkForKVInBoundsOverride) var mergedSpans []string for _, span := range spans { @@ -6673,7 +6673,7 @@ ALTER TYPE sc.typ ADD VALUE 'hi'; require.EqualValues(t, 2, schemaDesc.Version) tableDesc := catalogkv.TestingGetTableDescriptorFromSchema(kvDB, keys.SystemSQLCodec, "d", "sc", "tb") - require.EqualValues(t, 2, tableDesc.Version) + require.EqualValues(t, 2, tableDesc.GetVersion()) typeDesc := catalogkv.TestingGetTypeDescriptorFromSchema(kvDB, keys.SystemSQLCodec, "d", "sc", "typ") require.EqualValues(t, 2, typeDesc.Version) @@ -6768,7 +6768,7 @@ CREATE TYPE sc.typ AS ENUM ('hello'); require.Equal(t, descpb.DescriptorState_OFFLINE, schemaDesc.State) tableDesc := catalogkv.TestingGetTableDescriptorFromSchema(kvDB, keys.SystemSQLCodec, "d", "sc", "tb") - require.Equal(t, descpb.DescriptorState_OFFLINE, tableDesc.State) + require.Equal(t, descpb.DescriptorState_OFFLINE, tableDesc.GetState()) typeDesc := catalogkv.TestingGetTypeDescriptorFromSchema(kvDB, keys.SystemSQLCodec, "d", "sc", "typ") require.Equal(t, descpb.DescriptorState_OFFLINE, typeDesc.State) @@ -6862,10 +6862,10 @@ CREATE TYPE sc.typ AS ENUM ('hello'); require.Equal(t, descpb.DescriptorState_OFFLINE, schemaDesc.State) publicTableDesc := catalogkv.TestingGetTableDescriptorFromSchema(kvDB, keys.SystemSQLCodec, "newdb", "public", "tb") - require.Equal(t, descpb.DescriptorState_OFFLINE, publicTableDesc.State) + require.Equal(t, descpb.DescriptorState_OFFLINE, publicTableDesc.GetState()) scTableDesc := catalogkv.TestingGetTableDescriptorFromSchema(kvDB, keys.SystemSQLCodec, "newdb", "sc", "tb") - require.Equal(t, 
descpb.DescriptorState_OFFLINE, scTableDesc.State) + require.Equal(t, descpb.DescriptorState_OFFLINE, scTableDesc.GetState()) typeDesc := catalogkv.TestingGetTypeDescriptorFromSchema(kvDB, keys.SystemSQLCodec, "newdb", "sc", "typ") require.Equal(t, descpb.DescriptorState_OFFLINE, typeDesc.State) diff --git a/pkg/ccl/backupccl/full_cluster_backup_restore_test.go b/pkg/ccl/backupccl/full_cluster_backup_restore_test.go index c8a61b15ef3a..b22e13d0a673 100644 --- a/pkg/ccl/backupccl/full_cluster_backup_restore_test.go +++ b/pkg/ccl/backupccl/full_cluster_backup_restore_test.go @@ -167,16 +167,16 @@ CREATE TABLE data2.foo (a int); // Note the absence of the jobs table. Jobs are tested by another test as // jobs are created during the RESTORE process. systemTablesToVerify := []string{ - systemschema.CommentsTable.Name, - systemschema.LocationsTable.Name, - systemschema.RoleMembersTable.Name, - systemschema.RoleOptionsTable.Name, - systemschema.SettingsTable.Name, - systemschema.TableStatisticsTable.Name, - systemschema.UITable.Name, - systemschema.UsersTable.Name, - systemschema.ZonesTable.Name, - systemschema.ScheduledJobsTable.Name, + systemschema.CommentsTable.GetName(), + systemschema.LocationsTable.GetName(), + systemschema.RoleMembersTable.GetName(), + systemschema.RoleOptionsTable.GetName(), + systemschema.SettingsTable.GetName(), + systemschema.TableStatisticsTable.GetName(), + systemschema.UITable.GetName(), + systemschema.UsersTable.GetName(), + systemschema.ZonesTable.GetName(), + systemschema.ScheduledJobsTable.GetName(), } verificationQueries := make([]string, len(systemTablesToVerify)) @@ -184,11 +184,11 @@ CREATE TABLE data2.foo (a int); // that can be used to ensure that data in those tables is restored. for i, table := range systemTablesToVerify { switch table { - case systemschema.TableStatisticsTable.Name: + case systemschema.TableStatisticsTable.GetName(): // createdAt and statisticsID are re-generated on RESTORE. 
query := `SELECT "tableID", name, "columnIDs", "rowCount" FROM system.table_statistics` verificationQueries[i] = query - case systemschema.SettingsTable.Name: + case systemschema.SettingsTable.GetName(): // We don't include the cluster version. query := fmt.Sprintf("SELECT * FROM system.%s WHERE name <> 'version'", table) verificationQueries[i] = query diff --git a/pkg/ccl/backupccl/restore_job.go b/pkg/ccl/backupccl/restore_job.go index d9f5ea5feb75..6ba2b1effac3 100644 --- a/pkg/ccl/backupccl/restore_job.go +++ b/pkg/ccl/backupccl/restore_job.go @@ -855,7 +855,7 @@ func spansForAllRestoreTableIndexes( if rawTbl != nil && rawTbl.State != descpb.DescriptorState_DROP { tbl := tabledesc.NewImmutable(*rawTbl) for _, idx := range tbl.NonDropIndexes() { - key := tableAndIndex{tableID: tbl.ID, indexID: idx.GetID()} + key := tableAndIndex{tableID: tbl.GetID(), indexID: idx.GetID()} if !added[key] { if err := sstIntervalTree.Insert(intervalSpan(tbl.IndexSpan(codec, idx.GetID())), false); err != nil { panic(errors.NewAssertionErrorWithWrappedErrf(err, "IndexSpan")) diff --git a/pkg/ccl/backupccl/show_test.go b/pkg/ccl/backupccl/show_test.go index 765aeb824aad..b306de31f1ca 100644 --- a/pkg/ccl/backupccl/show_test.go +++ b/pkg/ccl/backupccl/show_test.go @@ -200,7 +200,7 @@ ORDER BY object_type, object_name`, full) // Create tables with the same ID as data.tableA to ensure that comments // from different tables in the restoring cluster don't appear. 
tableA := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "data", "tablea") - for i := keys.MinUserDescID; i < int(tableA.ID); i++ { + for i := keys.MinUserDescID; i < int(tableA.GetID()); i++ { tableName := fmt.Sprintf("foo%d", i) sqlDBRestore.Exec(t, fmt.Sprintf("CREATE TABLE %s ();", tableName)) sqlDBRestore.Exec(t, fmt.Sprintf("COMMENT ON TABLE %s IS 'table comment'", tableName)) diff --git a/pkg/ccl/backupccl/system_schema.go b/pkg/ccl/backupccl/system_schema.go index e92280c3959e..f90f1ea22bd3 100644 --- a/pkg/ccl/backupccl/system_schema.go +++ b/pkg/ccl/backupccl/system_schema.go @@ -137,94 +137,94 @@ func settingsRestoreFunc( // backup. Every system table should have a specification defined here, enforced // by TestAllSystemTablesHaveBackupConfig. var systemTableBackupConfiguration = map[string]systemBackupConfiguration{ - systemschema.UsersTable.Name: { + systemschema.UsersTable.GetName(): { includeInClusterBackup: optInToClusterBackup, }, - systemschema.ZonesTable.Name: { + systemschema.ZonesTable.GetName(): { includeInClusterBackup: optInToClusterBackup, }, - systemschema.SettingsTable.Name: { + systemschema.SettingsTable.GetName(): { includeInClusterBackup: optInToClusterBackup, customRestoreFunc: settingsRestoreFunc, }, - systemschema.LocationsTable.Name: { + systemschema.LocationsTable.GetName(): { includeInClusterBackup: optInToClusterBackup, }, - systemschema.RoleMembersTable.Name: { + systemschema.RoleMembersTable.GetName(): { includeInClusterBackup: optInToClusterBackup, }, - systemschema.RoleOptionsTable.Name: { + systemschema.RoleOptionsTable.GetName(): { includeInClusterBackup: optInToClusterBackup, }, - systemschema.UITable.Name: { + systemschema.UITable.GetName(): { includeInClusterBackup: optInToClusterBackup, }, - systemschema.CommentsTable.Name: { + systemschema.CommentsTable.GetName(): { includeInClusterBackup: optInToClusterBackup, }, - systemschema.JobsTable.Name: { + systemschema.JobsTable.GetName(): { 
includeInClusterBackup: optInToClusterBackup, customRestoreFunc: jobsRestoreFunc, }, - systemschema.ScheduledJobsTable.Name: { + systemschema.ScheduledJobsTable.GetName(): { includeInClusterBackup: optInToClusterBackup, }, - systemschema.TableStatisticsTable.Name: { + systemschema.TableStatisticsTable.GetName(): { // Table statistics are backed up in the backup descriptor for now. includeInClusterBackup: optOutOfClusterBackup, }, - systemschema.DescriptorTable.Name: { + systemschema.DescriptorTable.GetName(): { includeInClusterBackup: optOutOfClusterBackup, }, - systemschema.EventLogTable.Name: { + systemschema.EventLogTable.GetName(): { includeInClusterBackup: optOutOfClusterBackup, }, - systemschema.LeaseTable.Name: { + systemschema.LeaseTable.GetName(): { includeInClusterBackup: optOutOfClusterBackup, }, - systemschema.NamespaceTable.Name: { + systemschema.NamespaceTable.GetName(): { includeInClusterBackup: optOutOfClusterBackup, }, - systemschema.DeprecatedNamespaceTable.Name: { + systemschema.DeprecatedNamespaceTable.GetName(): { includeInClusterBackup: optOutOfClusterBackup, }, - systemschema.ProtectedTimestampsMetaTable.Name: { + systemschema.ProtectedTimestampsMetaTable.GetName(): { includeInClusterBackup: optOutOfClusterBackup, }, - systemschema.ProtectedTimestampsRecordsTable.Name: { + systemschema.ProtectedTimestampsRecordsTable.GetName(): { includeInClusterBackup: optOutOfClusterBackup, }, - systemschema.RangeEventTable.Name: { + systemschema.RangeEventTable.GetName(): { includeInClusterBackup: optOutOfClusterBackup, }, - systemschema.ReplicationConstraintStatsTable.Name: { + systemschema.ReplicationConstraintStatsTable.GetName(): { includeInClusterBackup: optOutOfClusterBackup, }, - systemschema.ReplicationCriticalLocalitiesTable.Name: { + systemschema.ReplicationCriticalLocalitiesTable.GetName(): { includeInClusterBackup: optOutOfClusterBackup, }, - systemschema.ReportsMetaTable.Name: { + systemschema.ReportsMetaTable.GetName(): { 
includeInClusterBackup: optOutOfClusterBackup, }, - systemschema.ReplicationStatsTable.Name: { + systemschema.ReplicationStatsTable.GetName(): { includeInClusterBackup: optOutOfClusterBackup, }, - systemschema.SqllivenessTable.Name: { + systemschema.SqllivenessTable.GetName(): { includeInClusterBackup: optOutOfClusterBackup, }, - systemschema.StatementBundleChunksTable.Name: { + systemschema.StatementBundleChunksTable.GetName(): { includeInClusterBackup: optOutOfClusterBackup, }, - systemschema.StatementDiagnosticsTable.Name: { + systemschema.StatementDiagnosticsTable.GetName(): { includeInClusterBackup: optOutOfClusterBackup, }, - systemschema.StatementDiagnosticsRequestsTable.Name: { + systemschema.StatementDiagnosticsRequestsTable.GetName(): { includeInClusterBackup: optOutOfClusterBackup, }, - systemschema.TenantsTable.Name: { + systemschema.TenantsTable.GetName(): { includeInClusterBackup: optOutOfClusterBackup, }, - systemschema.WebSessionsTable.Name: { + systemschema.WebSessionsTable.GetName(): { includeInClusterBackup: optOutOfClusterBackup, }, } diff --git a/pkg/ccl/backupccl/targets.go b/pkg/ccl/backupccl/targets.go index 15cd4b683fec..4f90a16eb7f6 100644 --- a/pkg/ccl/backupccl/targets.go +++ b/pkg/ccl/backupccl/targets.go @@ -217,7 +217,7 @@ func newDescriptorResolver(descs []catalog.Descriptor) (*descriptorResolver, err var typeToRegister string switch desc := desc.(type) { case catalog.TableDescriptor: - if desc.TableDesc().Temporary { + if desc.IsTemporary() { continue } typeToRegister = "table" diff --git a/pkg/ccl/backupccl/targets_test.go b/pkg/ccl/backupccl/targets_test.go index 5340531b0de9..b5012d366bbe 100644 --- a/pkg/ccl/backupccl/targets_test.go +++ b/pkg/ccl/backupccl/targets_test.go @@ -37,7 +37,7 @@ func TestDescriptorsMatchingTargets(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - // TODO(ajwerner): There should be a constructor for an Immutable + // TODO(ajwerner): There should be a constructor for an 
immutable // and really all of the leasable descriptor types which includes its initial // DescriptorMeta. This refactoring precedes the actual adoption of // DescriptorMeta. @@ -48,9 +48,9 @@ func TestDescriptorsMatchingTargets(t *testing.T) { type tbDesc = descpb.TableDescriptor type typDesc = descpb.TypeDescriptor ts1 := hlc.Timestamp{WallTime: 1} - mkTable := func(descriptor tbDesc) *tabledesc.Immutable { + mkTable := func(descriptor tbDesc) catalog.TableDescriptor { desc := tabledesc.NewImmutable(descriptor) - desc.ModificationTime = ts1 + desc.TableDesc().ModificationTime = ts1 return desc } mkDB := func(id descpb.ID, name string) *dbdesc.Immutable { diff --git a/pkg/ccl/changefeedccl/BUILD.bazel b/pkg/ccl/changefeedccl/BUILD.bazel index 365e79a4ac40..c8bdfd4e18d6 100644 --- a/pkg/ccl/changefeedccl/BUILD.bazel +++ b/pkg/ccl/changefeedccl/BUILD.bazel @@ -51,7 +51,6 @@ go_library( "//pkg/sql/catalog/hydratedtables", "//pkg/sql/catalog/lease", "//pkg/sql/catalog/resolver", - "//pkg/sql/catalog/tabledesc", "//pkg/sql/execinfra", "//pkg/sql/execinfrapb", "//pkg/sql/flowinfra", diff --git a/pkg/ccl/changefeedccl/bench_test.go b/pkg/ccl/changefeedccl/bench_test.go index cae4723e99c2..5388f3a4c0dc 100644 --- a/pkg/ccl/changefeedccl/bench_test.go +++ b/pkg/ccl/changefeedccl/bench_test.go @@ -189,8 +189,8 @@ func createBenchmarkChangefeed( tableDesc := catalogkv.TestingGetTableDescriptor(s.DB(), keys.SystemSQLCodec, database, table) spans := []roachpb.Span{tableDesc.PrimaryIndexSpan(keys.SystemSQLCodec)} details := jobspb.ChangefeedDetails{ - Targets: jobspb.ChangefeedTargets{tableDesc.ID: jobspb.ChangefeedTarget{ - StatementTimeName: tableDesc.Name, + Targets: jobspb.ChangefeedTargets{tableDesc.GetID(): jobspb.ChangefeedTarget{ + StatementTimeName: tableDesc.GetName(), }}, Opts: map[string]string{ changefeedbase.OptEnvelope: string(changefeedbase.OptEnvelopeRow), diff --git a/pkg/ccl/changefeedccl/changefeed.go b/pkg/ccl/changefeedccl/changefeed.go index 
a5df4b7cb4a2..674d2f1ad9b3 100644 --- a/pkg/ccl/changefeedccl/changefeed.go +++ b/pkg/ccl/changefeedccl/changefeed.go @@ -82,10 +82,10 @@ func kvsToRows( if err != nil { return nil, err } - if _, ok := details.Targets[desc.ID]; !ok { + if _, ok := details.Targets[desc.GetID()]; !ok { // This kv is for an interleaved table that we're not watching. if log.V(3) { - log.Infof(ctx, `skipping key from unwatched table %s: %s`, desc.Name, kv.Key) + log.Infof(ctx, `skipping key from unwatched table %s: %s`, desc.GetName(), kv.Key) } return nil, nil } diff --git a/pkg/ccl/changefeedccl/kvfeed/BUILD.bazel b/pkg/ccl/changefeedccl/kvfeed/BUILD.bazel index 1607c66fa9e5..c2b66a8c0abe 100644 --- a/pkg/ccl/changefeedccl/kvfeed/BUILD.bazel +++ b/pkg/ccl/changefeedccl/kvfeed/BUILD.bazel @@ -54,8 +54,8 @@ go_test( "//pkg/keys", "//pkg/roachpb", "//pkg/settings/cluster", + "//pkg/sql/catalog", "//pkg/sql/catalog/descpb", - "//pkg/sql/catalog/tabledesc", "//pkg/sql/rowenc", "//pkg/sql/sem/tree", "//pkg/util/ctxgroup", diff --git a/pkg/ccl/changefeedccl/kvfeed/kv_feed.go b/pkg/ccl/changefeedccl/kvfeed/kv_feed.go index 51014608d578..37e4ab88072e 100644 --- a/pkg/ccl/changefeedccl/kvfeed/kv_feed.go +++ b/pkg/ccl/changefeedccl/kvfeed/kv_feed.go @@ -226,14 +226,14 @@ func (f *kvFeed) scanIfShould( // Only backfill for the tables which have events which may not be all // of the targets. 
for _, ev := range events { - tablePrefix := f.codec.TablePrefix(uint32(ev.After.ID)) + tablePrefix := f.codec.TablePrefix(uint32(ev.After.GetID())) tableSpan := roachpb.Span{Key: tablePrefix, EndKey: tablePrefix.PrefixEnd()} for _, sp := range f.spans { if tableSpan.Overlaps(sp) { spansToBackfill = append(spansToBackfill, sp) } } - if !scanTime.Equal(ev.After.ModificationTime) { + if !scanTime.Equal(ev.After.GetModificationTime()) { log.Fatalf(ctx, "found event in shouldScan which did not occur at the scan time %v: %v", scanTime, ev) } diff --git a/pkg/ccl/changefeedccl/kvfeed/kv_feed_test.go b/pkg/ccl/changefeedccl/kvfeed/kv_feed_test.go index 774b87260ee4..9a7212ca1122 100644 --- a/pkg/ccl/changefeedccl/kvfeed/kv_feed_test.go +++ b/pkg/ccl/changefeedccl/kvfeed/kv_feed_test.go @@ -21,8 +21,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/util/ctxgroup" @@ -83,7 +83,7 @@ func TestKVFeed(t *testing.T) { spans []roachpb.Span events []roachpb.RangeFeedEvent - descs []*tabledesc.Immutable + descs []catalog.TableDescriptor expScans []hlc.Timestamp expEvents int @@ -193,7 +193,7 @@ func TestKVFeed(t *testing.T) { ts(2), ts(3), }, - descs: []*tabledesc.Immutable{ + descs: []catalog.TableDescriptor{ makeTableDesc(42, 1, ts(1), 2), addColumnDropBackfillMutation(makeTableDesc(42, 2, ts(3), 1)), }, @@ -217,7 +217,7 @@ func TestKVFeed(t *testing.T) { expScans: []hlc.Timestamp{ ts(2), }, - descs: []*tabledesc.Immutable{ + descs: []catalog.TableDescriptor{ makeTableDesc(42, 1, ts(1), 2), 
addColumnDropBackfillMutation(makeTableDesc(42, 2, ts(3), 1)), }, @@ -242,7 +242,7 @@ func TestKVFeed(t *testing.T) { expScans: []hlc.Timestamp{ ts(2), }, - descs: []*tabledesc.Immutable{ + descs: []catalog.TableDescriptor{ makeTableDesc(42, 1, ts(1), 2), addColumnDropBackfillMutation(makeTableDesc(42, 2, ts(4), 1)), }, @@ -268,21 +268,21 @@ type rawTableFeed struct { events []schemafeed.TableEvent } -func newRawTableFeed(descs []*tabledesc.Immutable, initialHighWater hlc.Timestamp) rawTableFeed { +func newRawTableFeed(descs []catalog.TableDescriptor, initialHighWater hlc.Timestamp) rawTableFeed { sort.Slice(descs, func(i, j int) bool { - if descs[i].ID != descs[j].ID { - return descs[i].ID < descs[j].ID + if descs[i].GetID() != descs[j].GetID() { + return descs[i].GetID() < descs[j].GetID() } - return descs[i].ModificationTime.Less(descs[j].ModificationTime) + return descs[i].GetModificationTime().Less(descs[j].GetModificationTime()) }) f := rawTableFeed{} curID := descpb.ID(math.MaxUint32) for i, d := range descs { - if d.ID != curID { - curID = d.ID + if d.GetID() != curID { + curID = d.GetID() continue } - if d.ModificationTime.Less(initialHighWater) { + if d.GetModificationTime().Less(initialHighWater) { continue } f.events = append(f.events, schemafeed.TableEvent{ diff --git a/pkg/ccl/changefeedccl/rowfetcher_cache.go b/pkg/ccl/changefeedccl/rowfetcher_cache.go index 9c517ab34e30..cf0072e6dddc 100644 --- a/pkg/ccl/changefeedccl/rowfetcher_cache.go +++ b/pkg/ccl/changefeedccl/rowfetcher_cache.go @@ -20,7 +20,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/catalog/hydratedtables" "github.com/cockroachdb/cockroach/pkg/sql/catalog/lease" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" 
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -69,8 +68,8 @@ func newRowFetcherCache( func (c *rowFetcherCache) TableDescForKey( ctx context.Context, key roachpb.Key, ts hlc.Timestamp, -) (*tabledesc.Immutable, error) { - var tableDesc *tabledesc.Immutable +) (catalog.TableDescriptor, error) { + var tableDesc catalog.TableDescriptor key, err := c.codec.StripTenantPrefix(key) if err != nil { return nil, err @@ -94,7 +93,7 @@ func (c *rowFetcherCache) TableDescForKey( if err := c.leaseMgr.Release(desc); err != nil { return nil, err } - tableDesc = desc.(*tabledesc.Immutable) + tableDesc = desc.(catalog.TableDescriptor) if tableDesc.ContainsUserDefinedTypes() { // If the table contains user defined types, then use the descs.Collection // to retrieve a TableDescriptor with type metadata hydrated. We open a @@ -140,23 +139,23 @@ func (c *rowFetcherCache) TableDescForKey( } func (c *rowFetcherCache) RowFetcherForTableDesc( - tableDesc *tabledesc.Immutable, + tableDesc catalog.TableDescriptor, ) (*row.Fetcher, error) { - idVer := idVersion{id: tableDesc.ID, version: tableDesc.Version} + idVer := idVersion{id: tableDesc.GetID(), version: tableDesc.GetVersion()} // Ensure that all user defined types are up to date with the cached // version and the desired version to use the cache. It is safe to use // UserDefinedTypeColsHaveSameVersion if we have a hit because we are // guaranteed that the tables have the same version. Additionally, these // fetchers are always initialized with a single tabledesc.Immutable. if rf, ok := c.fetchers[idVer]; ok && - tableDesc.UserDefinedTypeColsHaveSameVersion(rf.GetTables()[0].(*tabledesc.Immutable)) { + tableDesc.UserDefinedTypeColsHaveSameVersion(rf.GetTables()[0].(catalog.TableDescriptor)) { return rf, nil } // TODO(dan): Allow for decoding a subset of the columns. 
var colIdxMap catalog.TableColMap var valNeededForCol util.FastIntSet - for colIdx := range tableDesc.Columns { - colIdxMap.Set(tableDesc.Columns[colIdx].ID, colIdx) + for colIdx := range tableDesc.GetPublicColumns() { + colIdxMap.Set(tableDesc.GetPublicColumns()[colIdx].ID, colIdx) valNeededForCol.Add(colIdx) } @@ -176,7 +175,7 @@ func (c *rowFetcherCache) RowFetcherForTableDesc( Index: tableDesc.GetPrimaryIndex().IndexDesc(), ColIdxMap: colIdxMap, IsSecondaryIndex: false, - Cols: tableDesc.Columns, + Cols: tableDesc.GetPublicColumns(), ValNeededForCol: valNeededForCol, }, ); err != nil { diff --git a/pkg/ccl/changefeedccl/schemafeed/schema_feed.go b/pkg/ccl/changefeedccl/schemafeed/schema_feed.go index fe7e5b6aec22..40a3b882b69e 100644 --- a/pkg/ccl/changefeedccl/schemafeed/schema_feed.go +++ b/pkg/ccl/changefeedccl/schemafeed/schema_feed.go @@ -42,12 +42,12 @@ import ( // TableEvent represents a change to a table descriptor. type TableEvent struct { - Before, After *tabledesc.Immutable + Before, After catalog.TableDescriptor } // Timestamp refers to the ModificationTime of the After table descriptor. func (e TableEvent) Timestamp() hlc.Timestamp { - return e.After.ModificationTime + return e.After.GetModificationTime() } // Config configures a SchemaFeed. @@ -121,7 +121,7 @@ type SchemaFeed struct { // of the table descriptor seen by the poller. This is needed to determine // when a backilling mutation has successfully completed - this can only // be determining by comparing a version to the previous version. - previousTableVersion map[descpb.ID]*tabledesc.Immutable + previousTableVersion map[descpb.ID]catalog.TableDescriptor // typeDeps tracks dependencies from target tables to user defined types // that they use. 
@@ -166,7 +166,7 @@ func (t *typeDependencyTracker) removeDependency(typeID, tableID descpb.ID) { } } -func (t *typeDependencyTracker) purgeTable(tbl *tabledesc.Immutable) { +func (t *typeDependencyTracker) purgeTable(tbl catalog.TableDescriptor) { if !tbl.ContainsUserDefinedTypes() { return } @@ -176,7 +176,7 @@ func (t *typeDependencyTracker) purgeTable(tbl *tabledesc.Immutable) { } } -func (t *typeDependencyTracker) ingestTable(tbl *tabledesc.Immutable) { +func (t *typeDependencyTracker) ingestTable(tbl catalog.TableDescriptor) { if !tbl.ContainsUserDefinedTypes() { return } @@ -207,7 +207,7 @@ func New(cfg Config) *SchemaFeed { targets: cfg.Targets, leaseMgr: cfg.LeaseManager, } - m.mu.previousTableVersion = make(map[descpb.ID]*tabledesc.Immutable) + m.mu.previousTableVersion = make(map[descpb.ID]catalog.TableDescriptor) m.mu.highWater = cfg.InitialHighWater m.mu.typeDeps = typeDependencyTracker{deps: make(map[descpb.ID][]descpb.ID)} return m @@ -276,7 +276,7 @@ func (tf *SchemaFeed) primeInitialTableDescs(ctx context.Context) error { tf.mu.Lock() // Register all types used by the initial set of tables. for _, desc := range initialDescs { - tbl := desc.(*tabledesc.Immutable) + tbl := desc.(catalog.TableDescriptor) tf.mu.typeDeps.ingestTable(tbl) } tf.mu.Unlock() @@ -474,8 +474,8 @@ func (e TableEvent) String() string { return formatEvent(e) } -func formatDesc(desc *tabledesc.Immutable) string { - return fmt.Sprintf("%d:%d@%v", desc.ID, desc.Version, desc.ModificationTime) +func formatDesc(desc catalog.TableDescriptor) string { + return fmt.Sprintf("%d:%d@%v", desc.GetID(), desc.GetVersion(), desc.GetModificationTime()) } func formatEvent(e TableEvent) string { @@ -495,14 +495,14 @@ func (tf *SchemaFeed) validateDescriptor( // If a interesting type changed, then we just want to force the lease // manager to acquire the freshest version of the type. 
return tf.leaseMgr.AcquireFreshestFromStore(ctx, desc.ID) - case *tabledesc.Immutable: + case catalog.TableDescriptor: if err := changefeedbase.ValidateTable(tf.targets, desc); err != nil { return err } log.Infof(ctx, "validate %v", formatDesc(desc)) - if lastVersion, ok := tf.mu.previousTableVersion[desc.ID]; ok { + if lastVersion, ok := tf.mu.previousTableVersion[desc.GetID()]; ok { // NB: Writes can occur to a table - if desc.ModificationTime.LessEq(lastVersion.ModificationTime) { + if desc.GetModificationTime().LessEq(lastVersion.GetModificationTime()) { return nil } @@ -513,7 +513,7 @@ func (tf *SchemaFeed) validateDescriptor( // allowed; without this explicit load, the lease manager might therefore // return the previous version of the table, which is still technically // allowed by the schema change system. - if err := tf.leaseMgr.AcquireFreshestFromStore(ctx, desc.ID); err != nil { + if err := tf.leaseMgr.AcquireFreshestFromStore(ctx, desc.GetID()); err != nil { return err } @@ -534,7 +534,7 @@ func (tf *SchemaFeed) validateDescriptor( // The head could already have been handed out and sorting is not // stable. idxToSort := sort.Search(len(tf.mu.events), func(i int) bool { - return !tf.mu.events[i].After.ModificationTime.Less(earliestTsBeingIngested) + return !tf.mu.events[i].After.GetModificationTime().Less(earliestTsBeingIngested) }) tf.mu.events = append(tf.mu.events, e) toSort := tf.mu.events[idxToSort:] @@ -545,7 +545,7 @@ func (tf *SchemaFeed) validateDescriptor( } // Add the types used by the table into the dependency tracker. 
tf.mu.typeDeps.ingestTable(desc) - tf.mu.previousTableVersion[desc.ID] = desc + tf.mu.previousTableVersion[desc.GetID()] = desc return nil default: return errors.AssertionFailedf("unexpected descriptor type %T", desc) diff --git a/pkg/ccl/changefeedccl/schemafeed/schematestutils/BUILD.bazel b/pkg/ccl/changefeedccl/schemafeed/schematestutils/BUILD.bazel index 409eeb8c53f1..a73520490d16 100644 --- a/pkg/ccl/changefeedccl/schemafeed/schematestutils/BUILD.bazel +++ b/pkg/ccl/changefeedccl/schemafeed/schematestutils/BUILD.bazel @@ -6,6 +6,7 @@ go_library( importpath = "github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl/schemafeed/schematestutils", visibility = ["//visibility:public"], deps = [ + "//pkg/sql/catalog", "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/tabledesc", "//pkg/sql/types", diff --git a/pkg/ccl/changefeedccl/schemafeed/schematestutils/schema_test_utils.go b/pkg/ccl/changefeedccl/schemafeed/schematestutils/schema_test_utils.go index b235af3aefe9..8f91952fe4ad 100644 --- a/pkg/ccl/changefeedccl/schemafeed/schematestutils/schema_test_utils.go +++ b/pkg/ccl/changefeedccl/schemafeed/schematestutils/schema_test_utils.go @@ -13,6 +13,7 @@ package schematestutils import ( "strconv" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/types" @@ -23,7 +24,7 @@ import ( // MakeTableDesc makes a generic table descriptor with the provided properties. func MakeTableDesc( tableID descpb.ID, version descpb.DescriptorVersion, modTime hlc.Timestamp, cols int, -) *tabledesc.Immutable { +) catalog.TableDescriptor { td := descpb.TableDescriptor{ Name: "foo", ID: tableID, @@ -49,21 +50,21 @@ func MakeColumnDesc(id descpb.ColumnID) *descpb.ColumnDescriptor { } // AddColumnDropBackfillMutation adds a mutation to desc to drop a column. -// Yes, this does modify an Immutable. 
-func AddColumnDropBackfillMutation(desc *tabledesc.Immutable) *tabledesc.Immutable { - desc.Mutations = append(desc.Mutations, descpb.DescriptorMutation{ +// Yes, this does modify an immutable. +func AddColumnDropBackfillMutation(desc catalog.TableDescriptor) catalog.TableDescriptor { + desc.TableDesc().Mutations = append(desc.TableDesc().Mutations, descpb.DescriptorMutation{ State: descpb.DescriptorMutation_DELETE_AND_WRITE_ONLY, Direction: descpb.DescriptorMutation_DROP, - Descriptor_: &descpb.DescriptorMutation_Column{Column: MakeColumnDesc(desc.NextColumnID - 1)}, + Descriptor_: &descpb.DescriptorMutation_Column{Column: MakeColumnDesc(desc.GetNextColumnID() - 1)}, }) return desc } // AddNewColumnBackfillMutation adds a mutation to desc to add a column. -// Yes, this does modify an Immutable. -func AddNewColumnBackfillMutation(desc *tabledesc.Immutable) *tabledesc.Immutable { - desc.Mutations = append(desc.Mutations, descpb.DescriptorMutation{ - Descriptor_: &descpb.DescriptorMutation_Column{Column: MakeColumnDesc(desc.NextColumnID)}, +// Yes, this does modify an immutable. 
+func AddNewColumnBackfillMutation(desc catalog.TableDescriptor) catalog.TableDescriptor { + desc.TableDesc().Mutations = append(desc.TableDesc().Mutations, descpb.DescriptorMutation{ + Descriptor_: &descpb.DescriptorMutation_Column{Column: MakeColumnDesc(desc.GetNextColumnID())}, State: descpb.DescriptorMutation_DELETE_AND_WRITE_ONLY, Direction: descpb.DescriptorMutation_ADD, MutationID: 0, diff --git a/pkg/ccl/changefeedccl/schemafeed/table_event_filter.go b/pkg/ccl/changefeedccl/schemafeed/table_event_filter.go index 91991f910dea..b75b31c0af45 100644 --- a/pkg/ccl/changefeedccl/schemafeed/table_event_filter.go +++ b/pkg/ccl/changefeedccl/schemafeed/table_event_filter.go @@ -12,8 +12,8 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl/changefeedbase" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/errors" ) @@ -74,10 +74,10 @@ func (b tableEventFilter) shouldFilter(ctx context.Context, e TableEvent) (bool, et := classifyTableEvent(e) // Truncation events are not ignored and return an error. 
if et == tableEventTruncate { - return false, errors.Errorf(`"%s" was truncated`, e.Before.Name) + return false, errors.Errorf(`"%s" was truncated`, e.Before.GetName()) } if et == tableEventPrimaryKeyChange { - return false, errors.Errorf(`"%s" primary key changed`, e.Before.Name) + return false, errors.Errorf(`"%s" primary key changed`, e.Before.GetName()) } shouldFilter, ok := b[et] if !ok { @@ -92,8 +92,8 @@ func hasNewColumnDropBackfillMutation(e TableEvent) (res bool) { return !dropColumnMutationExists(e.Before) && dropColumnMutationExists(e.After) } -func dropColumnMutationExists(desc *tabledesc.Immutable) bool { - for _, m := range desc.Mutations { +func dropColumnMutationExists(desc catalog.TableDescriptor) bool { + for _, m := range desc.GetMutations() { if m.GetColumn() == nil { continue } @@ -108,17 +108,17 @@ func dropColumnMutationExists(desc *tabledesc.Immutable) bool { func newColumnBackfillComplete(e TableEvent) (res bool) { // TODO(ajwerner): What is the case where the before has a backfill mutation // and the After doesn't? What about other queued mutations? 
- return len(e.Before.Columns) < len(e.After.Columns) && + return len(e.Before.GetPublicColumns()) < len(e.After.GetPublicColumns()) && e.Before.HasColumnBackfillMutation() && !e.After.HasColumnBackfillMutation() } func newColumnNoBackfill(e TableEvent) (res bool) { - return len(e.Before.Columns) < len(e.After.Columns) && + return len(e.Before.GetPublicColumns()) < len(e.After.GetPublicColumns()) && !e.Before.HasColumnBackfillMutation() } -func pkChangeMutationExists(desc *tabledesc.Immutable) bool { - for _, m := range desc.Mutations { +func pkChangeMutationExists(desc catalog.TableDescriptor) bool { + for _, m := range desc.GetMutations() { if m.Direction == descpb.DescriptorMutation_ADD && m.GetPrimaryKeySwap() != nil { return true } @@ -134,6 +134,6 @@ func tableTruncated(e TableEvent) bool { } func primaryKeyChanged(e TableEvent) bool { - return e.Before.PrimaryIndex.ID != e.After.PrimaryIndex.ID && + return e.Before.GetPrimaryIndexID() != e.After.GetPrimaryIndexID() && pkChangeMutationExists(e.Before) } diff --git a/pkg/ccl/changefeedccl/schemafeed/table_event_filter_test.go b/pkg/ccl/changefeedccl/schemafeed/table_event_filter_test.go index adad530edeb4..fde8002e8a72 100644 --- a/pkg/ccl/changefeedccl/schemafeed/table_event_filter_test.go +++ b/pkg/ccl/changefeedccl/schemafeed/table_event_filter_test.go @@ -14,7 +14,7 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl/schemafeed/schematestutils" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/require" @@ -73,9 +73,9 @@ func TestTableEventFilter(t *testing.T) { name: "don't filter end of add NULL-able computed column", p: defaultTableEventFilter, e: TableEvent{ - Before: func() *tabledesc.Immutable { + Before: func() catalog.TableDescriptor { td := 
addColBackfill(mkTableDesc(42, 4, ts(4), 1)) - col := td.Mutations[0].GetColumn() + col := td.GetMutations()[0].GetColumn() col.Nullable = true col.ComputeExpr = proto.String("1") return td diff --git a/pkg/ccl/changefeedccl/sink_cloudstorage_test.go b/pkg/ccl/changefeedccl/sink_cloudstorage_test.go index 189c685634f6..5d8d35deaaef 100644 --- a/pkg/ccl/changefeedccl/sink_cloudstorage_test.go +++ b/pkg/ccl/changefeedccl/sink_cloudstorage_test.go @@ -206,7 +206,7 @@ func TestCloudStorageSink(t *testing.T) { require.True(t, sf.Forward(testSpan, ts(4))) require.NoError(t, s.Flush(ctx)) require.NoError(t, s.EmitRow(ctx, t1, noKey, []byte(`v4`), ts(4))) - t1.Version = 2 + t1.TableDesc().Version = 2 require.NoError(t, s.EmitRow(ctx, t1, noKey, []byte(`v5`), ts(5))) require.NoError(t, s.Flush(ctx)) expected = []string{ @@ -502,11 +502,11 @@ func TestCloudStorageSink(t *testing.T) { require.NoError(t, err) require.NoError(t, s.EmitRow(ctx, t1, noKey, []byte(`v1`), ts(1))) - t1.Version = 1 + t1.TableDesc().Version = 1 require.NoError(t, s.EmitRow(ctx, t1, noKey, []byte(`v3`), ts(1))) // Make the first file exceed its file size threshold. This should trigger a flush // for the first file but not the second one. - t1.Version = 0 + t1.TableDesc().Version = 0 require.NoError(t, s.EmitRow(ctx, t1, noKey, []byte(`trigger-flush-v1`), ts(1))) require.Equal(t, []string{ "v1\ntrigger-flush-v1\n", @@ -515,7 +515,7 @@ func TestCloudStorageSink(t *testing.T) { // Now make the file with the newer schema exceed its file size threshold and ensure // that the file with the older schema is flushed (and ordered) before. 
require.NoError(t, s.EmitRow(ctx, t1, noKey, []byte(`v2`), ts(1))) - t1.Version = 1 + t1.TableDesc().Version = 1 require.NoError(t, s.EmitRow(ctx, t1, noKey, []byte(`trigger-flush-v3`), ts(1))) require.Equal(t, []string{ "v1\ntrigger-flush-v1\n", @@ -525,7 +525,7 @@ func TestCloudStorageSink(t *testing.T) { // Calling `Flush()` on the sink should emit files in the order of their schema IDs. require.NoError(t, s.EmitRow(ctx, t1, noKey, []byte(`w1`), ts(1))) - t1.Version = 0 + t1.TableDesc().Version = 0 require.NoError(t, s.EmitRow(ctx, t1, noKey, []byte(`x1`), ts(1))) require.NoError(t, s.Flush(ctx)) require.Equal(t, []string{ diff --git a/pkg/ccl/changefeedccl/sink_test.go b/pkg/ccl/changefeedccl/sink_test.go index f8ea3de82883..2d5e8d8c79ce 100644 --- a/pkg/ccl/changefeedccl/sink_test.go +++ b/pkg/ccl/changefeedccl/sink_test.go @@ -19,6 +19,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/security" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/testutils" @@ -54,7 +55,7 @@ func TestKafkaSink(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - table := func(name string) *tabledesc.Immutable { + table := func(name string) catalog.TableDescriptor { return tabledesc.NewImmutable(descpb.TableDescriptor{Name: name}) } @@ -146,7 +147,7 @@ func TestKafkaSinkEscaping(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - table := func(name string) *tabledesc.Immutable { + table := func(name string) catalog.TableDescriptor { return tabledesc.NewImmutable(descpb.TableDescriptor{Name: name}) } @@ -187,7 +188,7 @@ func TestSQLSink(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - table := 
func(name string) *tabledesc.Immutable { + table := func(name string) catalog.TableDescriptor { id, _ := strconv.ParseUint(name, 36, 64) return tabledesc.NewImmutable(descpb.TableDescriptor{Name: name, ID: descpb.ID(id)}) } diff --git a/pkg/ccl/importccl/BUILD.bazel b/pkg/ccl/importccl/BUILD.bazel index e163cffb884e..2350a4f2b8ad 100644 --- a/pkg/ccl/importccl/BUILD.bazel +++ b/pkg/ccl/importccl/BUILD.bazel @@ -133,6 +133,7 @@ go_test( "//pkg/server", "//pkg/settings/cluster", "//pkg/sql", + "//pkg/sql/catalog", "//pkg/sql/catalog/catalogkv", "//pkg/sql/catalog/catformat", "//pkg/sql/catalog/descpb", diff --git a/pkg/ccl/importccl/import_processor.go b/pkg/ccl/importccl/import_processor.go index 9fdb3ed04381..7aae80e09781 100644 --- a/pkg/ccl/importccl/import_processor.go +++ b/pkg/ccl/importccl/import_processor.go @@ -173,7 +173,7 @@ func makeInputConverter( seqChunkProvider *row.SeqChunkProvider, ) (inputConverter, error) { injectTimeIntoEvalCtx(evalCtx, spec.WalltimeNanos) - var singleTable *tabledesc.Immutable + var singleTable catalog.TableDescriptor var singleTableTargetCols tree.NameList if len(spec.Tables) == 1 { for _, table := range spec.Tables { diff --git a/pkg/ccl/importccl/import_stmt.go b/pkg/ccl/importccl/import_stmt.go index b69e61ff2f14..15ff10424af3 100644 --- a/pkg/ccl/importccl/import_stmt.go +++ b/pkg/ccl/importccl/import_stmt.go @@ -1641,15 +1641,15 @@ func (r *importResumer) dropTables( return nil } - var revert []*tabledesc.Immutable - var empty []*tabledesc.Immutable + var revert []catalog.TableDescriptor + var empty []catalog.TableDescriptor for _, tbl := range details.Tables { if !tbl.IsNew { desc, err := descsCol.GetMutableTableVersionByID(ctx, tbl.Desc.ID, txn) if err != nil { return err } - imm := desc.ImmutableCopy().(*tabledesc.Immutable) + imm := desc.ImmutableCopy().(catalog.TableDescriptor) if tbl.WasEmpty { empty = append(empty, imm) } else { @@ -1679,7 +1679,7 @@ func (r *importResumer) dropTables( for i := range empty { if err 
:= gcjob.ClearTableData(ctx, execCfg.DB, execCfg.DistSender, execCfg.Codec, empty[i]); err != nil { - return errors.Wrapf(err, "clearing data for table %d", empty[i].ID) + return errors.Wrapf(err, "clearing data for table %d", empty[i].GetID()) } } diff --git a/pkg/ccl/importccl/import_stmt_test.go b/pkg/ccl/importccl/import_stmt_test.go index eb9f171e0ee1..986d0ab6073b 100644 --- a/pkg/ccl/importccl/import_stmt_test.go +++ b/pkg/ccl/importccl/import_stmt_test.go @@ -39,9 +39,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/distsql" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/gcjob" @@ -2097,7 +2097,7 @@ func TestImportCSVStmt(t *testing.T) { // it was created in. 
dbID := sqlutils.QueryDatabaseID(t, sqlDB.DB, "failedimport") tableID := descpb.ID(dbID + 1) - var td *tabledesc.Immutable + var td catalog.TableDescriptor if err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { td, err = catalogkv.MustGetTableDescByID(ctx, txn, keys.SystemSQLCodec, tableID) return err @@ -3634,7 +3634,7 @@ func BenchmarkCSVConvertRecord(b *testing.B) { importCtx := ¶llelImportContext{ evalCtx: &evalCtx, - tableDesc: tableDesc.ImmutableCopy().(*tabledesc.Immutable), + tableDesc: tableDesc.ImmutableCopy().(catalog.TableDescriptor), kvCh: kvCh, } @@ -4541,7 +4541,7 @@ func BenchmarkDelimitedConvertRecord(b *testing.B) { RowSeparator: '\n', FieldSeparator: '\t', }, kvCh, 0, 0, - tableDesc.ImmutableCopy().(*tabledesc.Immutable), nil /* targetCols */, &evalCtx) + tableDesc.ImmutableCopy().(catalog.TableDescriptor), nil /* targetCols */, &evalCtx) require.NoError(b, err) producer := &csvBenchmarkStream{ @@ -4644,7 +4644,7 @@ func BenchmarkPgCopyConvertRecord(b *testing.B) { Null: `\N`, MaxRowSize: 4096, }, kvCh, 0, 0, - tableDesc.ImmutableCopy().(*tabledesc.Immutable), nil /* targetCols */, &evalCtx) + tableDesc.ImmutableCopy().(catalog.TableDescriptor), nil /* targetCols */, &evalCtx) require.NoError(b, err) producer := &csvBenchmarkStream{ diff --git a/pkg/ccl/importccl/import_table_creation.go b/pkg/ccl/importccl/import_table_creation.go index dcc7f3d84fea..cfac361306df 100644 --- a/pkg/ccl/importccl/import_table_creation.go +++ b/pkg/ccl/importccl/import_table_creation.go @@ -318,7 +318,7 @@ func (r fkResolver) LookupSchema( // Implements the sql.SchemaResolver interface. 
func (r fkResolver) LookupTableByID( ctx context.Context, id descpb.ID, -) (*tabledesc.Immutable, error) { +) (catalog.TableDescriptor, error) { return nil, errSchemaResolver } diff --git a/pkg/ccl/importccl/read_import_avro.go b/pkg/ccl/importccl/read_import_avro.go index 4f02d3d8f717..f7f67375b51d 100644 --- a/pkg/ccl/importccl/read_import_avro.go +++ b/pkg/ccl/importccl/read_import_avro.go @@ -18,7 +18,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/lexbase" "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" @@ -454,7 +454,7 @@ var _ inputConverter = &avroInputReader{} func newAvroInputReader( kvCh chan row.KVBatch, - tableDesc *tabledesc.Immutable, + tableDesc catalog.TableDescriptor, avroOpts roachpb.AvroOptions, walltime int64, parallelism int, diff --git a/pkg/ccl/importccl/read_import_avro_test.go b/pkg/ccl/importccl/read_import_avro_test.go index b8e0b09e3743..0e747781d2d9 100644 --- a/pkg/ccl/importccl/read_import_avro_test.go +++ b/pkg/ccl/importccl/read_import_avro_test.go @@ -21,8 +21,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -142,7 +142,7 @@ func (g *intArrGen) Gen() interface{} { // A testHelper to generate avro data. 
type testHelper struct { schemaJSON string - schemaTable *tabledesc.Immutable + schemaTable catalog.TableDescriptor codec *goavro.Codec gens []avroGen settings *cluster.Settings @@ -200,7 +200,7 @@ func newTestHelper(ctx context.Context, t *testing.T, gens ...avroGen) *testHelp return &testHelper{ schemaJSON: string(schemaJSON), schemaTable: descForTable(ctx, t, createStmt, 10, 20, NoFKs). - ImmutableCopy().(*tabledesc.Immutable), + ImmutableCopy().(catalog.TableDescriptor), codec: codec, gens: gens, settings: st, @@ -595,7 +595,7 @@ func benchmarkAvroImport(b *testing.B, avroOpts roachpb.AvroOptions, testData st require.NoError(b, err) avro, err := newAvroInputReader(kvCh, - tableDesc.ImmutableCopy().(*tabledesc.Immutable), + tableDesc.ImmutableCopy().(catalog.TableDescriptor), avroOpts, 0, 0, &evalCtx) require.NoError(b, err) diff --git a/pkg/ccl/importccl/read_import_base.go b/pkg/ccl/importccl/read_import_base.go index 7f2dbcee48fa..dc1693232339 100644 --- a/pkg/ccl/importccl/read_import_base.go +++ b/pkg/ccl/importccl/read_import_base.go @@ -26,7 +26,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" @@ -412,14 +412,14 @@ func newImportRowError(err error, row string, num int64) error { // parallelImportContext describes state associated with the import. type parallelImportContext struct { - walltime int64 // Import time stamp. - numWorkers int // Parallelism. - batchSize int // Number of records to batch. - evalCtx *tree.EvalContext // Evaluation context. 
- tableDesc *tabledesc.Immutable // Table descriptor we're importing into. - targetCols tree.NameList // List of columns to import. nil if importing all columns. - kvCh chan row.KVBatch // Channel for sending KV batches. - seqChunkProvider *row.SeqChunkProvider // Used to reserve chunks of sequence values. + walltime int64 // Import time stamp. + numWorkers int // Parallelism. + batchSize int // Number of records to batch. + evalCtx *tree.EvalContext // Evaluation context. + tableDesc catalog.TableDescriptor // Table descriptor we're importing into. + targetCols tree.NameList // List of columns to import. nil if importing all columns. + kvCh chan row.KVBatch // Channel for sending KV batches. + seqChunkProvider *row.SeqChunkProvider // Used to reserve chunks of sequence values. } // importFileContext describes state specific to a file being imported. diff --git a/pkg/ccl/importccl/read_import_csv.go b/pkg/ccl/importccl/read_import_csv.go index d68bc01cad3f..b087ddc39f25 100644 --- a/pkg/ccl/importccl/read_import_csv.go +++ b/pkg/ccl/importccl/read_import_csv.go @@ -15,7 +15,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -39,7 +39,7 @@ func newCSVInputReader( opts roachpb.CSVOptions, walltime int64, parallelism int, - tableDesc *tabledesc.Immutable, + tableDesc catalog.TableDescriptor, targetCols tree.NameList, evalCtx *tree.EvalContext, seqChunkProvider *row.SeqChunkProvider, diff --git a/pkg/ccl/importccl/read_import_mysqlout.go b/pkg/ccl/importccl/read_import_mysqlout.go index 7e847c52422e..43eb38506f2f 100644 --- a/pkg/ccl/importccl/read_import_mysqlout.go +++ 
b/pkg/ccl/importccl/read_import_mysqlout.go @@ -17,7 +17,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -38,7 +38,7 @@ func newMysqloutfileReader( kvCh chan row.KVBatch, walltime int64, parallelism int, - tableDesc *tabledesc.Immutable, + tableDesc catalog.TableDescriptor, targetCols tree.NameList, evalCtx *tree.EvalContext, ) (*mysqloutfileReader, error) { diff --git a/pkg/ccl/importccl/read_import_pgcopy.go b/pkg/ccl/importccl/read_import_pgcopy.go index 9906fbc77f1e..225de1d14e67 100644 --- a/pkg/ccl/importccl/read_import_pgcopy.go +++ b/pkg/ccl/importccl/read_import_pgcopy.go @@ -19,7 +19,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -44,7 +44,7 @@ func newPgCopyReader( kvCh chan row.KVBatch, walltime int64, parallelism int, - tableDesc *tabledesc.Immutable, + tableDesc catalog.TableDescriptor, targetCols tree.NameList, evalCtx *tree.EvalContext, ) (*pgCopyReader, error) { diff --git a/pkg/ccl/importccl/read_import_workload.go b/pkg/ccl/importccl/read_import_workload.go index 65bac517563e..500997fe3c56 100644 --- a/pkg/ccl/importccl/read_import_workload.go +++ b/pkg/ccl/importccl/read_import_workload.go @@ -20,7 +20,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/col/coldata" 
"github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -36,14 +36,14 @@ import ( type workloadReader struct { evalCtx *tree.EvalContext - table *tabledesc.Immutable + table catalog.TableDescriptor kvCh chan row.KVBatch } var _ inputConverter = &workloadReader{} func newWorkloadReader( - kvCh chan row.KVBatch, table *tabledesc.Immutable, evalCtx *tree.EvalContext, + kvCh chan row.KVBatch, table catalog.TableDescriptor, evalCtx *tree.EvalContext, ) *workloadReader { return &workloadReader{evalCtx: evalCtx, table: table, kvCh: kvCh} } @@ -178,7 +178,7 @@ func (w *workloadReader) readFiles( // WorkloadKVConverter converts workload.BatchedTuples to []roachpb.KeyValues. type WorkloadKVConverter struct { - tableDesc *tabledesc.Immutable + tableDesc catalog.TableDescriptor rows workload.BatchedTuples batchIdxAtomic int64 batchEnd int @@ -194,7 +194,7 @@ type WorkloadKVConverter struct { // range of batches, emitted converted kvs to the given channel. func NewWorkloadKVConverter( fileID int32, - tableDesc *tabledesc.Immutable, + tableDesc catalog.TableDescriptor, rows workload.BatchedTuples, batchStart, batchEnd int, kvCh chan row.KVBatch, diff --git a/pkg/ccl/partitionccl/drop_test.go b/pkg/ccl/partitionccl/drop_test.go index 4fc714521c88..20602693a3ba 100644 --- a/pkg/ccl/partitionccl/drop_test.go +++ b/pkg/ccl/partitionccl/drop_test.go @@ -91,7 +91,7 @@ func TestDropIndexWithZoneConfigCCL(t *testing.T) { // All zone configs should still exist. 
var buf []byte cfg := &zonepb.ZoneConfig{} - sqlDB.QueryRow(t, "SELECT config FROM system.zones WHERE id = $1", tableDesc.ID).Scan(&buf) + sqlDB.QueryRow(t, "SELECT config FROM system.zones WHERE id = $1", tableDesc.GetID()).Scan(&buf) if err := protoutil.Unmarshal(buf, cfg); err != nil { t.Fatal(err) } @@ -129,7 +129,7 @@ func TestDropIndexWithZoneConfigCCL(t *testing.T) { } else if l := 0; len(kvs) != l { return errors.Errorf("expected %d key value pairs, but got %d", l, len(kvs)) } - sqlDB.QueryRow(t, "SELECT config FROM system.zones WHERE id = $1", tableDesc.ID).Scan(&buf) + sqlDB.QueryRow(t, "SELECT config FROM system.zones WHERE id = $1", tableDesc.GetID()).Scan(&buf) if err := protoutil.Unmarshal(buf, cfg); err != nil { return err } diff --git a/pkg/ccl/partitionccl/partition_test.go b/pkg/ccl/partitionccl/partition_test.go index a48ccc3fb810..80673478c7c1 100644 --- a/pkg/ccl/partitionccl/partition_test.go +++ b/pkg/ccl/partitionccl/partition_test.go @@ -1413,7 +1413,7 @@ ALTER TABLE t ALTER PRIMARY KEY USING COLUMNS (y) // Get the zone config corresponding to the table. table := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "t") - kv, err := kvDB.Get(ctx, config.MakeZoneKey(config.SystemTenantObjectID(table.ID))) + kv, err := kvDB.Get(ctx, config.MakeZoneKey(config.SystemTenantObjectID(table.GetID()))) if err != nil { t.Fatal(err) } @@ -1436,7 +1436,7 @@ ALTER TABLE t ALTER PRIMARY KEY USING COLUMNS (y) } // Subzone spans have the table prefix omitted. - prefix := keys.SystemSQLCodec.TablePrefix(uint32(table.ID)) + prefix := keys.SystemSQLCodec.TablePrefix(uint32(table.GetID())) for i := range expectedSpans { // Subzone spans have the table prefix omitted. 
expected := bytes.TrimPrefix(expectedSpans[i], prefix) diff --git a/pkg/ccl/storageccl/BUILD.bazel b/pkg/ccl/storageccl/BUILD.bazel index a00f59f7ed86..8c8f3e141194 100644 --- a/pkg/ccl/storageccl/BUILD.bazel +++ b/pkg/ccl/storageccl/BUILD.bazel @@ -25,6 +25,7 @@ go_library( "//pkg/roachpb", "//pkg/settings", "//pkg/settings/cluster", + "//pkg/sql/catalog", "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/tabledesc", "//pkg/sql/sem/builtins", diff --git a/pkg/ccl/storageccl/key_rewriter.go b/pkg/ccl/storageccl/key_rewriter.go index c12b906fbd9e..d9a1ed2891bc 100644 --- a/pkg/ccl/storageccl/key_rewriter.go +++ b/pkg/ccl/storageccl/key_rewriter.go @@ -14,6 +14,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/util/encoding" @@ -77,12 +78,12 @@ func (p prefixRewriter) rewriteKey(key []byte) ([]byte, bool) { // and splits. type KeyRewriter struct { prefixes prefixRewriter - descs map[descpb.ID]*tabledesc.Immutable + descs map[descpb.ID]catalog.TableDescriptor } // MakeKeyRewriterFromRekeys makes a KeyRewriter from Rekey protos. func MakeKeyRewriterFromRekeys(rekeys []roachpb.ImportRequest_TableRekey) (*KeyRewriter, error) { - descs := make(map[descpb.ID]*tabledesc.Immutable) + descs := make(map[descpb.ID]catalog.TableDescriptor) for _, rekey := range rekeys { var desc descpb.Descriptor if err := protoutil.Unmarshal(rekey.NewDesc, &desc); err != nil { @@ -98,7 +99,7 @@ func MakeKeyRewriterFromRekeys(rekeys []roachpb.ImportRequest_TableRekey) (*KeyR } // MakeKeyRewriter makes a KeyRewriter from a map of descs keyed by original ID. 
-func MakeKeyRewriter(descs map[descpb.ID]*tabledesc.Immutable) (*KeyRewriter, error) { +func MakeKeyRewriter(descs map[descpb.ID]catalog.TableDescriptor) (*KeyRewriter, error) { var prefixes prefixRewriter seenPrefixes := make(map[string]bool) for oldID, desc := range descs { @@ -107,7 +108,7 @@ func MakeKeyRewriter(descs map[descpb.ID]*tabledesc.Immutable) (*KeyRewriter, er for _, index := range desc.NonDropIndexes() { oldPrefix := roachpb.Key(makeKeyRewriterPrefixIgnoringInterleaved(oldID, index.GetID())) - newPrefix := roachpb.Key(makeKeyRewriterPrefixIgnoringInterleaved(desc.ID, index.GetID())) + newPrefix := roachpb.Key(makeKeyRewriterPrefixIgnoringInterleaved(desc.GetID(), index.GetID())) if !seenPrefixes[string(oldPrefix)] { seenPrefixes[string(oldPrefix)] = true prefixes.rewrites = append(prefixes.rewrites, prefixRewrite{ diff --git a/pkg/ccl/storageccl/key_rewriter_test.go b/pkg/ccl/storageccl/key_rewriter_test.go index 2db50dd46474..5deecb1d57a0 100644 --- a/pkg/ccl/storageccl/key_rewriter_test.go +++ b/pkg/ccl/storageccl/key_rewriter_test.go @@ -60,7 +60,7 @@ func TestPrefixRewriter(t *testing.T) { func TestKeyRewriter(t *testing.T) { defer leaktest.AfterTest(t)() - desc := tabledesc.NewCreatedMutable(systemschema.NamespaceTable.TableDescriptor) + desc := tabledesc.NewCreatedMutable(*systemschema.NamespaceTable.TableDesc()) oldID := desc.ID newID := desc.ID + 1 desc.ID = newID diff --git a/pkg/ccl/streamingccl/streamclient/random_stream_client.go b/pkg/ccl/streamingccl/streamclient/random_stream_client.go index 61e7a5b34bdb..822ac6760617 100644 --- a/pkg/ccl/streamingccl/streamclient/random_stream_client.go +++ b/pkg/ccl/streamingccl/streamclient/random_stream_client.go @@ -125,7 +125,7 @@ func newRandomStreamClient(streamURL *url.URL) (Client, error) { 50, /* defaultdb */ descpb.ID(tableID), RandomStreamSchema, - systemschema.JobsTable.Privileges, + systemschema.JobsTable.GetPrivileges(), ) if err != nil { return nil, err diff --git 
a/pkg/ccl/workloadccl/format/BUILD.bazel b/pkg/ccl/workloadccl/format/BUILD.bazel index 5867272e1c9b..50619aedb5d0 100644 --- a/pkg/ccl/workloadccl/format/BUILD.bazel +++ b/pkg/ccl/workloadccl/format/BUILD.bazel @@ -8,8 +8,8 @@ go_library( deps = [ "//pkg/ccl/importccl", "//pkg/keys", + "//pkg/sql/catalog", "//pkg/sql/catalog/descpb", - "//pkg/sql/catalog/tabledesc", "//pkg/sql/parser", "//pkg/sql/row", "//pkg/sql/sem/tree", diff --git a/pkg/ccl/workloadccl/format/sstable.go b/pkg/ccl/workloadccl/format/sstable.go index c36a753b2442..3ae2ed695b3b 100644 --- a/pkg/ccl/workloadccl/format/sstable.go +++ b/pkg/ccl/workloadccl/format/sstable.go @@ -15,8 +15,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/ccl/importccl" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -32,7 +32,7 @@ import ( // Table. 
func ToTableDescriptor( t workload.Table, tableID descpb.ID, ts time.Time, -) (*tabledesc.Immutable, error) { +) (catalog.TableDescriptor, error) { ctx := context.Background() semaCtx := tree.MakeSemaContext() stmt, err := parser.ParseOne(fmt.Sprintf(`CREATE TABLE "%s" %s`, t.Name, t.Schema)) @@ -49,7 +49,7 @@ func ToTableDescriptor( if err != nil { return nil, err } - return tableDesc.ImmutableCopy().(*tabledesc.Immutable), nil + return tableDesc.ImmutableCopy().(catalog.TableDescriptor), nil } // ToSSTable constructs a single sstable with the kvs necessary to represent a diff --git a/pkg/jobs/registry.go b/pkg/jobs/registry.go index df3b37afc38f..fbf34efa3b78 100644 --- a/pkg/jobs/registry.go +++ b/pkg/jobs/registry.go @@ -860,7 +860,7 @@ func (r *Registry) isOrphaned(ctx context.Context, payload *jobspb.Payload) (boo return err } hasAnyMutations := len(td.GetMutations()) != 0 || len(td.GetGCMutations()) != 0 - hasDropJob := td.DropJobID != 0 + hasDropJob := td.TableDesc().DropJobID != 0 pendingMutations = hasAnyMutations || hasDropJob return nil }); err != nil { diff --git a/pkg/server/settingsworker.go b/pkg/server/settingsworker.go index 8e435c113c8d..10494a10c5fb 100644 --- a/pkg/server/settingsworker.go +++ b/pkg/server/settingsworker.go @@ -36,8 +36,8 @@ func processSystemConfigKVs( a := &rowenc.DatumAlloc{} codec := keys.TODOSQLCodec - settingsTablePrefix := codec.TablePrefix(uint32(tbl.ID)) - colIdxMap := row.ColIDtoRowIndexFromCols(tbl.Columns) + settingsTablePrefix := codec.TablePrefix(uint32(tbl.GetID())) + colIdxMap := row.ColIDtoRowIndexFromCols(tbl.GetPublicColumns()) var settingsKVs []roachpb.KeyValue processKV := func(ctx context.Context, kv roachpb.KeyValue, u settings.Updater) error { @@ -48,7 +48,7 @@ func processSystemConfigKVs( var k, v, t string // First we need to decode the setting name field from the index key. 
{ - types := []*types.T{tbl.Columns[0].Type} + types := []*types.T{tbl.GetPublicColumns()[0].Type} nameRow := make([]rowenc.EncDatum, 1) _, matches, _, err := rowenc.DecodeIndexKey(codec, tbl, tbl.GetPrimaryIndex().IndexDesc(), types, nameRow, nil, kv.Key) if err != nil { @@ -83,16 +83,16 @@ func processSystemConfigKVs( colID := lastColID + descpb.ColumnID(colIDDiff) lastColID = colID if idx, ok := colIdxMap.Get(colID); ok { - res, bytes, err = rowenc.DecodeTableValue(a, tbl.Columns[idx].Type, bytes) + res, bytes, err = rowenc.DecodeTableValue(a, tbl.GetPublicColumns()[idx].Type, bytes) if err != nil { return err } switch colID { - case tbl.Columns[1].ID: // value + case tbl.GetPublicColumns()[1].ID: // value v = string(tree.MustBeDString(res)) - case tbl.Columns[3].ID: // valueType + case tbl.GetPublicColumns()[3].ID: // valueType t = string(tree.MustBeDString(res)) - case tbl.Columns[2].ID: // lastUpdated + case tbl.GetPublicColumns()[2].ID: // lastUpdated // TODO(dt): we could decode just the len and then seek `bytes` past // it, without allocating/decoding the unused timestamp. default: diff --git a/pkg/sql/BUILD.bazel b/pkg/sql/BUILD.bazel index 0280e4d1ce26..ceafad59b2f5 100644 --- a/pkg/sql/BUILD.bazel +++ b/pkg/sql/BUILD.bazel @@ -345,6 +345,7 @@ go_library( "//pkg/util/grpcutil", "//pkg/util/hlc", "//pkg/util/humanizeutil", + "//pkg/util/iterutil", "//pkg/util/json", "//pkg/util/log", "//pkg/util/log/eventpb", diff --git a/pkg/sql/alter_column_type_test.go b/pkg/sql/alter_column_type_test.go index 54df55b025e5..1acdeab0a7e7 100644 --- a/pkg/sql/alter_column_type_test.go +++ b/pkg/sql/alter_column_type_test.go @@ -244,7 +244,7 @@ ALTER TABLE t.test ALTER COLUMN x TYPE INT; // Ensure that the add column and column swap mutations are cleaned up. 
testutils.SucceedsSoon(t, func() error { desc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") - if len(desc.Mutations) != 0 { + if len(desc.GetMutations()) != 0 { return errors.New("expected no mutations on TableDescriptor") } return nil diff --git a/pkg/sql/alter_primary_key.go b/pkg/sql/alter_primary_key.go index baf783fcfb50..ef57c3457689 100644 --- a/pkg/sql/alter_primary_key.go +++ b/pkg/sql/alter_primary_key.go @@ -104,7 +104,7 @@ func (p *planner) AlterPrimaryKey( if err != nil { return err } - sb.WriteString(childTable.Name) + sb.WriteString(childTable.GetName()) } sb.WriteString("]") return errors.Newf( diff --git a/pkg/sql/backfill.go b/pkg/sql/backfill.go index 38166ef1faa0..81cedf6e2899 100644 --- a/pkg/sql/backfill.go +++ b/pkg/sql/backfill.go @@ -290,7 +290,7 @@ func (sc *SchemaChanger) runBackfill(ctx context.Context) error { if err != nil { return err } - version = descs[tableDesc.ID].Version + version = descs[tableDesc.ID].GetVersion() } // Drop indexes not to be removed by `ClearRange`. @@ -353,7 +353,7 @@ func (sc *SchemaChanger) runBackfill(ctx context.Context) error { // on the new version of the table descriptor. It returns the new table descs. 
func (sc *SchemaChanger) dropConstraints( ctx context.Context, constraints []descpb.ConstraintToUpdate, -) (map[descpb.ID]*tabledesc.Immutable, error) { +) (map[descpb.ID]catalog.TableDescriptor, error) { log.Infof(ctx, "dropping %d constraints", len(constraints)) fksByBackrefTable := make(map[descpb.ID][]*descpb.ConstraintToUpdate) @@ -468,7 +468,7 @@ func (sc *SchemaChanger) dropConstraints( } log.Info(ctx, "finished dropping constraints") - tableDescs := make(map[descpb.ID]*tabledesc.Immutable, len(fksByBackrefTable)+1) + tableDescs := make(map[descpb.ID]catalog.TableDescriptor, len(fksByBackrefTable)+1) if err := sc.txn(ctx, func( ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, ) (err error) { @@ -660,7 +660,7 @@ func (sc *SchemaChanger) validateConstraints( } readAsOf := sc.clock.Now() - var tableDesc *tabledesc.Immutable + var tableDesc catalog.TableDescriptor if err := sc.fixedTimestampTxn(ctx, readAsOf, func(ctx context.Context, txn *kv.Txn) error { tableDesc, err = catalogkv.MustGetTableDescByID(ctx, txn, sc.execCfg.Codec, sc.descID) @@ -684,10 +684,11 @@ func (sc *SchemaChanger) validateConstraints( // (the validation can take many minutes). So we pretend that the schema // has been updated and actually update it in a separate transaction that // follows this one. - desc, err := tableDesc.MakeFirstMutationPublic(tabledesc.IgnoreConstraints) + descI, err := tableDesc.MakeFirstMutationPublic(tabledesc.IgnoreConstraints) if err != nil { return err } + desc := descI.(*tabledesc.Mutable) // Each check operates at the historical timestamp. return runHistoricalTxn(ctx, func(ctx context.Context, txn *kv.Txn, evalCtx *extendedEvalContext) error { // If the constraint is a check constraint that fails validation, we @@ -744,13 +745,13 @@ func (sc *SchemaChanger) validateConstraints( // reuse an existing kv.Txn safely. 
func (sc *SchemaChanger) getTableVersion( ctx context.Context, txn *kv.Txn, tc *descs.Collection, version descpb.DescriptorVersion, -) (*tabledesc.Immutable, error) { +) (catalog.TableDescriptor, error) { tableDesc, err := tc.GetImmutableTableByID(ctx, txn, sc.descID, tree.ObjectLookupFlags{}) if err != nil { return nil, err } - if version != tableDesc.Version { - return nil, makeErrTableVersionMismatch(tableDesc.Version, version) + if version != tableDesc.GetVersion() { + return nil, makeErrTableVersionMismatch(tableDesc.GetVersion(), version) } return tableDesc, nil } @@ -764,7 +765,7 @@ func (sc *SchemaChanger) getTableVersion( func TruncateInterleavedIndexes( ctx context.Context, execCfg *ExecutorConfig, - table *tabledesc.Immutable, + table catalog.TableDescriptor, indexes []descpb.IndexDescriptor, ) error { log.Infof(ctx, "truncating %d interleaved indexes", len(indexes)) @@ -774,7 +775,7 @@ func TruncateInterleavedIndexes( for _, desc := range indexes { var resume roachpb.Span for rowIdx, done := int64(0), false; !done; rowIdx += chunkSize { - log.VEventf(ctx, 2, "truncate interleaved index (%d) at row: %d, span: %s", table.ID, rowIdx, resume) + log.VEventf(ctx, 2, "truncate interleaved index (%d) at row: %d, span: %s", table.GetID(), rowIdx, resume) resumeAt := resume // Make a new txn just to drop this chunk. if err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { @@ -799,7 +800,7 @@ func TruncateInterleavedIndexes( // All the data chunks have been removed. Now also removed the // zone configs for the dropped indexes, if any. 
if err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - return RemoveIndexZoneConfigs(ctx, txn, execCfg, table.ParentID, indexes) + return RemoveIndexZoneConfigs(ctx, txn, execCfg, table.GetParentID(), indexes) }); err != nil { return err } @@ -1405,7 +1406,7 @@ func (sc *SchemaChanger) validateIndexes(ctx context.Context) error { } readAsOf := sc.clock.Now() - var tableDesc *tabledesc.Immutable + var tableDesc catalog.TableDescriptor if err := sc.fixedTimestampTxn(ctx, readAsOf, func(ctx context.Context, txn *kv.Txn) (err error) { tableDesc, err = catalogkv.MustGetTableDescByID(ctx, txn, sc.execCfg.Codec, sc.descID) return err @@ -1416,7 +1417,7 @@ func (sc *SchemaChanger) validateIndexes(ctx context.Context) error { var forwardIndexes []*descpb.IndexDescriptor var invertedIndexes []*descpb.IndexDescriptor - for _, m := range tableDesc.Mutations { + for _, m := range tableDesc.GetMutations() { if sc.mutationID != m.MutationID { break } @@ -1464,7 +1465,7 @@ func (sc *SchemaChanger) validateIndexes(ctx context.Context) error { // at the historical fixed timestamp for checks. 
func (sc *SchemaChanger) validateInvertedIndexes( ctx context.Context, - tableDesc *tabledesc.Immutable, + tableDesc catalog.TableDescriptor, indexes []*descpb.IndexDescriptor, runHistoricalTxn historicalTxnRunner, ) error { @@ -1506,7 +1507,7 @@ func (sc *SchemaChanger) validateInvertedIndexes( return err } log.Infof(ctx, "inverted index %s/%s count = %d, took %s", - tableDesc.Name, idx.Name, idxLen, timeutil.Since(start)) + tableDesc.GetName(), idx.Name, idxLen, timeutil.Since(start)) select { case <-countReady[i]: if idxLen != expectedCount[i] { @@ -1535,12 +1536,12 @@ func (sc *SchemaChanger) validateInvertedIndexes( if geoindex.IsEmptyConfig(&idx.GeoConfig) { stmt = fmt.Sprintf( `SELECT coalesce(sum_int(crdb_internal.num_inverted_index_entries(%q, %d)), 0) FROM [%d AS t]`, - col, idx.Version, tableDesc.ID, + col, idx.Version, tableDesc.GetID(), ) } else { stmt = fmt.Sprintf( `SELECT coalesce(sum_int(crdb_internal.num_geo_inverted_index_entries(%d, %d, %q)), 0) FROM [%d AS t]`, - tableDesc.ID, idx.ID, col, tableDesc.ID, + tableDesc.GetID(), idx.ID, col, tableDesc.GetID(), ) } // If the index is a partial index the predicate must be added @@ -1562,7 +1563,7 @@ func (sc *SchemaChanger) validateInvertedIndexes( return err } log.Infof(ctx, "column %s/%s expected inverted index count = %d, took %s", - tableDesc.Name, col, expectedCount[i], timeutil.Since(start)) + tableDesc.GetName(), col, expectedCount[i], timeutil.Since(start)) return nil }) } @@ -1578,7 +1579,7 @@ func (sc *SchemaChanger) validateInvertedIndexes( // at the historical fixed timestamp for checks. 
func (sc *SchemaChanger) validateForwardIndexes( ctx context.Context, - tableDesc *tabledesc.Immutable, + tableDesc catalog.TableDescriptor, indexes []*descpb.IndexDescriptor, runHistoricalTxn historicalTxnRunner, ) error { @@ -1632,7 +1633,7 @@ func (sc *SchemaChanger) validateForwardIndexes( } log.Infof(ctx, "validation: index %s/%s row count = %d, time so far %s", - tableDesc.Name, idx.Name, idxLen, timeutil.Since(start)) + tableDesc.GetName(), idx.Name, idxLen, timeutil.Since(start)) // Now compare with the row count in the table. select { @@ -1670,10 +1671,11 @@ func (sc *SchemaChanger) validateForwardIndexes( // added earlier in the same mutation. Make the mutations public in an // in-memory copy of the descriptor and add it to the Collection's synthetic // descriptors, so that we can use SQL below to perform the validation. - desc, err := tableDesc.MakeFirstMutationPublic(tabledesc.IgnoreConstraints) + descI, err := tableDesc.MakeFirstMutationPublic(tabledesc.IgnoreConstraints) if err != nil { return err } + desc := descI.(*tabledesc.Mutable) // Count the number of rows in the table. if err := runHistoricalTxn(ctx, func(ctx context.Context, txn *kv.Txn, evalCtx *extendedEvalContext) error { @@ -1689,7 +1691,7 @@ func (sc *SchemaChanger) validateForwardIndexes( // Force the primary index so that the optimizer does not create a // query plan that uses the indexes being backfilled. 
- query := fmt.Sprintf(`SELECT count(1)%s FROM [%d AS t]@[%d]`, partialIndexCounts, desc.ID, desc.GetPrimaryIndexID()) + query := fmt.Sprintf(`SELECT count(1)%s FROM [%d AS t]@[%d]`, partialIndexCounts, desc.GetID(), desc.GetPrimaryIndexID()) ie := evalCtx.InternalExecutor.(*InternalExecutor) return ie.WithSyntheticDescriptors([]catalog.Descriptor{desc}, func() error { @@ -1718,7 +1720,7 @@ func (sc *SchemaChanger) validateForwardIndexes( tableRowCountTime = timeutil.Since(start) log.Infof(ctx, "validation: table %s row count = %d, took %s", - tableDesc.Name, tableRowCount, tableRowCountTime) + tableDesc.GetName(), tableRowCount, tableRowCountTime) return nil }) @@ -2206,7 +2208,7 @@ func columnBackfillInTxn( txn *kv.Txn, evalCtx *tree.EvalContext, semaCtx *tree.SemaContext, - tableDesc *tabledesc.Immutable, + tableDesc catalog.TableDescriptor, traceKV bool, ) error { // A column backfill in the ADD state is a noop. @@ -2248,7 +2250,7 @@ func indexBackfillInTxn( txn *kv.Txn, evalCtx *tree.EvalContext, semaCtx *tree.SemaContext, - tableDesc *tabledesc.Immutable, + tableDesc catalog.TableDescriptor, traceKV bool, ) error { var indexBackfillerMon *mon.BytesMonitor @@ -2283,7 +2285,7 @@ func indexTruncateInTxn( txn *kv.Txn, execCfg *ExecutorConfig, evalCtx *tree.EvalContext, - tableDesc *tabledesc.Immutable, + tableDesc catalog.TableDescriptor, idx *descpb.IndexDescriptor, traceKV bool, ) error { @@ -2304,5 +2306,5 @@ func indexTruncateInTxn( } } // Remove index zone configs. 
- return RemoveIndexZoneConfigs(ctx, txn, execCfg, tableDesc.ID, []descpb.IndexDescriptor{*idx}) + return RemoveIndexZoneConfigs(ctx, txn, execCfg, tableDesc.GetID(), []descpb.IndexDescriptor{*idx}) } diff --git a/pkg/sql/backfill/backfill.go b/pkg/sql/backfill/backfill.go index 44a640e4074c..b466c4271538 100644 --- a/pkg/sql/backfill/backfill.go +++ b/pkg/sql/backfill/backfill.go @@ -76,17 +76,15 @@ type ColumnBackfiller struct { } // initCols is a helper to populate some column metadata on a ColumnBackfiller. -func (cb *ColumnBackfiller) initCols(desc *tabledesc.Immutable) { - if len(desc.Mutations) > 0 { - for _, m := range desc.Mutations { - if ColumnMutationFilter(m) { - desc := *m.GetColumn() - switch m.Direction { - case descpb.DescriptorMutation_ADD: - cb.added = append(cb.added, desc) - case descpb.DescriptorMutation_DROP: - cb.dropped = append(cb.dropped, desc) - } +func (cb *ColumnBackfiller) initCols(desc catalog.TableDescriptor) { + for _, m := range desc.GetMutations() { + if ColumnMutationFilter(m) { + desc := *m.GetColumn() + switch m.Direction { + case descpb.DescriptorMutation_ADD: + cb.added = append(cb.added, desc) + case descpb.DescriptorMutation_DROP: + cb.dropped = append(cb.dropped, desc) } } } @@ -98,7 +96,7 @@ func (cb *ColumnBackfiller) init( evalCtx *tree.EvalContext, defaultExprs []tree.TypedExpr, computedExprs []tree.TypedExpr, - desc *tabledesc.Immutable, + desc catalog.TableDescriptor, mon *mon.BytesMonitor, ) error { cb.evalCtx = evalCtx @@ -121,13 +119,13 @@ func (cb *ColumnBackfiller) init( // We need all the columns. 
var valNeededForCol util.FastIntSet - valNeededForCol.AddRange(0, len(desc.Columns)-1) + valNeededForCol.AddRange(0, len(desc.GetPublicColumns())-1) tableArgs := row.FetcherTableArgs{ Desc: desc, Index: desc.GetPrimaryIndex().IndexDesc(), ColIdxMap: desc.ColumnIdxMap(), - Cols: desc.Columns, + Cols: desc.GetPublicColumns(), ValNeededForCol: valNeededForCol, } @@ -157,7 +155,7 @@ func (cb *ColumnBackfiller) InitForLocalUse( ctx context.Context, evalCtx *tree.EvalContext, semaCtx *tree.SemaContext, - desc *tabledesc.Immutable, + desc catalog.TableDescriptor, mon *mon.BytesMonitor, ) error { cb.initCols(desc) @@ -172,7 +170,7 @@ func (cb *ColumnBackfiller) InitForLocalUse( cb.added, desc.GetPublicColumns(), desc, - tree.NewUnqualifiedTableName(tree.Name(desc.Name)), + tree.NewUnqualifiedTableName(tree.Name(desc.GetName())), evalCtx, semaCtx, ) @@ -188,7 +186,10 @@ func (cb *ColumnBackfiller) InitForLocalUse( // necessary due to the different procedure for accessing user defined type // metadata as part of a distributed flow. 
func (cb *ColumnBackfiller) InitForDistributedUse( - ctx context.Context, flowCtx *execinfra.FlowCtx, desc *tabledesc.Immutable, mon *mon.BytesMonitor, + ctx context.Context, + flowCtx *execinfra.FlowCtx, + desc catalog.TableDescriptor, + mon *mon.BytesMonitor, ) error { cb.initCols(desc) evalCtx := flowCtx.NewEvalCtx() @@ -216,7 +217,7 @@ func (cb *ColumnBackfiller) InitForDistributedUse( cb.added, desc.GetPublicColumns(), desc, - tree.NewUnqualifiedTableName(tree.Name(desc.Name)), + tree.NewUnqualifiedTableName(tree.Name(desc.GetName())), evalCtx, &semaCtx, ) @@ -248,7 +249,7 @@ func (cb *ColumnBackfiller) Close(ctx context.Context) { func (cb *ColumnBackfiller) RunColumnBackfillChunk( ctx context.Context, txn *kv.Txn, - tableDesc *tabledesc.Immutable, + tableDesc catalog.TableDescriptor, sp roachpb.Span, chunkSize int64, alsoCommit bool, @@ -256,8 +257,8 @@ func (cb *ColumnBackfiller) RunColumnBackfillChunk( ) (roachpb.Key, error) { // TODO(dan): Tighten up the bound on the requestedCols parameter to // makeRowUpdater. - requestedCols := make([]descpb.ColumnDescriptor, 0, len(tableDesc.Columns)+len(cb.added)+len(cb.dropped)) - requestedCols = append(requestedCols, tableDesc.Columns...) + requestedCols := make([]descpb.ColumnDescriptor, 0, len(tableDesc.GetPublicColumns())+len(cb.added)+len(cb.dropped)) + requestedCols = append(requestedCols, tableDesc.GetPublicColumns()...) requestedCols = append(requestedCols, cb.added...) requestedCols = append(requestedCols, cb.dropped...) ru, err := row.MakeUpdater( @@ -302,7 +303,7 @@ func (cb *ColumnBackfiller) RunColumnBackfillChunk( b := txn.NewBatch() rowLength := 0 iv := &schemaexpr.RowIndexedVarContainer{ - Cols: append(tableDesc.Columns, cb.added...), + Cols: append(tableDesc.GetPublicColumns(), cb.added...), Mapping: ru.FetchColIDtoRowIndex, } cb.evalCtx.IVarContainer = iv @@ -366,7 +367,9 @@ func (cb *ColumnBackfiller) RunColumnBackfillChunk( } // ConvertBackfillError returns a cleaner SQL error for a failed Batch. 
-func ConvertBackfillError(ctx context.Context, tableDesc *tabledesc.Immutable, b *kv.Batch) error { +func ConvertBackfillError( + ctx context.Context, tableDesc catalog.TableDescriptor, b *kv.Batch, +) error { // A backfill on a new schema element has failed and the batch contains // information useful in printing a sensible error. However // ConvertBatchError() will only work correctly if the schema elements @@ -375,7 +378,7 @@ func ConvertBackfillError(ctx context.Context, tableDesc *tabledesc.Immutable, b if err != nil { return err } - return row.ConvertBatchError(ctx, tabledesc.NewImmutable(*desc.TableDesc()), b) + return row.ConvertBatchError(ctx, desc, b) } type muBoundAccount struct { @@ -446,7 +449,7 @@ func (ib *IndexBackfiller) InitForLocalUse( ctx context.Context, evalCtx *tree.EvalContext, semaCtx *tree.SemaContext, - desc *tabledesc.Immutable, + desc catalog.TableDescriptor, mon *mon.BytesMonitor, ) error { // Initialize ib.cols and ib.colIdxMap. @@ -565,7 +568,10 @@ func constructExprs( // due to the different procedure for accessing user defined type metadata as // part of a distributed flow. func (ib *IndexBackfiller) InitForDistributedUse( - ctx context.Context, flowCtx *execinfra.FlowCtx, desc *tabledesc.Immutable, mon *mon.BytesMonitor, + ctx context.Context, + flowCtx *execinfra.FlowCtx, + desc catalog.TableDescriptor, + mon *mon.BytesMonitor, ) error { // Initialize ib.cols and ib.colIdxMap. ib.initCols(desc) @@ -643,22 +649,22 @@ func (ib *IndexBackfiller) ShrinkBoundAccount(ctx context.Context, shrinkBy int6 // initCols is a helper to populate column metadata of an IndexBackfiller. It // populates the cols and colIdxMap fields. 
-func (ib *IndexBackfiller) initCols(desc *tabledesc.Immutable) { - for i := range desc.Columns { - col := &desc.Columns[i] +func (ib *IndexBackfiller) initCols(desc catalog.TableDescriptor) { + for i := range desc.GetPublicColumns() { + col := &desc.GetPublicColumns()[i] ib.cols = append(ib.cols, *col) if col.IsComputed() && col.Virtual { ib.computedCols = append(ib.computedCols, *col) } } - ib.cols = append([]descpb.ColumnDescriptor(nil), desc.Columns...) + ib.cols = append([]descpb.ColumnDescriptor(nil), desc.GetPublicColumns()...) // If there are ongoing mutations, add columns that are being added and in // the DELETE_AND_WRITE_ONLY state. - if len(desc.Mutations) > 0 { - ib.cols = make([]descpb.ColumnDescriptor, 0, len(desc.Columns)+len(desc.Mutations)) - ib.cols = append(ib.cols, desc.Columns...) - for _, m := range desc.Mutations { + if len(desc.GetMutations()) > 0 { + ib.cols = make([]descpb.ColumnDescriptor, 0, len(desc.GetPublicColumns())+len(desc.GetMutations())) + ib.cols = append(ib.cols, desc.GetPublicColumns()...) + for _, m := range desc.GetMutations() { if column := m.GetColumn(); column != nil && m.Direction == descpb.DescriptorMutation_ADD && m.State == descpb.DescriptorMutation_DELETE_AND_WRITE_ONLY { @@ -681,13 +687,13 @@ func (ib *IndexBackfiller) initCols(desc *tabledesc.Immutable) { // initIndexes is a helper to populate index metadata of an IndexBackfiller. It // populates the added field. It returns a set of column ordinals that must be // fetched in order to backfill the added indexes. -func (ib *IndexBackfiller) initIndexes(desc *tabledesc.Immutable) util.FastIntSet { +func (ib *IndexBackfiller) initIndexes(desc catalog.TableDescriptor) util.FastIntSet { var valNeededForCol util.FastIntSet - mutationID := desc.Mutations[0].MutationID + mutationID := desc.GetMutations()[0].MutationID // Mutations in the same transaction have the same ID. Loop through the // mutations and collect all index mutations. 
- for _, m := range desc.Mutations { + for _, m := range desc.GetMutations() { if m.MutationID != mutationID { break } @@ -700,7 +706,7 @@ func (ib *IndexBackfiller) initIndexes(desc *tabledesc.Immutable) util.FastIntSe isPrimaryIndex := idx.GetEncodingType(desc.GetPrimaryIndexID()) == descpb.PrimaryIndexEncoding if (idxContainsColumn || isPrimaryIndex) && !ib.cols[i].Virtual && - i < len(desc.Columns) { + i < len(desc.GetPublicColumns()) { valNeededForCol.Add(i) } } @@ -716,7 +722,7 @@ func (ib *IndexBackfiller) init( predicateExprs map[descpb.IndexID]tree.TypedExpr, colExprs map[descpb.ColumnID]tree.TypedExpr, valNeededForCol util.FastIntSet, - desc *tabledesc.Immutable, + desc catalog.TableDescriptor, mon *mon.BytesMonitor, ) error { ib.evalCtx = evalCtx @@ -775,7 +781,7 @@ func (ib *IndexBackfiller) init( func (ib *IndexBackfiller) BuildIndexEntriesChunk( ctx context.Context, txn *kv.Txn, - tableDesc *tabledesc.Immutable, + tableDesc catalog.TableDescriptor, sp roachpb.Span, chunkSize int64, traceKV bool, @@ -958,7 +964,7 @@ func (ib *IndexBackfiller) BuildIndexEntriesChunk( func (ib *IndexBackfiller) RunIndexBackfillChunk( ctx context.Context, txn *kv.Txn, - tableDesc *tabledesc.Immutable, + tableDesc catalog.TableDescriptor, sp roachpb.Span, chunkSize int64, alsoCommit bool, diff --git a/pkg/sql/catalog/bootstrap/metadata.go b/pkg/sql/catalog/bootstrap/metadata.go index 3bd9518253e5..7818715e16f8 100644 --- a/pkg/sql/catalog/bootstrap/metadata.go +++ b/pkg/sql/catalog/bootstrap/metadata.go @@ -271,7 +271,7 @@ func LookupSystemTableDescriptorID( if settings != nil && !settings.Version.IsActive(ctx, clusterversion.NamespaceTableWithSchemas) && tableName == systemschema.NamespaceTableName { - return systemschema.DeprecatedNamespaceTable.ID + return systemschema.DeprecatedNamespaceTable.GetID() } systemTenant := boolToInt(codec.ForSystemTenant()) dbID, ok := systemTableIDCache[systemTenant][tableName] diff --git a/pkg/sql/catalog/catalogkeys/keys.go 
b/pkg/sql/catalog/catalogkeys/keys.go index c9ad591ddc2c..b998a0984c90 100644 --- a/pkg/sql/catalog/catalogkeys/keys.go +++ b/pkg/sql/catalog/catalogkeys/keys.go @@ -276,12 +276,12 @@ func (ddk DeprecatedDatabaseKey) Name() string { func MakeNameMetadataKey( codec keys.SQLCodec, parentID, parentSchemaID descpb.ID, name string, ) roachpb.Key { - k := codec.IndexPrefix(uint32(systemschema.NamespaceTable.ID), uint32(systemschema.NamespaceTable.GetPrimaryIndexID())) + k := codec.IndexPrefix(uint32(systemschema.NamespaceTable.GetID()), uint32(systemschema.NamespaceTable.GetPrimaryIndexID())) k = encoding.EncodeUvarintAscending(k, uint64(parentID)) k = encoding.EncodeUvarintAscending(k, uint64(parentSchemaID)) if name != "" { k = encoding.EncodeBytesAscending(k, []byte(name)) - k = keys.MakeFamilyKey(k, uint32(systemschema.NamespaceTable.Columns[3].ID)) + k = keys.MakeFamilyKey(k, uint32(systemschema.NamespaceTable.GetPublicColumns()[3].ID)) } return k } @@ -334,11 +334,11 @@ func MakeDeprecatedNameMetadataKey( codec keys.SQLCodec, parentID descpb.ID, name string, ) roachpb.Key { k := codec.IndexPrefix( - uint32(systemschema.DeprecatedNamespaceTable.ID), uint32(systemschema.DeprecatedNamespaceTable.GetPrimaryIndexID())) + uint32(systemschema.DeprecatedNamespaceTable.GetID()), uint32(systemschema.DeprecatedNamespaceTable.GetPrimaryIndexID())) k = encoding.EncodeUvarintAscending(k, uint64(parentID)) if name != "" { k = encoding.EncodeBytesAscending(k, []byte(name)) - k = keys.MakeFamilyKey(k, uint32(systemschema.DeprecatedNamespaceTable.Columns[2].ID)) + k = keys.MakeFamilyKey(k, uint32(systemschema.DeprecatedNamespaceTable.GetPublicColumns()[2].ID)) } return k } diff --git a/pkg/sql/catalog/catalogkv/catalogkv.go b/pkg/sql/catalog/catalogkv/catalogkv.go index 301d607c6ff7..e1c6c455bdb2 100644 --- a/pkg/sql/catalog/catalogkv/catalogkv.go +++ b/pkg/sql/catalog/catalogkv/catalogkv.go @@ -553,13 +553,13 @@ func GetDatabaseDescByID( // returning an error if the table is not 
found. func MustGetTableDescByID( ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, id descpb.ID, -) (*tabledesc.Immutable, error) { +) (catalog.TableDescriptor, error) { desc, err := GetDescriptorByID(ctx, txn, codec, id, Immutable, TableDescriptorKind, true /* required */) if err != nil || desc == nil { return nil, err } - return desc.(*tabledesc.Immutable), nil + return desc.(catalog.TableDescriptor), nil } // MustGetDatabaseDescByID looks up the database descriptor given its ID, diff --git a/pkg/sql/catalog/catalogkv/test_utils.go b/pkg/sql/catalog/catalogkv/test_utils.go index 2793bf462c20..45b935de7a11 100644 --- a/pkg/sql/catalog/catalogkv/test_utils.go +++ b/pkg/sql/catalog/catalogkv/test_utils.go @@ -30,8 +30,8 @@ import ( // trivial change that just touches lots of lines. func TestingGetTableDescriptorFromSchema( kvDB *kv.DB, codec keys.SQLCodec, database string, schema string, table string, -) *tabledesc.Immutable { - return testingGetObjectDescriptor(kvDB, codec, database, schema, table).(*tabledesc.Immutable) +) catalog.TableDescriptor { + return testingGetObjectDescriptor(kvDB, codec, database, schema, table).(catalog.TableDescriptor) } // TestingGetTableDescriptor retrieves a table descriptor directly from the KV @@ -42,7 +42,7 @@ func TestingGetTableDescriptorFromSchema( // removing it altogether. func TestingGetTableDescriptor( kvDB *kv.DB, codec keys.SQLCodec, database string, table string, -) *tabledesc.Immutable { +) catalog.TableDescriptor { return TestingGetImmutableTableDescriptor(kvDB, codec, database, table) } @@ -50,8 +50,8 @@ func TestingGetTableDescriptor( // directly from the KV layer. 
func TestingGetImmutableTableDescriptor( kvDB *kv.DB, codec keys.SQLCodec, database string, table string, -) *tabledesc.Immutable { - return testingGetObjectDescriptor(kvDB, codec, database, "public", table).(*tabledesc.Immutable) +) catalog.TableDescriptor { + return testingGetObjectDescriptor(kvDB, codec, database, "public", table).(catalog.TableDescriptor) } // TestingGetMutableExistingTableDescriptor retrieves a Mutable diff --git a/pkg/sql/catalog/descriptor.go b/pkg/sql/catalog/descriptor.go index 524396492a4b..80366f5db3d9 100644 --- a/pkg/sql/catalog/descriptor.go +++ b/pkg/sql/catalog/descriptor.go @@ -109,8 +109,11 @@ type TableDescriptor interface { GetState() descpb.DescriptorState GetSequenceOpts() *descpb.TableDescriptor_SequenceOpts + GetCreateQuery() string GetViewQuery() string GetLease() *descpb.TableDescriptor_SchemaChangeLease + GetCreateAsOfTime() hlc.Timestamp + GetModificationTime() hlc.Timestamp GetDropTime() int64 GetFormatVersion() descpb.FormatVersion @@ -119,6 +122,8 @@ type TableDescriptor interface { IsPartitionAllBy() bool PrimaryIndexSpan(codec keys.SQLCodec) roachpb.Span IndexSpan(codec keys.SQLCodec, id descpb.IndexID) roachpb.Span + AllIndexSpans(codec keys.SQLCodec) roachpb.Spans + TableSpan(codec keys.SQLCodec) roachpb.Span GetIndexMutationCapabilities(id descpb.IndexID) (isMutation, isWriteOnly bool) KeysPerRow(id descpb.IndexID) (int, error) @@ -190,6 +195,8 @@ type TableDescriptor interface { // canonical order, see Index.Ordinal(). 
FindIndexWithName(name string) (Index, error) + GetNextIndexID() descpb.IndexID + HasPrimaryKey() bool PrimaryKeyString() string @@ -211,6 +218,16 @@ type TableDescriptor interface { ContainsUserDefinedTypes() bool GetColumnOrdinalsWithUserDefinedTypes() []int UserDefinedTypeColsHaveSameVersion(otherDesc TableDescriptor) bool + FindActiveColumnByName(s string) (*descpb.ColumnDescriptor, error) + WritableColumns() []descpb.ColumnDescriptor + ReadableColumns() []descpb.ColumnDescriptor + GetNextColumnID() descpb.ColumnID + HasColumnWithName(name tree.Name) (*descpb.ColumnDescriptor, bool) + FindActiveColumnsByNames(names tree.NameList) ([]descpb.ColumnDescriptor, error) + ColumnTypes() []*types.T + ColumnTypesWithMutations(mutations bool) []*types.T + ColumnTypesWithMutationsAndVirtualCol(mutations bool, virtualCol *descpb.ColumnDescriptor) []*types.T + CheckConstraintUsesColumn(cc *descpb.TableDescriptor_CheckConstraint, colID descpb.ColumnID) (bool, error) GetFamilies() []descpb.ColumnFamilyDescriptor NumFamilies() int @@ -225,7 +242,12 @@ type TableDescriptor interface { IsPhysicalTable() bool IsInterleaved() bool MaterializedView() bool + IsAs() bool + HasColumnBackfillMutation() bool + MakeFirstMutationPublic(includeConstraints bool) (TableDescriptor, error) + GetMutations() []descpb.DescriptorMutation + GetGCMutations() []descpb.TableDescriptor_GCDescriptorMutation GetMutationJobs() []descpb.TableDescriptor_MutationJob GetReplacementOf() descpb.TableDescriptor_Replacement @@ -245,8 +267,10 @@ type TableDescriptor interface { GetUniqueWithoutIndexConstraints() []descpb.UniqueWithoutIndexConstraint AllActiveAndInactiveUniqueWithoutIndexConstraints() []*descpb.UniqueWithoutIndexConstraint ForeachInboundFK(f func(fk *descpb.ForeignKeyConstraint) error) error - FindActiveColumnByName(s string) (*descpb.ColumnDescriptor, error) - WritableColumns() []descpb.ColumnDescriptor + GetConstraintInfo(ctx context.Context, dg DescGetter) (map[string]descpb.ConstraintDetail, 
error) + AllActiveAndInactiveForeignKeys() []*descpb.ForeignKeyConstraint + GetInboundFKs() []descpb.ForeignKeyConstraint + GetOutboundFKs() []descpb.ForeignKeyConstraint GetLocalityConfig() *descpb.TableDescriptor_LocalityConfig IsLocalityRegionalByRow() bool diff --git a/pkg/sql/catalog/descs/collection.go b/pkg/sql/catalog/descs/collection.go index 261332718f05..c01ab3630dbb 100644 --- a/pkg/sql/catalog/descs/collection.go +++ b/pkg/sql/catalog/descs/collection.go @@ -500,9 +500,9 @@ func (tc *Collection) getObjectByName( // (i.e., the ones used during authn/authz flows). // TODO (lucy): Reevaluate the above. We have many more system tables now and // should be able to lease most of them. - isAllowedSystemTable := objectName == systemschema.RoleMembersTable.Name || - objectName == systemschema.RoleOptionsTable.Name || - objectName == systemschema.UsersTable.Name + isAllowedSystemTable := objectName == systemschema.RoleMembersTable.GetName() || + objectName == systemschema.RoleOptionsTable.GetName() || + objectName == systemschema.UsersTable.GetName() avoidCache := flags.AvoidCached || mutable || lease.TestingTableLeasesAreDisabled() || (catalogName == systemschema.SystemDatabaseName && !isAllowedSystemTable) if avoidCache { @@ -538,12 +538,12 @@ func (tc *Collection) GetMutableTableByName( // according to the provided lookup flags. RequireMutable is ignored. func (tc *Collection) GetImmutableTableByName( ctx context.Context, txn *kv.Txn, name tree.ObjectName, flags tree.ObjectLookupFlags, -) (found bool, _ *tabledesc.Immutable, _ error) { +) (found bool, _ catalog.TableDescriptor, _ error) { found, desc, err := tc.getTableByName(ctx, txn, name, flags, false /* mutable */) if err != nil || !found { return false, nil, err } - return true, desc.(*tabledesc.Immutable), nil + return true, desc, nil } // getTableByName returns a table descriptor with properties according to the @@ -921,12 +921,12 @@ func (tc *Collection) GetMutableTableByID( // the ID exists. 
func (tc *Collection) GetImmutableTableByID( ctx context.Context, txn *kv.Txn, tableID descpb.ID, flags tree.ObjectLookupFlags, -) (*tabledesc.Immutable, error) { +) (catalog.TableDescriptor, error) { desc, err := tc.getTableByID(ctx, txn, tableID, flags, false /* mutable */) if err != nil { return nil, err } - return desc.(*tabledesc.Immutable), nil + return desc, nil } func (tc *Collection) getTableByID( @@ -1190,7 +1190,7 @@ func (tc *Collection) hydrateTypesInTableDesc( } return desc, typedesc.HydrateTypesInTableDescriptor(ctx, t.TableDesc(), typedesc.TypeLookupFunc(getType)) - case *tabledesc.Immutable: + case catalog.TableDescriptor: // ImmutableTableDescriptors need to be copied before hydration, because // they are potentially read by multiple threads. If there aren't any user // defined types in the descriptor, then return early. @@ -1389,9 +1389,9 @@ func (tc *Collection) GetDescriptorsWithNewVersion() []lease.IDVersion { // GetUncommittedTables returns all the tables updated or created in the // transaction. 
-func (tc *Collection) GetUncommittedTables() (tables []*tabledesc.Immutable) { +func (tc *Collection) GetUncommittedTables() (tables []catalog.TableDescriptor) { for _, desc := range tc.uncommittedDescriptors { - table, ok := desc.immutable.(*tabledesc.Immutable) + table, ok := desc.immutable.(catalog.TableDescriptor) if ok && desc.immutable.IsUncommittedVersion() { tables = append(tables, table) } @@ -1654,7 +1654,8 @@ func HydrateGivenDescriptors(ctx context.Context, descs []catalog.Descriptor) er if desc.Dropped() { continue } - if tblDesc, ok := desc.(*tabledesc.Immutable); ok { + tblDesc, ok := desc.(catalog.TableDescriptor) + if ok { if err := typedesc.HydrateTypesInTableDescriptor( ctx, tblDesc.TableDesc(), diff --git a/pkg/sql/catalog/descs/collection_test.go b/pkg/sql/catalog/descs/collection_test.go index 08c6819be2b6..45e0539a2f6e 100644 --- a/pkg/sql/catalog/descs/collection_test.go +++ b/pkg/sql/catalog/descs/collection_test.go @@ -352,14 +352,14 @@ func TestSyntheticDescriptorResolution(t *testing.T) { require.NoError(t, err) // Modify the column name. - desc.Columns[0].Name = "bar" + desc.TableDesc().Columns[0].Name = "bar" descriptors.SetSyntheticDescriptors([]catalog.Descriptor{desc}) // Resolve the table by name again. found, desc, err = descriptors.GetImmutableTableByName(ctx, txn, &tn, tree.ObjectLookupFlags{}) require.True(t, found) require.NoError(t, err) - require.Equal(t, "bar", desc.Columns[0].Name) + require.Equal(t, "bar", desc.GetPublicColumns()[0].Name) // Attempting to resolve the table mutably is not allowed. 
_, _, err = descriptors.GetMutableTableByName(ctx, txn, &tn, tree.ObjectLookupFlags{}) @@ -369,7 +369,7 @@ func TestSyntheticDescriptorResolution(t *testing.T) { desc, err = descriptors.GetImmutableTableByID(ctx, txn, tableID, tree.ObjectLookupFlags{}) require.NoError(t, err) - require.Equal(t, "bar", desc.Columns[0].Name) + require.Equal(t, "bar", desc.GetPublicColumns()[0].Name) // Attempting to resolve the table mutably is not allowed. _, err = descriptors.GetMutableTableByID(ctx, txn, tableID, tree.ObjectLookupFlags{}) diff --git a/pkg/sql/catalog/hydratedtables/hydratedcache.go b/pkg/sql/catalog/hydratedtables/hydratedcache.go index 40c93ad78fa1..036d7928d43d 100644 --- a/pkg/sql/catalog/hydratedtables/hydratedcache.go +++ b/pkg/sql/catalog/hydratedtables/hydratedcache.go @@ -45,7 +45,7 @@ import ( // of the referenced types which ensures that user always uses properly leased // descriptors. While all of the types will need to be resolved, they should // already be cached so, in this way, this cache prevents the need to copy -// and re-construct the tabledesc.Immutable in most cases. +// and re-construct the tabledesc.immutable in most cases. type Cache struct { settings *cluster.Settings g singleflight.Group @@ -123,7 +123,7 @@ func NewCache(settings *cluster.Settings) *Cache { } type hydratedTableDescriptor struct { - tableDesc *tabledesc.Immutable + tableDesc catalog.TableDescriptor typeDescs []*cachedType } @@ -141,8 +141,8 @@ type cachedType struct { // descriptor on their own. If the table descriptor does not contain any // user-defined types, it will be returned unchanged. func (c *Cache) GetHydratedTableDescriptor( - ctx context.Context, table *tabledesc.Immutable, res catalog.TypeDescriptorResolver, -) (hydrated *tabledesc.Immutable, err error) { + ctx context.Context, table catalog.TableDescriptor, res catalog.TypeDescriptorResolver, +) (hydrated catalog.TableDescriptor, err error) { // If the table has an uncommitted version, it cannot be cached. 
Return nil // forcing the caller to hydrate. @@ -251,7 +251,7 @@ func (c *Cache) GetHydratedTableDescriptor( if err != nil { return nil, err } - return res.(*tabledesc.Immutable), nil + return res.(catalog.TableDescriptor), nil } } diff --git a/pkg/sql/catalog/hydratedtables/hydratedcache_test.go b/pkg/sql/catalog/hydratedtables/hydratedcache_test.go index 077b7465ecb4..77cec3a188a0 100644 --- a/pkg/sql/catalog/hydratedtables/hydratedcache_test.go +++ b/pkg/sql/catalog/hydratedtables/hydratedcache_test.go @@ -38,7 +38,7 @@ func TestHydratedCache(t *testing.T) { m := c.Metrics() dg := mkDescGetter(descs...) res := &descGetterTypeDescriptorResolver{dg: &dg} - td := tableDescUDT.ImmutableCopy().(*tabledesc.Immutable) + td := tableDescUDT.ImmutableCopy().(catalog.TableDescriptor) hydrated, err := c.GetHydratedTableDescriptor(ctx, td, res) require.NoError(t, err) assertMetrics(t, m, 0, 1) @@ -50,7 +50,7 @@ func TestHydratedCache(t *testing.T) { // Show that the cache returned a new pointer and hydrated the UDT // (user-defined type). require.NotEqual(t, tableDescUDT, hydrated) - require.EqualValues(t, hydrated.Columns[0].Type, typ1T) + require.EqualValues(t, hydrated.GetPublicColumns()[0].Type, typ1T) // Try again and ensure we get pointer-for-pointer the same descriptor. res.calls = 0 @@ -68,7 +68,7 @@ func TestHydratedCache(t *testing.T) { m := c.Metrics() dg := mkDescGetter(descs...) res := &descGetterTypeDescriptorResolver{dg: &dg} - td := tableDescNoUDT.ImmutableCopy().(*tabledesc.Immutable) + td := tableDescNoUDT.ImmutableCopy().(catalog.TableDescriptor) _, err := c.GetHydratedTableDescriptor(ctx, td, res) require.NoError(t, err) assertMetrics(t, m, 0, 0) @@ -78,7 +78,7 @@ func TestHydratedCache(t *testing.T) { m := c.Metrics() dg := mkDescGetter(descs...) 
res := &descGetterTypeDescriptorResolver{dg: &dg} - td := tableDescUDT.ImmutableCopy().(*tabledesc.Immutable) + td := tableDescUDT.ImmutableCopy().(catalog.TableDescriptor) hydrated, err := c.GetHydratedTableDescriptor(ctx, td, res) require.NoError(t, err) assertMetrics(t, m, 0, 1) @@ -102,7 +102,7 @@ func TestHydratedCache(t *testing.T) { m := c.Metrics() dg := mkDescGetter(descs...) res := &descGetterTypeDescriptorResolver{dg: &dg} - td := tableDescUDT.ImmutableCopy().(*tabledesc.Immutable) + td := tableDescUDT.ImmutableCopy().(catalog.TableDescriptor) hydrated, err := c.GetHydratedTableDescriptor(ctx, td, res) require.NoError(t, err) assertMetrics(t, m, 0, 1) @@ -124,7 +124,7 @@ func TestHydratedCache(t *testing.T) { dg := mkDescGetter(descs...) res := &descGetterTypeDescriptorResolver{dg: &dg} res.unqualifiedName = true - td := tableDescUDT.ImmutableCopy().(*tabledesc.Immutable) + td := tableDescUDT.ImmutableCopy().(catalog.TableDescriptor) hydrated, err := c.GetHydratedTableDescriptor(ctx, td, res) require.NoError(t, err) assertMetrics(t, m, 0, 1) @@ -146,7 +146,7 @@ func TestHydratedCache(t *testing.T) { dg := mkDescGetter(descs...) 
res := &descGetterTypeDescriptorResolver{dg: &dg} res.unqualifiedName = true - td := tableDescUDT.ImmutableCopy().(*tabledesc.Immutable) + td := tableDescUDT.ImmutableCopy().(catalog.TableDescriptor) hydrated, err := c.GetHydratedTableDescriptor(ctx, td, res) require.NoError(t, err) assertMetrics(t, m, 0, 1) @@ -176,7 +176,7 @@ func TestHydratedCache(t *testing.T) { calledCh <- errCh return <-errCh } - td := tableDescUDT.ImmutableCopy().(*tabledesc.Immutable) + td := tableDescUDT.ImmutableCopy().(catalog.TableDescriptor) callOneErrCh := make(chan error, 1) go func() { @@ -202,7 +202,7 @@ func TestHydratedCache(t *testing.T) { res := &descGetterTypeDescriptorResolver{dg: &dg} mut := tabledesc.NewExistingMutable(*dg[tableUDTID].(catalog.TableDescriptor).TableDesc()) mut.MaybeIncrementVersion() - td := mut.ImmutableCopy().(*tabledesc.Immutable) + td := mut.ImmutableCopy().(catalog.TableDescriptor) hydrated, err := c.GetHydratedTableDescriptor(ctx, td, res) require.NoError(t, err) require.Nil(t, hydrated) @@ -226,7 +226,7 @@ func TestHydratedCache(t *testing.T) { // This behavior is a bit bizarre but exists to not waste the work of // hydrating the descriptor if we've already started to do it. // This case should not meaningfully arise in practice. 
- td := tableDescUDT.ImmutableCopy().(*tabledesc.Immutable) + td := tableDescUDT.ImmutableCopy().(catalog.TableDescriptor) { hydrated, err := c.GetHydratedTableDescriptor(ctx, td, resWithMut) require.NoError(t, err) diff --git a/pkg/sql/catalog/lease/lease.go b/pkg/sql/catalog/lease/lease.go index 94a28f2d8b84..853c4ea34252 100644 --- a/pkg/sql/catalog/lease/lease.go +++ b/pkg/sql/catalog/lease/lease.go @@ -1785,7 +1785,7 @@ func (m *Manager) watchForGossipUpdates( } _ = s.RunAsyncTask(ctx, "gossip-updates", func(ctx context.Context) { - descKeyPrefix := m.storage.codec.TablePrefix(uint32(systemschema.DescriptorTable.ID)) + descKeyPrefix := m.storage.codec.TablePrefix(uint32(systemschema.DescriptorTable.GetID())) // TODO(ajwerner): Add a mechanism to unregister this channel upon // return. NB: this call is allowed to bypass OptionalGossip because // we'll never get here after RangefeedLeases. @@ -1827,7 +1827,7 @@ func (m *Manager) watchForRangefeedUpdates( Closer: s.ShouldQuiesce(), }); r.Next(); i++ { ts := m.getResolvedTimestamp() - descKeyPrefix := m.storage.codec.TablePrefix(uint32(systemschema.DescriptorTable.ID)) + descKeyPrefix := m.storage.codec.TablePrefix(uint32(systemschema.DescriptorTable.GetID())) span := roachpb.Span{ Key: descKeyPrefix, EndKey: descKeyPrefix.PrefixEnd(), diff --git a/pkg/sql/catalog/lease/lease_internal_test.go b/pkg/sql/catalog/lease/lease_internal_test.go index 6c48fc35c2c4..4c6296d2b3cc 100644 --- a/pkg/sql/catalog/lease/lease_internal_test.go +++ b/pkg/sql/catalog/lease/lease_internal_test.go @@ -160,18 +160,18 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") - var tables []tabledesc.Immutable + var tables []catalog.TableDescriptor var expiration hlc.Timestamp getLeases := func() { for i := 0; i < 3; i++ { - if err := leaseManager.AcquireFreshestFromStore(context.Background(), tableDesc.ID); err != nil { + if err := 
leaseManager.AcquireFreshestFromStore(context.Background(), tableDesc.GetID()); err != nil { t.Fatal(err) } - table, exp, err := leaseManager.Acquire(context.Background(), s.Clock().Now(), tableDesc.ID) + table, exp, err := leaseManager.Acquire(context.Background(), s.Clock().Now(), tableDesc.GetID()) if err != nil { t.Fatal(err) } - tables = append(tables, *table.(*tabledesc.Immutable)) + tables = append(tables, table.(catalog.TableDescriptor)) expiration = exp if err := leaseManager.Release(table); err != nil { t.Fatal(err) @@ -179,25 +179,25 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); } } getLeases() - ts := leaseManager.findDescriptorState(tableDesc.ID, false) + ts := leaseManager.findDescriptorState(tableDesc.GetID(), false) if numLeases := getNumVersions(ts); numLeases != 1 { t.Fatalf("found %d versions instead of 1", numLeases) } // Publish a new version for the table - if _, err := leaseManager.Publish(context.Background(), tableDesc.ID, func(catalog.MutableDescriptor) error { + if _, err := leaseManager.Publish(context.Background(), tableDesc.GetID(), func(catalog.MutableDescriptor) error { return nil }, nil); err != nil { t.Fatal(err) } getLeases() - ts = leaseManager.findDescriptorState(tableDesc.ID, false) + ts = leaseManager.findDescriptorState(tableDesc.GetID(), false) if numLeases := getNumVersions(ts); numLeases != 2 { t.Fatalf("found %d versions instead of 2", numLeases) } if err := purgeOldVersions( - context.Background(), kvDB, tableDesc.ID, false, 2 /* minVersion */, leaseManager); err != nil { + context.Background(), kvDB, tableDesc.GetID(), false, 2 /* minVersion */, leaseManager); err != nil { t.Fatal(err) } @@ -205,8 +205,8 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); t.Fatalf("found %d versions instead of 1", numLeases) } ts.mu.Lock() - correctLease := ts.mu.active.data[0].GetID() == tables[5].ID && - ts.mu.active.data[0].GetVersion() == tables[5].Version + correctLease := ts.mu.active.data[0].GetID() == tables[5].GetID() 
&& + ts.mu.active.data[0].GetVersion() == tables[5].GetVersion() correctExpiration := ts.mu.active.data[0].expiration == expiration ts.mu.Unlock() if !correctLease { @@ -220,8 +220,8 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); // without a lease. ts.mu.Lock() tableVersion := &descriptorVersionState{ - Descriptor: &tables[0], - expiration: tables[5].ModificationTime, + Descriptor: tables[0], + expiration: tables[5].GetModificationTime(), } ts.mu.active.insert(tableVersion) ts.mu.Unlock() @@ -229,7 +229,7 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); t.Fatalf("found %d versions instead of 2", numLeases) } if err := purgeOldVersions( - context.Background(), kvDB, tableDesc.ID, false, 2 /* minVersion */, leaseManager); err != nil { + context.Background(), kvDB, tableDesc.GetID(), false, 2 /* minVersion */, leaseManager); err != nil { t.Fatal(err) } if numLeases := getNumVersions(ts); numLeases != 1 { @@ -276,13 +276,13 @@ CREATE TEMP TABLE t2 (temp int); for _, tableName := range []string{"t", "t2"} { tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "defaultdb", tableName) lease := leaseManager.names.get( - tableDesc.ParentID, + tableDesc.GetParentID(), descpb.ID(keys.PublicSchemaID), tableName, s.Clock().Now(), ) - if lease.GetID() != tableDesc.ID { - t.Fatalf("lease has wrong ID: %d (expected: %d)", lease.GetID(), tableDesc.ID) + if lease.GetID() != tableDesc.GetID() { + t.Fatalf("lease has wrong ID: %d (expected: %d)", lease.GetID(), tableDesc.GetID()) } } } @@ -315,18 +315,18 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); } // Check that the cache has been updated. 
- if leaseManager.names.get(tableDesc.ParentID, tableDesc.GetParentSchemaID(), "test", s.Clock().Now()) != nil { + if leaseManager.names.get(tableDesc.GetParentID(), tableDesc.GetParentSchemaID(), "test", s.Clock().Now()) != nil { t.Fatalf("old name still in cache") } - lease := leaseManager.names.get(tableDesc.ParentID, tableDesc.GetParentSchemaID(), "test2", s.Clock().Now()) + lease := leaseManager.names.get(tableDesc.GetParentID(), tableDesc.GetParentSchemaID(), "test2", s.Clock().Now()) if lease == nil { t.Fatalf("new name not found in cache") } - if lease.GetID() != tableDesc.ID { - t.Fatalf("new name has wrong ID: %d (expected: %d)", lease.GetID(), tableDesc.ID) + if lease.GetID() != tableDesc.GetID() { + t.Fatalf("new name has wrong ID: %d (expected: %d)", lease.GetID(), tableDesc.GetID()) } - if err := leaseManager.Release(lease.Descriptor.(*tabledesc.Immutable)); err != nil { + if err := leaseManager.Release(lease.Descriptor); err != nil { t.Fatal(err) } @@ -337,23 +337,23 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); // Re-read the descriptor, to get the new ParentID. newTableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t1", "test2") - if tableDesc.ParentID == newTableDesc.ParentID { + if tableDesc.GetParentID() == newTableDesc.GetParentID() { t.Fatalf("database didn't change") } // Check that the cache has been updated. 
- if leaseManager.names.get(tableDesc.ParentID, tableDesc.GetParentSchemaID(), "test2", s.Clock().Now()) != nil { + if leaseManager.names.get(tableDesc.GetParentID(), tableDesc.GetParentSchemaID(), "test2", s.Clock().Now()) != nil { t.Fatalf("old name still in cache") } - lease = leaseManager.names.get(newTableDesc.ParentID, tableDesc.GetParentSchemaID(), "test2", s.Clock().Now()) + lease = leaseManager.names.get(newTableDesc.GetParentID(), tableDesc.GetParentSchemaID(), "test2", s.Clock().Now()) if lease == nil { t.Fatalf("new name not found in cache") } - if lease.GetID() != tableDesc.ID { - t.Fatalf("new name has wrong ID: %d (expected: %d)", lease.GetID(), tableDesc.ID) + if lease.GetID() != tableDesc.GetID() { + t.Fatalf("new name has wrong ID: %d (expected: %d)", lease.GetID(), tableDesc.GetID()) } - if err := leaseManager.Release(lease.Descriptor.(*tabledesc.Immutable)); err != nil { + if err := leaseManager.Release(lease.Descriptor); err != nil { t.Fatal(err) } } @@ -383,10 +383,10 @@ CREATE TABLE t.%s (k CHAR PRIMARY KEY, v CHAR); // Check the assumptions this tests makes: that there is a cache entry // (with a valid lease). - if lease := leaseManager.names.get(tableDesc.ParentID, tableDesc.GetParentSchemaID(), tableName, s.Clock().Now()); lease == nil { - t.Fatalf("name cache has no unexpired entry for (%d, %s)", tableDesc.ParentID, tableName) + if lease := leaseManager.names.get(tableDesc.GetParentID(), tableDesc.GetParentSchemaID(), tableName, s.Clock().Now()); lease == nil { + t.Fatalf("name cache has no unexpired entry for (%d, %s)", tableDesc.GetParentID(), tableName) } else { - if err := leaseManager.Release(lease.Descriptor.(*tabledesc.Immutable)); err != nil { + if err := leaseManager.Release(lease.Descriptor); err != nil { t.Fatal(err) } } @@ -394,8 +394,8 @@ CREATE TABLE t.%s (k CHAR PRIMARY KEY, v CHAR); leaseManager.ExpireLeases(s.Clock()) // Check the name no longer resolves. 
- if lease := leaseManager.names.get(tableDesc.ParentID, tableDesc.GetParentSchemaID(), tableName, s.Clock().Now()); lease != nil { - t.Fatalf("name cache has unexpired entry for (%d, %s): %s", tableDesc.ParentID, tableName, lease) + if lease := leaseManager.names.get(tableDesc.GetParentID(), tableDesc.GetParentSchemaID(), tableName, s.Clock().Now()); lease != nil { + t.Fatalf("name cache has unexpired entry for (%d, %s): %s", tableDesc.GetParentID(), tableName, lease) } } @@ -432,28 +432,28 @@ CREATE TABLE t.%s (k CHAR PRIMARY KEY, v CHAR); } // There is a cache entry. - lease := leaseManager.names.get(tableDesc.ParentID, tableDesc.GetParentSchemaID(), tableName, s.Clock().Now()) + lease := leaseManager.names.get(tableDesc.GetParentID(), tableDesc.GetParentSchemaID(), tableName, s.Clock().Now()) if lease == nil { - t.Fatalf("name cache has no unexpired entry for (%d, %s)", tableDesc.ParentID, tableName) + t.Fatalf("name cache has no unexpired entry for (%d, %s)", tableDesc.GetParentID(), tableName) } - tracker := removalTracker.TrackRemoval(lease.Descriptor.(*tabledesc.Immutable)) + tracker := removalTracker.TrackRemoval(lease.Descriptor) // Acquire another lease. - if _, err := acquireNodeLease(context.Background(), leaseManager, tableDesc.ID); err != nil { + if _, err := acquireNodeLease(context.Background(), leaseManager, tableDesc.GetID()); err != nil { t.Fatal(err) } // Check the name resolves to the new lease. 
- newLease := leaseManager.names.get(tableDesc.ParentID, tableDesc.GetParentSchemaID(), tableName, s.Clock().Now()) + newLease := leaseManager.names.get(tableDesc.GetParentID(), tableDesc.GetParentSchemaID(), tableName, s.Clock().Now()) if newLease == nil { - t.Fatalf("name cache doesn't contain entry for (%d, %s)", tableDesc.ParentID, tableName) + t.Fatalf("name cache doesn't contain entry for (%d, %s)", tableDesc.GetParentID(), tableName) } if newLease == lease { t.Fatalf("same lease %s", newLease.expiration.GoTime()) } - if err := leaseManager.Release(lease.Descriptor.(*tabledesc.Immutable)); err != nil { + if err := leaseManager.Release(lease.Descriptor); err != nil { t.Fatal(err) } @@ -462,7 +462,7 @@ CREATE TABLE t.%s (k CHAR PRIMARY KEY, v CHAR); t.Fatal(err) } - if err := leaseManager.Release(lease.Descriptor.(*tabledesc.Immutable)); err != nil { + if err := leaseManager.Release(lease.Descriptor); err != nil { t.Fatal(err) } } @@ -489,7 +489,7 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") // Check that we cannot get the table by a different name. - if leaseManager.names.get(tableDesc.ParentID, tableDesc.GetParentSchemaID(), "tEsT", s.Clock().Now()) != nil { + if leaseManager.names.get(tableDesc.GetParentID(), tableDesc.GetParentSchemaID(), "tEsT", s.Clock().Now()) != nil { t.Fatalf("lease manager incorrectly found table with different case") } } @@ -529,7 +529,7 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); table, _, err := leaseManager.AcquireByName( ctx, leaseManager.storage.clock.Now(), - tableDesc.ParentID, + tableDesc.GetParentID(), tableDesc.GetParentSchemaID(), "test", ) @@ -544,7 +544,7 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); // Release. // tableChan acts as a barrier, synchronizing the two routines at every // iteration. 
- tableChan := make(chan *tabledesc.Immutable) + tableChan := make(chan catalog.TableDescriptor) errChan := make(chan error) go func() { for table := range tableChan { @@ -559,14 +559,14 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); desc, _, err := leaseManager.AcquireByName( ctx, timestamp, - tableDesc.ParentID, + tableDesc.GetParentID(), tableDesc.GetParentSchemaID(), "test", ) if err != nil { t.Fatal(err) } - table := desc.(*tabledesc.Immutable) + table := desc.(catalog.TableDescriptor) // This test will need to wait until leases are removed from the store // before creating new leases because the jitter used in the leases' // expiration causes duplicate key errors when trying to create new @@ -581,7 +581,7 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); tableByName, _, err := leaseManager.AcquireByName( ctx, timestamp, - tableDesc.ParentID, + tableDesc.GetParentID(), tableDesc.GetParentSchemaID(), "test", ) @@ -642,10 +642,10 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); for i := 0; i < numRoutines; i++ { go func() { defer wg.Done() - if err := leaseManager.AcquireFreshestFromStore(context.Background(), tableDesc.ID); err != nil { + if err := leaseManager.AcquireFreshestFromStore(context.Background(), tableDesc.GetID()); err != nil { t.Error(err) } - table, _, err := leaseManager.Acquire(context.Background(), s.Clock().Now(), tableDesc.ID) + table, _, err := leaseManager.Acquire(context.Background(), s.Clock().Now(), tableDesc.GetID()) if err != nil { t.Error(err) } @@ -695,7 +695,7 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); for i := 0; i < numRoutines; i++ { go func() { defer wg.Done() - table, _, err := leaseManager.Acquire(context.Background(), now, tableDesc.ID) + table, _, err := leaseManager.Acquire(context.Background(), now, tableDesc.GetID()) if err != nil { t.Error(err) } @@ -735,7 +735,7 @@ func TestLeaseAcquireAndReleaseConcurrently(t *testing.T) { // Result is a struct for moving results to the main result routine. 
type Result struct { - table *tabledesc.Immutable + table catalog.TableDescriptor exp hlc.Timestamp err error } @@ -749,7 +749,7 @@ func TestLeaseAcquireAndReleaseConcurrently(t *testing.T) { acquireChan chan Result, ) { table, e, err := m.Acquire(ctx, m.storage.clock.Now(), descID) - acquireChan <- Result{err: err, exp: e, table: table.(*tabledesc.Immutable)} + acquireChan <- Result{err: err, exp: e, table: table.(catalog.TableDescriptor)} } testCases := []struct { @@ -852,7 +852,7 @@ func TestLeaseAcquireAndReleaseConcurrently(t *testing.T) { return } table, e, err := m.Acquire(ctx, s.Clock().Now(), descID) - acquireChan <- Result{err: err, exp: e, table: table.(*tabledesc.Immutable)} + acquireChan <- Result{err: err, exp: e, table: table.(catalog.TableDescriptor)} }(ctx, leaseManager, acquireResultChan) } else { diff --git a/pkg/sql/catalog/lease/lease_test.go b/pkg/sql/catalog/lease/lease_test.go index 429618905475..ad6b566db779 100644 --- a/pkg/sql/catalog/lease/lease_test.go +++ b/pkg/sql/catalog/lease/lease_test.go @@ -581,7 +581,7 @@ CREATE TABLE test.t(a INT PRIMARY KEY); tableDesc := catalogkv.TestingGetTableDescriptor(t.kvDB, keys.SystemSQLCodec, "test", "t") // try to acquire at a bogus version to make sure we don't get back a lease we // already had. 
- _, _, err = t.acquireMinVersion(1, tableDesc.ID, tableDesc.Version+1) + _, _, err = t.acquireMinVersion(1, tableDesc.GetID(), tableDesc.GetVersion()+1) if !testutils.IsError(err, "descriptor is being dropped") { t.Fatalf("got a different error than expected: %v", err) } @@ -645,11 +645,11 @@ CREATE TABLE test.t(a INT PRIMARY KEY); tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t") ctx := context.Background() - lease1, _, err := acquire(ctx, s.(*server.TestServer), tableDesc.ID) + lease1, _, err := acquire(ctx, s.(*server.TestServer), tableDesc.GetID()) if err != nil { t.Fatal(err) } - lease2, _, err := acquire(ctx, s.(*server.TestServer), tableDesc.ID) + lease2, _, err := acquire(ctx, s.(*server.TestServer), tableDesc.GetID()) if err != nil { t.Fatal(err) } @@ -659,7 +659,7 @@ CREATE TABLE test.t(a INT PRIMARY KEY); // Also install a way to wait for the config update to be processed. mu.Lock() clearSchemaChangers = true - waitTableID = tableDesc.ID + waitTableID = tableDesc.GetID() mu.Unlock() // DROP the table @@ -672,7 +672,7 @@ CREATE TABLE test.t(a INT PRIMARY KEY); <-deleted // We should still be able to acquire, because we have an active lease. - lease3, _, err := acquire(ctx, s.(*server.TestServer), tableDesc.ID) + lease3, _, err := acquire(ctx, s.(*server.TestServer), tableDesc.GetID()) if err != nil { t.Fatal(err) } @@ -688,7 +688,7 @@ CREATE TABLE test.t(a INT PRIMARY KEY); t.Fatal(err) } // Now we shouldn't be able to acquire any more. 
- _, _, err = acquire(ctx, s.(*server.TestServer), tableDesc.ID) + _, _, err = acquire(ctx, s.(*server.TestServer), tableDesc.GetID()) if !testutils.IsError(err, "descriptor is being dropped") { t.Fatalf("got a different error than expected: %v", err) } @@ -742,7 +742,7 @@ CREATE TABLE t.foo (v INT); } tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "foo") - atomic.StoreInt64(&tableID, int64(tableDesc.ID)) + atomic.StoreInt64(&tableID, int64(tableDesc.GetID())) if _, err := sqlDB.Exec(` SELECT * FROM t.foo; @@ -872,7 +872,7 @@ CREATE TABLE t.foo (v INT); } tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "foo") - atomic.StoreInt64(&tableID, int64(tableDesc.ID)) + atomic.StoreInt64(&tableID, int64(tableDesc.GetID())) tx, err := sqlDB.Begin() if err != nil { @@ -1114,13 +1114,13 @@ INSERT INTO t.kv VALUES ('a', 'b'); // Allow async schema change waiting for GC to complete (when dropping an // index) and clear the index keys. 
- if _, err := sqltestutils.AddImmediateGCZoneConfig(sqlDB, tableDesc.ID); err != nil { + if _, err := sqltestutils.AddImmediateGCZoneConfig(sqlDB, tableDesc.GetID()); err != nil { t.Fatal(err) } testutils.SucceedsSoon(t, func() error { - if tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "kv"); len(tableDesc.GCMutations) != 0 { - return errors.Errorf("%d gc mutations remaining", len(tableDesc.GCMutations)) + if tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "kv"); len(tableDesc.GetGCMutations()) != 0 { + return errors.Errorf("%d gc mutations remaining", len(tableDesc.GetGCMutations())) } return nil }) @@ -1183,7 +1183,7 @@ INSERT INTO t.timestamp VALUES ('a', 'b'); if !updated { leaseMgr := s.LeaseManager().(*lease.Manager) if _, err := leaseMgr.Publish( - context.Background(), tableDesc.ID, func(catalog.MutableDescriptor) error { + context.Background(), tableDesc.GetID(), func(catalog.MutableDescriptor) error { // Do nothing: increments the version. return nil }, nil); err != nil { @@ -1229,8 +1229,8 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); } tableDesc := catalogkv.TestingGetTableDescriptor(t.kvDB, keys.SystemSQLCodec, "t", "test") - dbID := tableDesc.ParentID - tableName := tableDesc.Name + dbID := tableDesc.GetParentID() + tableName := tableDesc.GetName() leaseManager := t.node(1) // Acquire the lease so it is put into the nameCache. @@ -1314,7 +1314,7 @@ CREATE TABLE t.test2 (); test1Desc := catalogkv.TestingGetTableDescriptor(t.kvDB, keys.SystemSQLCodec, "t", "test1") test2Desc := catalogkv.TestingGetTableDescriptor(t.kvDB, keys.SystemSQLCodec, "t", "test2") - dbID := test2Desc.ParentID + dbID := test2Desc.GetParentID() // Acquire a lease on test1 by name. ts1, eo1, err := t.node(1).AcquireByName( @@ -1334,7 +1334,7 @@ CREATE TABLE t.test2 (); } // Acquire a lease on test2 by ID. 
- ts2, eo2, err := t.node(1).Acquire(ctx, t.server.Clock().Now(), test2Desc.ID) + ts2, eo2, err := t.node(1).Acquire(ctx, t.server.Clock().Now(), test2Desc.GetID()) if err != nil { t.Fatal(err) } else if err := t.release(1, ts2); err != nil { @@ -1383,7 +1383,7 @@ CREATE TABLE t.test2 (); // Acquire another lease by ID on test2. At first this will be the same // lease, but eventually we will asynchronously renew a lease and our // acquire will get a newer lease. - ts2, en2, err := t.node(1).Acquire(ctx, t.server.Clock().Now(), test2Desc.ID) + ts2, en2, err := t.node(1).Acquire(ctx, t.server.Clock().Now(), test2Desc.GetID()) if err != nil { t.Fatal(err) } @@ -1447,8 +1447,8 @@ CREATE TABLE t.kv (k CHAR PRIMARY KEY, v CHAR); } tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "kv") - if tableDesc.Version != 1 { - t.Fatalf("invalid version %d", tableDesc.Version) + if tableDesc.GetVersion() != 1 { + t.Fatalf("invalid version %d", tableDesc.GetVersion()) } tx, err := sqlDB.Begin() @@ -1468,8 +1468,8 @@ CREATE TABLE t.kv (k CHAR PRIMARY KEY, v CHAR); // The first schema change will succeed and increment the version. tableDesc = catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "kv1") - if tableDesc.Version != 2 { - t.Fatalf("invalid version %d", tableDesc.Version) + if tableDesc.GetVersion() != 2 { + t.Fatalf("invalid version %d", tableDesc.GetVersion()) } if l := atomic.LoadInt64(&violations); l > 0 { @@ -1498,8 +1498,8 @@ CREATE TABLE t.kv (k CHAR PRIMARY KEY, v CHAR); // doesn't rollback the transaction this descriptor read will // hang. tableDesc = catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "kv1") - if tableDesc.Version != 2 { - t.Fatalf("invalid version %d", tableDesc.Version) + if tableDesc.GetVersion() != 2 { + t.Fatalf("invalid version %d", tableDesc.GetVersion()) } // Transaction successfully used the old version. 
@@ -1509,8 +1509,8 @@ CREATE TABLE t.kv (k CHAR PRIMARY KEY, v CHAR); wg.Wait() tableDesc = catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "kv2") - if tableDesc.Version != 3 { - t.Fatalf("invalid version %d", tableDesc.Version) + if tableDesc.GetVersion() != 3 { + t.Fatalf("invalid version %d", tableDesc.GetVersion()) } } @@ -1550,8 +1550,8 @@ INSERT INTO t.kv VALUES ('a', 'b'); } tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "kv") - if tableDesc.Version != 1 { - t.Fatalf("invalid version %d", tableDesc.Version) + if tableDesc.GetVersion() != 1 { + t.Fatalf("invalid version %d", tableDesc.GetVersion()) } tx, err := sqlDB.Begin() @@ -1671,7 +1671,7 @@ CREATE TABLE t.test0 (k CHAR PRIMARY KEY, v CHAR); if err != nil { t.Fatalf("error while publishing: %v", err) } - table := desc.(*tabledesc.Immutable) + table := desc.(catalog.TableDescriptor) // Wait a little time to give a chance to other goroutines to // race past. @@ -1683,10 +1683,10 @@ CREATE TABLE t.test0 (k CHAR PRIMARY KEY, v CHAR); // This checks that the modification timestamp is not lying about // the transaction commit time (and that the txn commit time wasn't // bumped past it). - log.Infof(ctx, "checking version %d", table.Version) + log.Infof(ctx, "checking version %d", table.GetVersion()) txn := kv.NewTxn(ctx, t.kvDB, roachpb.NodeID(0)) // Make the txn look back at the known modification timestamp. - txn.SetFixedTimestamp(ctx, table.ModificationTime) + txn.SetFixedTimestamp(ctx, table.GetModificationTime()) // Look up the descriptor. descKey := catalogkeys.MakeDescMetadataKey(keys.SystemSQLCodec, descID) @@ -1698,9 +1698,9 @@ CREATE TABLE t.test0 (k CHAR PRIMARY KEY, v CHAR); // Look at the descriptor that comes back from the database. 
dbTable := descpb.TableFromDescriptor(dbDesc, ts) - if dbTable.Version != table.Version || dbTable.ModificationTime != table.ModificationTime { + if dbTable.Version != table.GetVersion() || dbTable.ModificationTime != table.GetModificationTime() { t.Fatalf("db has version %d at ts %s, expected version %d at ts %s", - dbTable.Version, dbTable.ModificationTime, table.Version, table.ModificationTime) + dbTable.Version, dbTable.ModificationTime, table.GetVersion(), table.GetModificationTime()) } } wg.Done() @@ -1772,7 +1772,7 @@ CREATE TABLE t.test2 (); test1Desc := catalogkv.TestingGetTableDescriptor(t.kvDB, keys.SystemSQLCodec, "t", "test2") test2Desc := catalogkv.TestingGetTableDescriptor(t.kvDB, keys.SystemSQLCodec, "t", "test2") - dbID := test2Desc.ParentID + dbID := test2Desc.GetParentID() atomic.StoreInt32(&testAcquisitionBlockCount, 0) @@ -1803,7 +1803,7 @@ CREATE TABLE t.test2 (); } // Acquire a lease on test2 by ID. - ts2, _, err := t.node(1).Acquire(ctx, t.server.Clock().Now(), test2Desc.ID) + ts2, _, err := t.node(1).Acquire(ctx, t.server.Clock().Now(), test2Desc.GetID()) if err != nil { t.Fatal(err) } else if err := t.release(1, ts2); err != nil { @@ -1969,7 +1969,7 @@ CREATE TABLE t.after (k CHAR PRIMARY KEY, v CHAR); beforeDesc := catalogkv.TestingGetTableDescriptor(t.kvDB, keys.SystemSQLCodec, "t", "before") afterDesc := catalogkv.TestingGetTableDescriptor(t.kvDB, keys.SystemSQLCodec, "t", "after") - dbID := beforeDesc.ParentID + dbID := beforeDesc.GetParentID() // Acquire a lease on "before" by name. beforeTable, _, err := t.node(1).AcquireByName( @@ -2002,14 +2002,14 @@ CREATE TABLE t.after (k CHAR PRIMARY KEY, v CHAR); } else if err := t.release(1, afterTable); err != nil { t.Fatal(err) } - t.expectLeases(beforeDesc.ID, "/1/1") - t.expectLeases(afterDesc.ID, "/1/1") + t.expectLeases(beforeDesc.GetID(), "/1/1") + t.expectLeases(afterDesc.GetID(), "/1/1") // Call DeleteOrphanedLeases() with the server startup time. 
t.node(1).DeleteOrphanedLeases(now) // Orphaned lease is gone. - t.expectLeases(beforeDesc.ID, "") - t.expectLeases(afterDesc.ID, "/1/1") + t.expectLeases(beforeDesc.GetID(), "") + t.expectLeases(afterDesc.GetID(), "/1/1") } // Test that acquiring a lease doesn't block on other transactions performing @@ -2416,7 +2416,7 @@ func TestLeaseWithOfflineTables(t *testing.T) { require.NoError(t, err) desc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") - atomic.StoreUint32(&descID, uint32(desc.ID)) + atomic.StoreUint32(&descID, uint32(desc.GetID())) // Sets table descriptor state and waits for that change to propagate to the // lease manager's refresh worker. diff --git a/pkg/sql/catalog/resolver/resolver.go b/pkg/sql/catalog/resolver/resolver.go index 38e5a5316438..a5243a9ac826 100644 --- a/pkg/sql/catalog/resolver/resolver.go +++ b/pkg/sql/catalog/resolver/resolver.go @@ -47,7 +47,7 @@ type SchemaResolver interface { CurrentSearchPath() sessiondata.SearchPath CommonLookupFlags(required bool) tree.CommonLookupFlags ObjectLookupFlags(required bool, requireMutable bool) tree.ObjectLookupFlags - LookupTableByID(ctx context.Context, id descpb.ID) (*tabledesc.Immutable, error) + LookupTableByID(ctx context.Context, id descpb.ID) (catalog.TableDescriptor, error) tree.TypeReferenceResolver } @@ -85,7 +85,7 @@ func GetObjectNames( // if no object is found. func ResolveExistingTableObject( ctx context.Context, sc SchemaResolver, tn *tree.TableName, lookupFlags tree.ObjectLookupFlags, -) (res *tabledesc.Immutable, err error) { +) (res catalog.TableDescriptor, err error) { // TODO: As part of work for #34240, an UnresolvedObjectName should be // passed as an argument to this function. 
un := tn.ToUnresolvedObjectName() @@ -94,7 +94,7 @@ func ResolveExistingTableObject( return nil, err } tn.ObjectNamePrefix = prefix - return desc.(*tabledesc.Immutable), nil + return desc.(catalog.TableDescriptor), nil } // ResolveMutableExistingTableObject looks up an existing mutable object. @@ -208,7 +208,7 @@ func ResolveExistingObject( return descI.(*tabledesc.Mutable), prefix, nil } - return descI.(*tabledesc.Immutable), prefix, nil + return descI.(catalog.TableDescriptor), prefix, nil default: return nil, prefix, errors.AssertionFailedf( "unknown desired object kind %d", lookupFlags.DesiredObjectKind) diff --git a/pkg/sql/catalog/tabledesc/BUILD.bazel b/pkg/sql/catalog/tabledesc/BUILD.bazel index 412f00d8e509..2cb7ec229b69 100644 --- a/pkg/sql/catalog/tabledesc/BUILD.bazel +++ b/pkg/sql/catalog/tabledesc/BUILD.bazel @@ -75,6 +75,7 @@ go_test( "//pkg/util/leaktest", "//pkg/util/protoutil", "//pkg/util/randutil", + "@com_github_cockroachdb_errors//:errors", "@com_github_cockroachdb_redact//:redact", "@com_github_stretchr_testify//require", "@in_gopkg_yaml_v2//:yaml_v2", diff --git a/pkg/sql/catalog/tabledesc/helpers_test.go b/pkg/sql/catalog/tabledesc/helpers_test.go index 3000bed3af97..024f573dfcef 100644 --- a/pkg/sql/catalog/tabledesc/helpers_test.go +++ b/pkg/sql/catalog/tabledesc/helpers_test.go @@ -14,14 +14,43 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/errors" ) -func (desc *Immutable) ValidateCrossReferences(ctx context.Context, dg catalog.DescGetter) error { - return desc.validateCrossReferences(ctx, dg) +func ValidateTable(ctx context.Context, immI catalog.TableDescriptor) error { + imm, ok := immI.(*immutable) + if !ok { + return errors.Errorf("expected immutable descriptor") + } + return imm.ValidateTable(ctx) } -func (desc *Immutable) ValidatePartitioning() error { - return desc.validatePartitioning() +func ValidateCrossReferences( + ctx context.Context, dg catalog.DescGetter, 
immI catalog.TableDescriptor, +) error { + imm, ok := immI.(*immutable) + if !ok { + return errors.Errorf("expected immutable descriptor") + } + return imm.validateCrossReferences(ctx, dg) +} + +func ValidatePartitioning(immI catalog.TableDescriptor) error { + imm, ok := immI.(*immutable) + if !ok { + return errors.Errorf("expected immutable descriptor") + } + return imm.validatePartitioning() +} + +func GetPostDeserializationChanges( + immI catalog.TableDescriptor, +) (PostDeserializationTableDescriptorChanges, error) { + imm, ok := immI.(*immutable) + if !ok { + return PostDeserializationTableDescriptorChanges{}, errors.Errorf("expected immutable descriptor") + } + return imm.GetPostDeserializationChanges(), nil } var FitColumnToFamily = fitColumnToFamily diff --git a/pkg/sql/catalog/tabledesc/safe_format.go b/pkg/sql/catalog/tabledesc/safe_format.go index db975755e374..bc97db69e7cd 100644 --- a/pkg/sql/catalog/tabledesc/safe_format.go +++ b/pkg/sql/catalog/tabledesc/safe_format.go @@ -16,9 +16,9 @@ import ( "github.com/cockroachdb/redact" ) -// SafeMessage makes Immutable a SafeMessager. -func (desc *Immutable) SafeMessage() string { - return formatSafeTableDesc("tabledesc.Immutable", desc) +// SafeMessage makes immutable a SafeMessager. +func (desc *immutable) SafeMessage() string { + return formatSafeTableDesc("tabledesc.immutable", desc) } // SafeMessage makes Mutable a SafeMessager. 
@@ -108,7 +108,7 @@ func formatSafeColumn( func formatSafeTableIndexes(w *redact.StringBuilder, desc catalog.TableDescriptor) { w.Printf(", PrimaryIndex: %d", desc.GetPrimaryIndexID()) - w.Printf(", NextIndexID: %d", desc.TableDesc().NextIndexID) + w.Printf(", NextIndexID: %d", desc.GetNextIndexID()) w.Printf(", Indexes: [") _ = catalog.ForEachActiveIndex(desc, func(idx catalog.Index) error { if !idx.Primary() { @@ -286,7 +286,7 @@ func formatSafeTableMutationJobs(w *redact.StringBuilder, td catalog.TableDescri } func formatSafeMutations(w *redact.StringBuilder, td catalog.TableDescriptor) { - mutations := td.TableDesc().Mutations + mutations := td.GetMutations() for i := range mutations { w.Printf(", ") m := &mutations[i] diff --git a/pkg/sql/catalog/tabledesc/safe_format_test.go b/pkg/sql/catalog/tabledesc/safe_format_test.go index cc5d7c38116d..4f841a072d3a 100644 --- a/pkg/sql/catalog/tabledesc/safe_format_test.go +++ b/pkg/sql/catalog/tabledesc/safe_format_test.go @@ -59,7 +59,7 @@ func TestSafeMessage(t *testing.T) { id: 12, parentID: 21, schema: "CREATE TABLE foo (i INT PRIMARY KEY, j INT, j_str STRING AS (j::STRING) STORED, INDEX (j_str))", - exp: `tabledesc.Immutable: {` + + exp: `tabledesc.immutable: {` + `ID: 12, Version: 1, ModificationTime: "1.000000000,0", ` + `ParentID: 21, ParentSchemaID: 29, State: PUBLIC, ` + `NextColumnID: 6, ` + @@ -236,7 +236,7 @@ func TestSafeMessage(t *testing.T) { id: 12, parentID: 21, schema: "CREATE TABLE foo ()", - exp: "tabledesc.Immutable: {" + + exp: "tabledesc.immutable: {" + "ID: 12, Version: 1, " + "ModificationTime: \"0,0\", " + "ParentID: 21, ParentSchemaID: 29, " + diff --git a/pkg/sql/catalog/tabledesc/structured.go b/pkg/sql/catalog/tabledesc/structured.go index 07d4571a71aa..f57b02637712 100644 --- a/pkg/sql/catalog/tabledesc/structured.go +++ b/pkg/sql/catalog/tabledesc/structured.go @@ -107,13 +107,13 @@ func NewFilledInExistingMutable( return &Mutable{wrapper: w, ClusterVersion: *tbl}, nil } -// 
 MakeImmutable returns an Immutable from the given TableDescriptor. -func MakeImmutable(tbl descpb.TableDescriptor) Immutable { +// makeImmutable returns an immutable from the given TableDescriptor. +func makeImmutable(tbl descpb.TableDescriptor) immutable { publicAndNonPublicCols := tbl.Columns readableCols := tbl.Columns - desc := Immutable{wrapper: wrapper{TableDescriptor: tbl, indexCache: newIndexCache(&tbl)}} + desc := immutable{wrapper: wrapper{TableDescriptor: tbl, indexCache: newIndexCache(&tbl)}} if len(tbl.Mutations) > 0 { publicAndNonPublicCols = make([]descpb.ColumnDescriptor, 0, len(tbl.Columns)+len(tbl.Mutations)) @@ -171,35 +171,35 @@ return desc } -// NewImmutable returns a Immutable from the given TableDescriptor. +// NewImmutable returns an immutable from the given TableDescriptor. // This function assumes that this descriptor has not been modified from the // version stored in the key-value store. -func NewImmutable(tbl descpb.TableDescriptor) *Immutable { +func NewImmutable(tbl descpb.TableDescriptor) catalog.TableDescriptor { return NewImmutableWithIsUncommittedVersion(tbl, false /* isUncommittedVersion */) } -// NewImmutableWithIsUncommittedVersion returns a Immutable from the given +// NewImmutableWithIsUncommittedVersion returns an immutable from the given // TableDescriptor and allows the caller to mark the table as corresponding to // an uncommitted version. This should be used when constructing a new copy of -// an Immutable from an existing descriptor which may have a new version. 
func NewImmutableWithIsUncommittedVersion( tbl descpb.TableDescriptor, isUncommittedVersion bool, -) *Immutable { - desc := MakeImmutable(tbl) +) catalog.TableDescriptor { + desc := makeImmutable(tbl) desc.isUncommittedVersion = isUncommittedVersion return &desc } -// NewFilledInImmutable will construct an Immutable and potentially perform +// NewFilledInImmutable will construct an immutable and potentially perform // post-deserialization upgrades. func NewFilledInImmutable( ctx context.Context, dg catalog.DescGetter, tbl *descpb.TableDescriptor, -) (*Immutable, error) { +) (catalog.TableDescriptor, error) { changes, err := maybeFillInDescriptor(ctx, dg, tbl, false /* skipFKsWithNoMatchingTable */) if err != nil { return nil, err } - desc := MakeImmutable(*tbl) + desc := makeImmutable(*tbl) desc.postDeserializationChanges = changes return &desc, nil } @@ -804,7 +804,7 @@ func ForEachExprStringInTableDesc(descI catalog.TableDescriptor, f func(expr *st switch descV := descI.(type) { case *wrapper: desc = descV - case *Immutable: + case *immutable: desc = &descV.wrapper case *Mutable: desc = &descV.wrapper @@ -1433,7 +1433,7 @@ func (desc *wrapper) validateCrossReferences(ctx context.Context, dg catalog.Des unupgradedFKsPresent := false if err := catalog.ForEachIndex(referencedTable, catalog.IndexOpts{}, func(referencedIdx catalog.Index) error { if found { - // TODO (lucy): If we ever revisit the tabledesc.Immutable methods, add + // TODO (lucy): If we ever revisit the tabledesc.immutable methods, add // a way to break out of the index loop. 
return nil } @@ -1512,7 +1512,7 @@ func (desc *wrapper) validateCrossReferences(ctx context.Context, dg catalog.Des unupgradedFKsPresent := false if err := catalog.ForEachIndex(originTable, catalog.IndexOpts{}, func(originIdx catalog.Index) error { if found { - // TODO (lucy): If we ever revisit the tabledesc.Immutable methods, add + // TODO (lucy): If we ever revisit the tabledesc.immutable methods, add // a way to break out of the index loop. return nil } @@ -2933,15 +2933,15 @@ func (desc *wrapper) ContainsUserDefinedTypes() bool { // ContainsUserDefinedTypes returns whether or not this table descriptor has // any columns of user defined types. -// This method is re-implemented for Immutable only for the purpose of calling +// This method is re-implemented for immutable only for the purpose of calling // the correct GetColumnOrdinalsWithUserDefinedTypes() method on desc. -func (desc *Immutable) ContainsUserDefinedTypes() bool { +func (desc *immutable) ContainsUserDefinedTypes() bool { return len(desc.GetColumnOrdinalsWithUserDefinedTypes()) > 0 } // GetColumnOrdinalsWithUserDefinedTypes returns a slice of column ordinals // of columns that contain user defined types. -func (desc *Immutable) GetColumnOrdinalsWithUserDefinedTypes() []int { +func (desc *immutable) GetColumnOrdinalsWithUserDefinedTypes() []int { return desc.columnsWithUDTs } @@ -2965,10 +2965,10 @@ func (desc *wrapper) UserDefinedTypeColsHaveSameVersion(otherDesc catalog.TableD // with user defined type metadata have the same versions of metadata as in the // other descriptor. Note that this function is only valid on two descriptors // representing the same table at the same version. -// This method is re-implemented for Immutable only for the purpose of calling +// This method is re-implemented for immutable only for the purpose of calling // the correct DeletableColumns() and GetColumnOrdinalsWithUserDefinedTypes() // methods on desc. 
-func (desc *Immutable) UserDefinedTypeColsHaveSameVersion(otherDesc catalog.TableDescriptor) bool { +func (desc *immutable) UserDefinedTypeColsHaveSameVersion(otherDesc catalog.TableDescriptor) bool { thisCols := desc.DeletableColumns() otherCols := otherDesc.DeletableColumns() for _, idx := range desc.GetColumnOrdinalsWithUserDefinedTypes() { @@ -2980,21 +2980,6 @@ func (desc *Immutable) UserDefinedTypeColsHaveSameVersion(otherDesc catalog.Tabl return true } -// FindReadableColumnByID finds the readable column with specified ID. The -// column may be undergoing a schema change and is marked nullable regardless -// of its configuration. It returns true if the column is undergoing a -// schema change. -func (desc *Immutable) FindReadableColumnByID( - id descpb.ColumnID, -) (*descpb.ColumnDescriptor, bool, error) { - for i, c := range desc.ReadableColumns() { - if c.ID == id { - return &c, i >= len(desc.Columns), nil - } - } - return nil, false, fmt.Errorf("column-id \"%d\" does not exist", id) -} - // FindFamilyByID finds the family with specified ID. func (desc *wrapper) FindFamilyByID(id descpb.FamilyID) (*descpb.ColumnFamilyDescriptor, error) { for i := range desc.Families { @@ -3250,27 +3235,6 @@ func (desc *Mutable) RenameConstraint( } } -// FindIndexByIndexIdx returns an active index with the specified -// index's index which has a domain of [0, # of secondary indexes] and whether -// the index is a secondary index. -// The primary index has an index of 0 and the first secondary index -// (if it exists) has an index of 1. -func (desc *wrapper) FindIndexByIndexIdx( - indexIdx int, -) (index *descpb.IndexDescriptor, isSecondary bool, err error) { - // indexIdx is 0 for the primary index, or 1 to for a - // secondary index. 
- if indexIdx < 0 || indexIdx > len(desc.Indexes) { - return nil, false, errors.Errorf("invalid indexIdx %d", indexIdx) - } - - if indexIdx > 0 { - return &desc.Indexes[indexIdx-1], true, nil - } - - return &desc.PrimaryIndex, false, nil -} - // GetIndexMutationCapabilities returns: // 1. Whether the index is a mutation // 2. if so, is it in state DELETE_AND_WRITE_ONLY @@ -3493,10 +3457,10 @@ func (desc *Mutable) MakeMutationComplete(m descpb.DescriptorMutation) error { if err != nil { return err } - oldIndex, _, err := desc.FindIndexByIndexIdx(oldIndexIdx) - if err != nil { - return err + if oldIndexIdx >= len(desc.ActiveIndexes()) { + return errors.Errorf("invalid indexIdx %d", oldIndexIdx) } + oldIndex := desc.ActiveIndexes()[oldIndexIdx].IndexDesc() oldIndexCopy := protoutil.Clone(oldIndex).(*descpb.IndexDescriptor) newIndex.IndexDesc().Name = oldIndexCopy.Name // Splice out old index from the indexes list. @@ -3837,11 +3801,13 @@ const IgnoreConstraints = false const IncludeConstraints = true // MakeFirstMutationPublic creates a Mutable from the -// Immutable by making the first mutation public. +// immutable by making the first mutation public. // This is super valuable when trying to run SQL over data associated // with a schema mutation that is still not yet public: Data validation, // error reporting. -func (desc *wrapper) MakeFirstMutationPublic(includeConstraints bool) (*Mutable, error) { +func (desc *wrapper) MakeFirstMutationPublic( + includeConstraints bool, +) (catalog.TableDescriptor, error) { // Clone the ImmutableTable descriptor because we want to create an ImmutableCopy one. table := NewExistingMutable(*protoutil.Clone(desc.TableDesc()).(*descpb.TableDescriptor)) mutationID := desc.Mutations[0].MutationID @@ -4177,22 +4143,22 @@ func (desc *wrapper) FindAllReferences() (map[descpb.ID]struct{}, error) { // ActiveChecks returns a list of all check constraints that should be enforced // on writes (including constraints being added/validated). 
The columns // referenced by the returned checks are writable, but not necessarily public. -func (desc *Immutable) ActiveChecks() []descpb.TableDescriptor_CheckConstraint { +func (desc *immutable) ActiveChecks() []descpb.TableDescriptor_CheckConstraint { return desc.allChecks } // WritableColumns returns a list of public and write-only mutation columns. -func (desc *Immutable) WritableColumns() []descpb.ColumnDescriptor { +func (desc *immutable) WritableColumns() []descpb.ColumnDescriptor { return desc.publicAndNonPublicCols[:len(desc.Columns)+desc.writeOnlyColCount] } // DeletableColumns returns a list of public and non-public columns. -func (desc *Immutable) DeletableColumns() []descpb.ColumnDescriptor { +func (desc *immutable) DeletableColumns() []descpb.ColumnDescriptor { return desc.publicAndNonPublicCols } // MutationColumns returns a list of mutation columns. -func (desc *Immutable) MutationColumns() []descpb.ColumnDescriptor { +func (desc *immutable) MutationColumns() []descpb.ColumnDescriptor { return desc.publicAndNonPublicCols[len(desc.Columns):] } diff --git a/pkg/sql/catalog/tabledesc/structured_test.go b/pkg/sql/catalog/tabledesc/structured_test.go index 7038c791a709..3b0de3a118b2 100644 --- a/pkg/sql/catalog/tabledesc/structured_test.go +++ b/pkg/sql/catalog/tabledesc/structured_test.go @@ -932,7 +932,7 @@ func TestValidateTableDesc(t *testing.T) { for i, d := range testData { t.Run(d.err, func(t *testing.T) { desc := NewImmutable(d.desc) - if err := desc.ValidateTable(ctx); err == nil { + if err := ValidateTable(ctx, desc); err == nil { t.Errorf("%d: expected \"%s\", but found success: %+v", i, d.err, d.desc) } else if d.err != err.Error() && "internal error: "+d.err != err.Error() { t.Errorf("%d: expected \"%s\", but found \"%+v\"", i, d.err, err) @@ -1350,7 +1350,7 @@ func TestValidateCrossTableReferences(t *testing.T) { descs[otherDesc.ID] = NewImmutable(otherDesc) } desc := NewImmutable(test.desc) - if err := desc.ValidateCrossReferences(ctx, 
descs); err == nil { + if err := ValidateCrossReferences(ctx, descs, desc); err == nil { if test.err != "" { t.Errorf("%d: expected \"%s\", but found success: %+v", i, test.err, test.desc) } @@ -1561,7 +1561,7 @@ func TestValidatePartitioning(t *testing.T) { for i, test := range tests { t.Run(test.err, func(t *testing.T) { desc := NewImmutable(test.desc) - err := desc.ValidatePartitioning() + err := ValidatePartitioning(desc) if !testutils.IsError(err, test.err) { t.Errorf(`%d: got "%v" expected "%v"`, i, err, test.err) } @@ -1687,7 +1687,7 @@ func TestMaybeUpgradeFormatVersion(t *testing.T) { tests := []struct { desc descpb.TableDescriptor expUpgrade bool - verify func(int, *Immutable) // nil means no extra verification. + verify func(int, catalog.TableDescriptor) // nil means no extra verification. }{ { desc: descpb.TableDescriptor{ @@ -1698,8 +1698,8 @@ func TestMaybeUpgradeFormatVersion(t *testing.T) { Privileges: descpb.NewDefaultPrivilegeDescriptor(security.RootUserName()), }, expUpgrade: true, - verify: func(i int, desc *Immutable) { - if len(desc.Families) == 0 { + verify: func(i int, desc catalog.TableDescriptor) { + if len(desc.GetFamilies()) == 0 { t.Errorf("%d: expected families to be set, but it was empty", i) } }, @@ -1720,7 +1720,9 @@ func TestMaybeUpgradeFormatVersion(t *testing.T) { for i, test := range tests { desc, err := NewFilledInImmutable(context.Background(), nil, &test.desc) require.NoError(t, err) - upgraded := desc.GetPostDeserializationChanges().UpgradedFormatVersion + changes, err := GetPostDeserializationChanges(desc) + require.NoError(t, err) + upgraded := changes.UpgradedFormatVersion if upgraded != test.expUpgrade { t.Fatalf("%d: expected upgraded=%t, but got upgraded=%t", i, test.expUpgrade, upgraded) } diff --git a/pkg/sql/catalog/tabledesc/table_desc.go b/pkg/sql/catalog/tabledesc/table_desc.go index 0243f577b5cd..79311a3c7f8f 100644 --- a/pkg/sql/catalog/tabledesc/table_desc.go +++ b/pkg/sql/catalog/tabledesc/table_desc.go @@ 
-18,19 +18,19 @@ import ( "github.com/cockroachdb/errors" ) -var _ catalog.TableDescriptor = (*Immutable)(nil) +var _ catalog.TableDescriptor = (*immutable)(nil) var _ catalog.TableDescriptor = (*Mutable)(nil) var _ catalog.MutableDescriptor = (*Mutable)(nil) var _ catalog.TableDescriptor = (*wrapper)(nil) // wrapper is the base implementation of the catalog.Descriptor -// interface, which is overloaded by Immutable and Mutable. +// interface, which is overloaded by immutable and Mutable. type wrapper struct { descpb.TableDescriptor // indexCache, when not nil, points to a struct containing precomputed // catalog.Index slices. This can therefore only be set when creating an - // Immutable. + // immutable. indexCache *indexCache postDeserializationChanges PostDeserializationTableDescriptorChanges @@ -132,10 +132,10 @@ func (desc *wrapper) GetColumnOrdinalsWithUserDefinedTypes() []int { return ords } -// Immutable is a custom type for TableDescriptors +// immutable is a custom type for TableDescriptors // It holds precomputed values and the underlying TableDescriptor // should be const. -type Immutable struct { +type immutable struct { wrapper // publicAndNonPublicCols is a list of public and non-public columns. @@ -166,7 +166,7 @@ type Immutable struct { } // IsUncommittedVersion implements the Descriptor interface. -func (desc *Immutable) IsUncommittedVersion() bool { +func (desc *immutable) IsUncommittedVersion() bool { return desc.isUncommittedVersion } @@ -199,7 +199,22 @@ func (desc *wrapper) GetColumnAtIdx(idx int) *descpb.ColumnDescriptor { // ReadableColumns returns a list of columns (including those undergoing a // schema change) which can be scanned. -func (desc *Immutable) ReadableColumns() []descpb.ColumnDescriptor { +func (desc *wrapper) ReadableColumns() []descpb.ColumnDescriptor { + cols := make([]descpb.ColumnDescriptor, 0, len(desc.Columns)+len(desc.Mutations)) + cols = append(cols, desc.Columns...) 
+ for _, m := range desc.Mutations { + if columnMutation := m.GetColumn(); columnMutation != nil { + col := *columnMutation + col.Nullable = true + cols = append(cols, col) + } + } + return cols +} + +// ReadableColumns returns a list of columns (including those undergoing a +// schema change) which can be scanned. +func (desc *immutable) ReadableColumns() []descpb.ColumnDescriptor { return desc.readableColumns } @@ -208,7 +223,7 @@ func (desc *Mutable) ImmutableCopy() catalog.Descriptor { // TODO (lucy): Should the immutable descriptor constructors always make a // copy, so we don't have to do it here? imm := NewImmutable(*protoutil.Clone(desc.TableDesc()).(*descpb.TableDescriptor)) - imm.isUncommittedVersion = desc.IsUncommittedVersion() + imm.(*immutable).isUncommittedVersion = desc.IsUncommittedVersion() return imm } diff --git a/pkg/sql/check.go b/pkg/sql/check.go index 166e49b42b37..d4006b909690 100644 --- a/pkg/sql/check.go +++ b/pkg/sql/check.go @@ -412,7 +412,7 @@ func formatValues(colNames []string, values tree.Datums) string { } // checkSet contains a subset of checks, as ordinals into -// Immutable.ActiveChecks. These checks have boolean columns +// immutable.ActiveChecks. These checks have boolean columns // produced as input to mutations, indicating the result of evaluating the // check. // diff --git a/pkg/sql/colfetcher/colbatch_scan.go b/pkg/sql/colfetcher/colbatch_scan.go index 98f7cf2a46c6..282c03da4f81 100644 --- a/pkg/sql/colfetcher/colbatch_scan.go +++ b/pkg/sql/colfetcher/colbatch_scan.go @@ -170,10 +170,10 @@ func NewColBatchScan( limitHint := execinfra.LimitHint(spec.LimitHint, post) returnMutations := spec.Visibility == execinfra.ScanVisibilityPublicAndNotPublic - // TODO(ajwerner): The need to construct an Immutable here + // TODO(ajwerner): The need to construct an immutable here // indicates that we're probably doing this wrong. 
Instead we should be // just setting the ID and Version in the spec or something like that and - // retrieving the hydrated Immutable from cache. + // retrieving the hydrated immutable from cache. table := tabledesc.NewImmutable(spec.Table) typs := table.ColumnTypesWithMutationsAndVirtualCol(returnMutations, spec.VirtualColumn) columnIdxMap := table.ColumnIdxMapWithMutations(returnMutations) @@ -239,16 +239,19 @@ func initCRowFetcher( codec keys.SQLCodec, allocator *colmem.Allocator, fetcher *cFetcher, - desc *tabledesc.Immutable, + desc catalog.TableDescriptor, colIdxMap catalog.TableColMap, valNeededForCol util.FastIntSet, spec *execinfrapb.TableReaderSpec, systemColumnDescs []descpb.ColumnDescriptor, ) (index *descpb.IndexDescriptor, isSecondaryIndex bool, err error) { - index, isSecondaryIndex, err = desc.FindIndexByIndexIdx(int(spec.IndexIdx)) - if err != nil { - return nil, false, err + indexIdx := int(spec.IndexIdx) + if indexIdx >= len(desc.ActiveIndexes()) { + return nil, false, errors.Errorf("invalid indexIdx %d", indexIdx) } + indexI := desc.ActiveIndexes()[indexIdx] + index = indexI.IndexDesc() + isSecondaryIndex = !indexI.Primary() tableArgs := row.FetcherTableArgs{ Desc: desc, diff --git a/pkg/sql/comment_on_column.go b/pkg/sql/comment_on_column.go index 7005b414b8fd..91f0d25ebd31 100644 --- a/pkg/sql/comment_on_column.go +++ b/pkg/sql/comment_on_column.go @@ -15,7 +15,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/security" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/privilege" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" @@ -24,7 +24,7 @@ import ( type commentOnColumnNode struct { n *tree.CommentOnColumn - tableDesc *tabledesc.Immutable + tableDesc catalog.TableDescriptor } 
// CommentOnColumn add comment on a column. @@ -68,7 +68,7 @@ func (n *commentOnColumnNode) startExec(params runParams) error { sessiondata.InternalExecutorOverride{User: security.RootUserName()}, "UPSERT INTO system.comments VALUES ($1, $2, $3, $4)", keys.ColumnCommentType, - n.tableDesc.ID, + n.tableDesc.GetID(), col.ID, *n.n.Comment) if err != nil { @@ -82,7 +82,7 @@ func (n *commentOnColumnNode) startExec(params runParams) error { sessiondata.InternalExecutorOverride{User: security.RootUserName()}, "DELETE FROM system.comments WHERE type=$1 AND object_id=$2 AND sub_id=$3", keys.ColumnCommentType, - n.tableDesc.ID, + n.tableDesc.GetID(), col.ID) if err != nil { return err @@ -100,7 +100,7 @@ func (n *commentOnColumnNode) startExec(params runParams) error { } return params.p.logEvent(params.ctx, - n.tableDesc.ID, + n.tableDesc.GetID(), &eventpb.CommentOnColumn{ TableName: tn.FQString(), ColumnName: string(n.n.ColumnItem.ColumnName), diff --git a/pkg/sql/comment_on_table.go b/pkg/sql/comment_on_table.go index 309111b85d93..421272b8dd27 100644 --- a/pkg/sql/comment_on_table.go +++ b/pkg/sql/comment_on_table.go @@ -15,7 +15,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/security" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/privilege" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" @@ -24,7 +24,7 @@ import ( type commentOnTableNode struct { n *tree.CommentOnTable - tableDesc *tabledesc.Immutable + tableDesc catalog.TableDescriptor } // CommentOnTable add comment on a table. 
@@ -61,7 +61,7 @@ func (n *commentOnTableNode) startExec(params runParams) error { sessiondata.InternalExecutorOverride{User: security.RootUserName()}, "UPSERT INTO system.comments VALUES ($1, $2, 0, $3)", keys.TableCommentType, - n.tableDesc.ID, + n.tableDesc.GetID(), *n.n.Comment) if err != nil { return err @@ -74,7 +74,7 @@ func (n *commentOnTableNode) startExec(params runParams) error { sessiondata.InternalExecutorOverride{User: security.RootUserName()}, "DELETE FROM system.comments WHERE type=$1 AND object_id=$2 AND sub_id=0", keys.TableCommentType, - n.tableDesc.ID) + n.tableDesc.GetID()) if err != nil { return err } @@ -85,7 +85,7 @@ func (n *commentOnTableNode) startExec(params runParams) error { comment = *n.n.Comment } return params.p.logEvent(params.ctx, - n.tableDesc.ID, + n.tableDesc.GetID(), &eventpb.CommentOnTable{ TableName: params.p.ResolvedName(n.n.Table).FQString(), Comment: comment, diff --git a/pkg/sql/conn_executor.go b/pkg/sql/conn_executor.go index fff40f5a8e29..7fcfd37f0c1b 100644 --- a/pkg/sql/conn_executor.go +++ b/pkg/sql/conn_executor.go @@ -2499,7 +2499,7 @@ func (ex *connExecutor) notifyStatsRefresherOfNewTables(ctx context.Context) { // for rowsAffected because we want to make sure that stats always get // created/refreshed here. ex.planner.execCfg.StatsRefresher. 
- NotifyMutation(desc.ID, math.MaxInt32 /* rowsAffected */) + NotifyMutation(desc.GetID(), math.MaxInt32 /* rowsAffected */) } } } diff --git a/pkg/sql/conn_executor_exec.go b/pkg/sql/conn_executor_exec.go index eb1e434d9931..1105be388173 100644 --- a/pkg/sql/conn_executor_exec.go +++ b/pkg/sql/conn_executor_exec.go @@ -760,7 +760,7 @@ func validatePrimaryKeys(tc *descs.Collection) error { if !table.HasPrimaryKey() { return unimplemented.NewWithIssuef(48026, "primary key of table %s dropped without subsequent addition of new primary key", - table.Name, + table.GetName(), ) } } diff --git a/pkg/sql/crdb_internal.go b/pkg/sql/crdb_internal.go index c81963cab465..113efce23d0a 100644 --- a/pkg/sql/crdb_internal.go +++ b/pkg/sql/crdb_internal.go @@ -333,7 +333,7 @@ CREATE TABLE crdb_internal.tables ( } } locality := tree.DNull - if c := table.TableDesc().LocalityConfig; c != nil { + if c := table.GetLocalityConfig(); c != nil { f := tree.NewFmtCtx(tree.FmtSimple) if err := tabledesc.FormatTableLocalityConfig(c, f); err != nil { return err @@ -365,7 +365,7 @@ CREATE TABLE crdb_internal.tables ( // Note: we do not use forEachTableDesc() here because we want to // include added and dropped descriptors. for _, desc := range descs { - table, ok := desc.(*tabledesc.Immutable) + table, ok := desc.(catalog.TableDescriptor) if !ok || p.CheckAnyPrivilege(ctx, table) != nil { continue } @@ -484,14 +484,14 @@ CREATE TABLE crdb_internal.schema_changes ( // Note: we do not use forEachTableDesc() here because we want to // include added and dropped descriptors. 
for _, desc := range descs { - table, ok := desc.(*tabledesc.Immutable) + table, ok := desc.(catalog.TableDescriptor) if !ok || p.CheckAnyPrivilege(ctx, table) != nil { continue } - tableID := tree.NewDInt(tree.DInt(int64(table.ID))) + tableID := tree.NewDInt(tree.DInt(int64(table.GetID()))) parentID := tree.NewDInt(tree.DInt(int64(table.GetParentID()))) - tableName := tree.NewDString(table.Name) - for _, mut := range table.Mutations { + tableName := tree.NewDString(table.GetName()) + for _, mut := range table.GetMutations() { mutType := "UNKNOWN" targetID := tree.DNull targetName := tree.DNull @@ -2509,8 +2509,8 @@ CREATE TABLE crdb_internal.ranges_no_leases ( for _, desc := range descs { id := uint32(desc.GetID()) switch desc := desc.(type) { - case *tabledesc.Immutable: - parents[id] = uint32(desc.ParentID) + case catalog.TableDescriptor: + parents[id] = uint32(desc.GetParentID()) tableNames[id] = desc.GetName() indexNames[id] = make(map[uint32]string) for _, idx := range desc.PublicNonPrimaryIndexes() { @@ -2776,7 +2776,7 @@ CREATE TABLE crdb_internal.zones ( return err } - var table *tabledesc.Immutable + var table catalog.TableDescriptor if zs.Database != "" { database, err := catalogkv.MustGetDatabaseDescByID(ctx, p.txn, p.ExecCfg().Codec, descpb.ID(id)) if err != nil { @@ -3703,7 +3703,7 @@ CREATE TABLE crdb_internal.predefined_comments ( if vTableEntry.comment != "" { if err := addRow( tableCommentKey, - tree.NewDInt(tree.DInt(table.ID)), + tree.NewDInt(tree.DInt(table.GetID())), zeroVal, tree.NewDString(vTableEntry.comment)); err != nil { return err diff --git a/pkg/sql/create_stats.go b/pkg/sql/create_stats.go index 8089ec02cad0..1a9e18adb4b1 100644 --- a/pkg/sql/create_stats.go +++ b/pkg/sql/create_stats.go @@ -21,10 +21,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/settings/cluster" + 
"github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemaexpr" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" @@ -158,7 +158,7 @@ func (n *createStatsNode) startJob(ctx context.Context, resultsCh chan<- tree.Da // makeJobRecord creates a CreateStats job record which can be used to plan and // execute statistics creation. func (n *createStatsNode) makeJobRecord(ctx context.Context) (*jobs.Record, error) { - var tableDesc *tabledesc.Immutable + var tableDesc catalog.TableDescriptor var fqTableName string var err error switch t := n.Table.(type) { @@ -270,7 +270,7 @@ func (n *createStatsNode) makeJobRecord(ctx context.Context) (*jobs.Record, erro Details: jobspb.CreateStatsDetails{ Name: string(n.Name), FQTableName: fqTableName, - Table: tableDesc.TableDescriptor, + Table: *tableDesc.TableDesc(), ColumnStats: colStats, Statement: eventLogStatement, AsOf: asOf, @@ -302,7 +302,7 @@ const maxNonIndexCols = 100 // other columns from the table. We only collect histograms for index columns, // plus any other boolean or enum columns (where the "histogram" is tiny). func createStatsDefaultColumns( - desc *tabledesc.Immutable, multiColEnabled bool, + desc catalog.TableDescriptor, multiColEnabled bool, ) ([]jobspb.CreateStatsDetails_ColStat, error) { colStats := make([]jobspb.CreateStatsDetails_ColStat, 0, len(desc.ActiveIndexes())) @@ -427,8 +427,8 @@ func createStatsDefaultColumns( // Add all remaining columns in the table, up to maxNonIndexCols. 
nonIdxCols := 0 - for i := 0; i < len(desc.Columns) && nonIdxCols < maxNonIndexCols; i++ { - col := &desc.Columns[i] + for i := 0; i < len(desc.GetPublicColumns()) && nonIdxCols < maxNonIndexCols; i++ { + col := &desc.GetPublicColumns()[i] colList := []descpb.ColumnID{col.ID} if !trackStatsIfNotExists(colList) { diff --git a/pkg/sql/create_table.go b/pkg/sql/create_table.go index 7eaa02385a81..3a4aade564a2 100644 --- a/pkg/sql/create_table.go +++ b/pkg/sql/create_table.go @@ -420,7 +420,7 @@ func (n *createTableNode) startExec(params runParams) error { params.ctx, params.p.txn, params.ExecCfg().Codec, - desc.ImmutableCopy().(*tabledesc.Immutable), + desc.ImmutableCopy().(catalog.TableDescriptor), desc.Columns, params.p.alloc) if err != nil { @@ -1175,7 +1175,7 @@ func addInterleave( } intl := descpb.InterleaveDescriptor_Ancestor{ - TableID: parentTable.ID, + TableID: parentTable.GetID(), IndexID: parentIndex.GetID(), SharedPrefixLen: uint32(parentIndex.NumColumns()), } diff --git a/pkg/sql/create_test.go b/pkg/sql/create_test.go index a62f4bbdddfb..4f40504d4399 100644 --- a/pkg/sql/create_test.go +++ b/pkg/sql/create_test.go @@ -236,24 +236,24 @@ func verifyTables( tableName := fmt.Sprintf("table_%d", id) kvDB := tc.Servers[count%tc.NumServers()].DB() tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", tableName) - if tableDesc.ID < descIDStart { + if tableDesc.GetID() < descIDStart { t.Fatalf( "table %s's ID %d is too small. 
Expected >= %d", tableName, - tableDesc.ID, + tableDesc.GetID(), descIDStart, ) - if _, ok := tableIDs[tableDesc.ID]; ok { + if _, ok := tableIDs[tableDesc.GetID()]; ok { t.Fatalf("duplicate ID: %d", id) } - tableIDs[tableDesc.ID] = struct{}{} - if tableDesc.ID > maxID { - maxID = tableDesc.ID + tableIDs[tableDesc.GetID()] = struct{}{} + if tableDesc.GetID() > maxID { + maxID = tableDesc.GetID() } } - usedTableIDs[tableDesc.ID] = tableName + usedTableIDs[tableDesc.GetID()] = tableName } if e, a := expectedNumOfTables, len(usedTableIDs); e != a { diff --git a/pkg/sql/create_view.go b/pkg/sql/create_view.go index 73e66ffb59e4..dc244faf56cd 100644 --- a/pkg/sql/create_view.go +++ b/pkg/sql/create_view.go @@ -78,7 +78,7 @@ func (n *createViewNode) startExec(params runParams) error { // Check that the view does not contain references to other databases. if !allowCrossDatabaseViews.Get(¶ms.p.execCfg.Settings.SV) { for _, dep := range n.planDeps { - if dbID := dep.desc.ParentID; dbID != n.dbDesc.ID && dbID != keys.SystemDatabaseID { + if dbID := dep.desc.GetParentID(); dbID != n.dbDesc.ID && dbID != keys.SystemDatabaseID { return errors.WithHintf( pgerror.Newf(pgcode.FeatureNotSupported, "the view cannot refer to other databases; (see the '%s' cluster setting)", @@ -246,7 +246,7 @@ func (n *createViewNode) startExec(params runParams) error { backRefMutable, descpb.InvalidMutationID, fmt.Sprintf("updating view reference %q in table %s(%d)", n.viewName, - updated.desc.Name, updated.desc.ID, + updated.desc.GetName(), updated.desc.GetID(), ), ); err != nil { return err diff --git a/pkg/sql/delete_range.go b/pkg/sql/delete_range.go index 2535d66f7e90..1ff0a3816620 100644 --- a/pkg/sql/delete_range.go +++ b/pkg/sql/delete_range.go @@ -16,8 +16,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" 
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -40,10 +40,10 @@ type deleteRangeNode struct { // spans are the spans to delete. spans roachpb.Spans // desc is the table descriptor the delete is operating on. - desc *tabledesc.Immutable + desc catalog.TableDescriptor // interleavedDesc are the table descriptors of any child interleaved tables // the delete is operating on. - interleavedDesc []*tabledesc.Immutable + interleavedDesc []catalog.TableDescriptor // fetcher is around to decode the returned keys from the DeleteRange, so that // we can count the number of rows deleted. fetcher row.Fetcher @@ -167,7 +167,7 @@ func (d *deleteRangeNode) startExec(params runParams) error { } // Possibly initiate a run of CREATE STATISTICS. - params.ExecCfg().StatsRefresher.NotifyMutation(d.desc.ID, d.rowCount) + params.ExecCfg().StatsRefresher.NotifyMutation(d.desc.GetID(), d.rowCount) return nil } diff --git a/pkg/sql/descriptor_mutation_test.go b/pkg/sql/descriptor_mutation_test.go index d90650f22a2f..37636b8f69ea 100644 --- a/pkg/sql/descriptor_mutation_test.go +++ b/pkg/sql/descriptor_mutation_test.go @@ -1154,11 +1154,11 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR UNIQUE); {"v", 4, descpb.DescriptorMutation_DELETE_AND_WRITE_ONLY}, } - if len(tableDesc.Mutations) != len(expected) { - t.Fatalf("%d mutations, instead of expected %d", len(tableDesc.Mutations), len(expected)) + if len(tableDesc.GetMutations()) != len(expected) { + t.Fatalf("%d mutations, instead of expected %d", len(tableDesc.GetMutations()), len(expected)) } - for i, m := range tableDesc.Mutations { + for i, m := range tableDesc.GetMutations() { name := expected[i].name if col := m.GetColumn(); col != nil { if col.Name != name { diff --git 
a/pkg/sql/distsql_physical_planner.go b/pkg/sql/distsql_physical_planner.go index a5c15588db8b..11c87bdae09e 100644 --- a/pkg/sql/distsql_physical_planner.go +++ b/pkg/sql/distsql_physical_planner.go @@ -25,9 +25,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/rpc" "github.com/cockroachdb/cockroach/pkg/rpc/nodedialer" "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/distsql" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" @@ -967,12 +967,12 @@ func (dsp *DistSQLPlanner) nodeVersionIsCompatible(nodeID roachpb.NodeID) bool { return distsql.FlowVerIsCompatible(dsp.planVersion, v.MinAcceptedVersion, v.Version) } -func getIndexIdx(index *descpb.IndexDescriptor, desc *tabledesc.Immutable) (uint32, error) { +func getIndexIdx(index *descpb.IndexDescriptor, desc catalog.TableDescriptor) (uint32, error) { foundIndex, _ := desc.FindIndexWithID(index.ID) if foundIndex != nil && foundIndex.Public() { return uint32(foundIndex.Ordinal()), nil } - return 0, errors.Errorf("invalid index %v (table %s)", index, desc.Name) + return 0, errors.Errorf("invalid index %v (table %s)", index, desc.GetName()) } // initTableReaderSpec initializes a TableReaderSpec/PostProcessSpec that @@ -1039,15 +1039,15 @@ func getVirtualColumn( // tableOrdinal returns the index of a column with the given ID. 
func tableOrdinal( - desc *tabledesc.Immutable, colID descpb.ColumnID, visibility execinfrapb.ScanVisibility, + desc catalog.TableDescriptor, colID descpb.ColumnID, visibility execinfrapb.ScanVisibility, ) int { - for i := range desc.Columns { - if desc.Columns[i].ID == colID { + for i := range desc.GetPublicColumns() { + if desc.GetPublicColumns()[i].ID == colID { return i } } if visibility == execinfra.ScanVisibilityPublicAndNotPublic { - offset := len(desc.Columns) + offset := len(desc.GetPublicColumns()) mutationColumns := desc.MutationColumns() for i := range mutationColumns { if mutationColumns[i].ID == colID { @@ -1061,16 +1061,16 @@ func tableOrdinal( // different for each system column kind. MVCCTimestampColumnID is the // largest column ID, and all system columns are decreasing from it. if colinfo.IsColIDSystemColumn(colID) { - return len(desc.Columns) + len(desc.MutationColumns()) + int(colinfo.MVCCTimestampColumnID-colID) + return len(desc.GetPublicColumns()) + len(desc.MutationColumns()) + int(colinfo.MVCCTimestampColumnID-colID) } panic(errors.AssertionFailedf("column %d not in desc.Columns", colID)) } -func highestTableOrdinal(desc *tabledesc.Immutable, visibility execinfrapb.ScanVisibility) int { - highest := len(desc.Columns) - 1 +func highestTableOrdinal(desc catalog.TableDescriptor, visibility execinfrapb.ScanVisibility) int { + highest := len(desc.GetPublicColumns()) - 1 if visibility == execinfra.ScanVisibilityPublicAndNotPublic { - highest = len(desc.Columns) + len(desc.MutationColumns()) - 1 + highest = len(desc.GetPublicColumns()) + len(desc.MutationColumns()) - 1 } return highest } @@ -1078,7 +1078,9 @@ func highestTableOrdinal(desc *tabledesc.Immutable, visibility execinfrapb.ScanV // toTableOrdinals returns a mapping from column ordinals in cols to table // reader column ordinals. 
func toTableOrdinals( - cols []*descpb.ColumnDescriptor, desc *tabledesc.Immutable, visibility execinfrapb.ScanVisibility, + cols []*descpb.ColumnDescriptor, + desc catalog.TableDescriptor, + visibility execinfrapb.ScanVisibility, ) []int { res := make([]int, len(cols)) for i := range res { @@ -1234,7 +1236,7 @@ func (dsp *DistSQLPlanner) createTableReaders( type tableReaderPlanningInfo struct { spec *execinfrapb.TableReaderSpec post execinfrapb.PostProcessSpec - desc *tabledesc.Immutable + desc catalog.TableDescriptor spans []roachpb.Span reverse bool scanVisibility execinfrapb.ScanVisibility @@ -1321,8 +1323,8 @@ func (dsp *DistSQLPlanner) planTableReaders( planToStreamColMap := make([]int, len(info.cols)) var descColumnIDs util.FastIntMap colID := 0 - for i := range info.desc.Columns { - descColumnIDs.Set(colID, int(info.desc.Columns[i].ID)) + for i := range info.desc.GetPublicColumns() { + descColumnIDs.Set(colID, int(info.desc.GetPublicColumns()[i].ID)) colID++ } if returnMutations { @@ -2267,7 +2269,7 @@ func (dsp *DistSQLPlanner) createPlanForZigzagJoin( cols[i].Columns[j] = uint32(col) } - numStreamCols += len(side.scan.desc.Columns) + numStreamCols += len(side.scan.desc.GetPublicColumns()) } // The zigzag join node only represents inner joins, so hardcode Type to @@ -2324,7 +2326,7 @@ func (dsp *DistSQLPlanner) createPlanForZigzagJoin( i++ } - colOffset += len(side.scan.desc.Columns) + colOffset += len(side.scan.desc.GetPublicColumns()) } // Set the ON condition. 
diff --git a/pkg/sql/distsql_physical_planner_test.go b/pkg/sql/distsql_physical_planner_test.go index a8becdeed824..d41fdb78e255 100644 --- a/pkg/sql/distsql_physical_planner_test.go +++ b/pkg/sql/distsql_physical_planner_test.go @@ -32,8 +32,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/rpc" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/distsql" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" @@ -65,7 +65,7 @@ import ( // TODO(radu): we should verify that the queries in tests using SplitTable // are indeed distributed as intended. func SplitTable( - t *testing.T, tc serverutils.TestClusterInterface, desc *tabledesc.Immutable, sps []SplitPoint, + t *testing.T, tc serverutils.TestClusterInterface, desc catalog.TableDescriptor, sps []SplitPoint, ) { if tc.ReplicationMode() != base.ReplicationManual { t.Fatal("SplitTable called on a test cluster that was not in manual replication mode") diff --git a/pkg/sql/distsql_plan_csv.go b/pkg/sql/distsql_plan_csv.go index 25d29ab36174..702e433b2e07 100644 --- a/pkg/sql/distsql_plan_csv.go +++ b/pkg/sql/distsql_plan_csv.go @@ -158,7 +158,7 @@ func presplitTableBoundaries( expirationTime := cfg.DB.Clock().Now().Add(time.Hour.Nanoseconds(), 0) for _, tbl := range tables { // TODO(ajwerner): Consider passing in the wrapped descriptors. 
- tblDesc := tabledesc.MakeImmutable(*tbl.Desc) + tblDesc := tabledesc.NewImmutable(*tbl.Desc) for _, span := range tblDesc.AllIndexSpans(cfg.Codec) { if err := cfg.DB.AdminSplit(ctx, span.Key, expirationTime); err != nil { return err diff --git a/pkg/sql/distsql_plan_stats.go b/pkg/sql/distsql_plan_stats.go index 2420beb0808b..5320f86009d1 100644 --- a/pkg/sql/distsql_plan_stats.go +++ b/pkg/sql/distsql_plan_stats.go @@ -53,7 +53,7 @@ var maxTimestampAge = settings.RegisterDurationSetting( ) func (dsp *DistSQLPlanner) createStatsPlan( - planCtx *PlanningCtx, desc *tabledesc.Immutable, reqStats []requestedStat, job *jobs.Job, + planCtx *PlanningCtx, desc catalog.TableDescriptor, reqStats []requestedStat, job *jobs.Job, ) (*PhysicalPlan, error) { if len(reqStats) == 0 { return nil, errors.New("no stats requested") @@ -186,7 +186,7 @@ func (dsp *DistSQLPlanner) createStatsPlan( ) // Estimate the expected number of rows based on existing stats in the cache. - tableStats, err := planCtx.ExtendedEvalCtx.ExecCfg.TableStatsCache.GetTableStats(planCtx.ctx, desc.ID) + tableStats, err := planCtx.ExtendedEvalCtx.ExecCfg.TableStatsCache.GetTableStats(planCtx.ctx, desc.GetID()) if err != nil { return nil, err } @@ -213,7 +213,7 @@ func (dsp *DistSQLPlanner) createStatsPlan( InvertedSketches: invSketchSpecs, SampleSize: sampler.SampleSize, SampledColumnIDs: sampledColumnIDs, - TableID: desc.ID, + TableID: desc.GetID(), JobID: jobID, RowsExpected: rowsExpected, } diff --git a/pkg/sql/drop_sequence.go b/pkg/sql/drop_sequence.go index 3b7f6e75ea0a..5c2e57475bc4 100644 --- a/pkg/sql/drop_sequence.go +++ b/pkg/sql/drop_sequence.go @@ -22,6 +22,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry" "github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented" + "github.com/cockroachdb/cockroach/pkg/util/iterutil" "github.com/cockroachdb/cockroach/pkg/util/log" 
"github.com/cockroachdb/cockroach/pkg/util/log/eventpb" "github.com/cockroachdb/errors" @@ -174,33 +175,47 @@ func (p *planner) canRemoveOwnedSequencesImpl( } return err } - dependedOnBy := seqDesc.GetDependedOnBy() - affectsNoColumns := len(dependedOnBy) == 0 - // It is okay if the sequence is depended on by columns that are being - // dropped in the same transaction - canBeSafelyRemoved := len(dependedOnBy) == 1 && dependedOnBy[0].ID == desc.ID - // If only the column is being dropped, no other columns of the table can - // depend on that sequence either - if isColumnDrop { - canBeSafelyRemoved = canBeSafelyRemoved && len(dependedOnBy[0].ColumnIDs) == 1 && - dependedOnBy[0].ColumnIDs[0] == col.ID + + var firstDep *descpb.TableDescriptor_Reference + multipleIterationErr := seqDesc.ForeachDependedOnBy(func(dep *descpb.TableDescriptor_Reference) error { + if firstDep != nil { + return iterutil.StopIteration() + } + firstDep = dep + return nil + }) + + if firstDep == nil { + // This sequence is not depended on by anything, it's safe to remove. + continue } - canRemove := affectsNoColumns || canBeSafelyRemoved + if multipleIterationErr == nil && firstDep.ID == desc.ID { + // This sequence is depended on only by columns in the table of interest. + if !isColumnDrop { + // Either we're dropping the whole table and thereby also anything + // that might depend on this sequence, making it safe to remove... + continue + } + // ...or we're dropping a column in the table of interest. + if len(firstDep.ColumnIDs) == 1 && firstDep.ColumnIDs[0] == col.ID { + // The sequence is safe to remove iff it's not depended on by any other + // columns in the table other than that one. + continue + } + } // Once Drop Sequence Cascade actually respects the drop behavior, this // check should go away. 
- if behavior == tree.DropCascade && !canRemove { + if behavior == tree.DropCascade { return unimplemented.NewWithIssue(20965, "DROP SEQUENCE CASCADE is currently unimplemented") } // If Cascade is not enabled, and more than 1 columns depend on it, and the - if behavior != tree.DropCascade && !canRemove { - return pgerror.Newf( - pgcode.DependentObjectsStillExist, - "cannot drop table %s because other objects depend on it", - desc.Name, - ) - } + return pgerror.Newf( + pgcode.DependentObjectsStillExist, + "cannot drop table %s because other objects depend on it", + desc.Name, + ) } return nil } diff --git a/pkg/sql/drop_test.go b/pkg/sql/drop_test.go index 7ee23c0358da..35f5d62f4a33 100644 --- a/pkg/sql/drop_test.go +++ b/pkg/sql/drop_test.go @@ -29,6 +29,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc" @@ -131,14 +132,14 @@ INSERT INTO t.kv VALUES ('c', 'e'), ('a', 'c'), ('b', 'd'); if err != nil { t.Fatal(err) } - if _, err := sqlDB.Exec(`INSERT INTO system.zones VALUES ($1, $2)`, tbDesc.ID, buf); err != nil { + if _, err := sqlDB.Exec(`INSERT INTO system.zones VALUES ($1, $2)`, tbDesc.GetID(), buf); err != nil { t.Fatal(err) } if _, err := sqlDB.Exec(`INSERT INTO system.zones VALUES ($1, $2)`, dbDesc.GetID(), buf); err != nil { t.Fatal(err) } - if err := zoneExists(sqlDB, &cfg, tbDesc.ID); err != nil { + if err := zoneExists(sqlDB, &cfg, tbDesc.GetID()); err != nil { t.Fatal(err) } if err := zoneExists(sqlDB, &cfg, dbDesc.GetID()); err != nil { @@ -160,11 +161,11 @@ INSERT INTO t.kv VALUES ('c', 'e'), ('a', 'c'), ('b', 'd'); // Data is not deleted. 
tests.CheckKeyCount(t, kvDB, tableSpan, 6) - if err := descExists(sqlDB, true, tbDesc.ID); err != nil { + if err := descExists(sqlDB, true, tbDesc.GetID()); err != nil { t.Fatal(err) } tbNameKey := catalogkeys.MakeNameMetadataKey(keys.SystemSQLCodec, - tbDesc.GetParentID(), keys.PublicSchemaID, tbDesc.Name) + tbDesc.GetParentID(), keys.PublicSchemaID, tbDesc.GetName()) if gr, err := kvDB.Get(ctx, tbNameKey); err != nil { t.Fatal(err) } else if gr.Exists() { @@ -186,7 +187,7 @@ INSERT INTO t.kv VALUES ('c', 'e'), ('a', 'c'), ('b', 'd'); t.Fatalf("database descriptor key still exists after database is dropped") } - if err := zoneExists(sqlDB, &cfg, tbDesc.ID); err != nil { + if err := zoneExists(sqlDB, &cfg, tbDesc.GetID()); err != nil { t.Fatal(err) } @@ -203,7 +204,7 @@ INSERT INTO t.kv VALUES ('c', 'e'), ('a', 'c'), ('b', 'd'); Username: security.RootUserName(), Description: "DROP DATABASE t CASCADE", DescriptorIDs: descpb.IDs{ - tbDesc.ID, + tbDesc.GetID(), }, }); err != nil { t.Fatal(err) @@ -319,7 +320,7 @@ INSERT INTO t.kv2 VALUES ('c', 'd'), ('a', 'b'), ('e', 'a'); Username: security.RootUserName(), Description: "DROP DATABASE t CASCADE", DescriptorIDs: descpb.IDs{ - tbDesc.ID, tb2Desc.ID, + tbDesc.GetID(), tb2Desc.GetID(), }, }); err != nil { t.Fatal(err) @@ -327,16 +328,16 @@ INSERT INTO t.kv2 VALUES ('c', 'd'), ('a', 'b'), ('e', 'a'); // Push a new zone config for the table with TTL=0 so the data is // deleted immediately. - if _, err := sqltestutils.AddImmediateGCZoneConfig(sqlDB, tbDesc.ID); err != nil { + if _, err := sqltestutils.AddImmediateGCZoneConfig(sqlDB, tbDesc.GetID()); err != nil { t.Fatal(err) } testutils.SucceedsSoon(t, func() error { - if err := descExists(sqlDB, false, tbDesc.ID); err != nil { + if err := descExists(sqlDB, false, tbDesc.GetID()); err != nil { return err } - return zoneExists(sqlDB, nil, tbDesc.ID) + return zoneExists(sqlDB, nil, tbDesc.GetID()) }) // Table 1 data is deleted. 
@@ -353,12 +354,12 @@ INSERT INTO t.kv2 VALUES ('c', 'd'), ('a', 'b'), ('e', 'a'); Username: security.RootUserName(), Description: "GC for DROP DATABASE t CASCADE", DescriptorIDs: descpb.IDs{ - tbDesc.ID, tb2Desc.ID, + tbDesc.GetID(), tb2Desc.GetID(), }, }) }) - if _, err := sqltestutils.AddImmediateGCZoneConfig(sqlDB, tb2Desc.ID); err != nil { + if _, err := sqltestutils.AddImmediateGCZoneConfig(sqlDB, tb2Desc.GetID()); err != nil { t.Fatal(err) } if _, err := sqltestutils.AddImmediateGCZoneConfig(sqlDB, dbDesc.GetID()); err != nil { @@ -366,11 +367,11 @@ INSERT INTO t.kv2 VALUES ('c', 'd'), ('a', 'b'), ('e', 'a'); } testutils.SucceedsSoon(t, func() error { - if err := descExists(sqlDB, false, tb2Desc.ID); err != nil { + if err := descExists(sqlDB, false, tb2Desc.GetID()); err != nil { return err } - return zoneExists(sqlDB, nil, tb2Desc.ID) + return zoneExists(sqlDB, nil, tb2Desc.GetID()) }) // Table 2 data is deleted. @@ -380,7 +381,7 @@ INSERT INTO t.kv2 VALUES ('c', 'd'), ('a', 'b'), ('e', 'a'); Username: security.RootUserName(), Description: "DROP DATABASE t CASCADE", DescriptorIDs: descpb.IDs{ - tbDesc.ID, tb2Desc.ID, + tbDesc.GetID(), tb2Desc.GetID(), }, }); err != nil { t.Fatal(err) @@ -451,7 +452,7 @@ func TestDropIndex(t *testing.T) { Username: security.RootUserName(), Description: `DROP INDEX t.public.kv@foo`, DescriptorIDs: descpb.IDs{ - tableDesc.ID, + tableDesc.GetID(), }, }); err != nil { t.Fatal(err) @@ -472,7 +473,7 @@ func TestDropIndex(t *testing.T) { clearIndexAttempt = true // Add a zone config for the table. 
- if _, err := sqltestutils.AddImmediateGCZoneConfig(sqlDB, tableDesc.ID); err != nil { + if _, err := sqltestutils.AddImmediateGCZoneConfig(sqlDB, tableDesc.GetID()); err != nil { t.Fatal(err) } @@ -481,7 +482,7 @@ func TestDropIndex(t *testing.T) { Username: security.RootUserName(), Description: `DROP INDEX t.public.kv@foo`, DescriptorIDs: descpb.IDs{ - tableDesc.ID, + tableDesc.GetID(), }, }) }) @@ -491,7 +492,7 @@ func TestDropIndex(t *testing.T) { Username: security.RootUserName(), Description: `GC for DROP INDEX t.public.kv@foo`, DescriptorIDs: descpb.IDs{ - tableDesc.ID, + tableDesc.GetID(), }, }) }) @@ -546,7 +547,7 @@ func TestDropIndexWithZoneConfigOSS(t *testing.T) { if err != nil { t.Fatal(err) } - sqlDB.Exec(t, `INSERT INTO system.zones VALUES ($1, $2)`, tableDesc.ID, zoneConfigBytes) + sqlDB.Exec(t, `INSERT INTO system.zones VALUES ($1, $2)`, tableDesc.GetID(), zoneConfigBytes) if !sqlutils.ZoneConfigExists(t, sqlDB, "INDEX t.public.kv@foo") { t.Fatal("zone config for index does not exist") } @@ -630,12 +631,12 @@ func TestDropTable(t *testing.T) { } // Add a zone config for the table. - cfg, err := sqltestutils.AddDefaultZoneConfig(sqlDB, tableDesc.ID) + cfg, err := sqltestutils.AddDefaultZoneConfig(sqlDB, tableDesc.GetID()) if err != nil { t.Fatal(err) } - if err := zoneExists(sqlDB, &cfg, tableDesc.ID); err != nil { + if err := zoneExists(sqlDB, &cfg, tableDesc.GetID()); err != nil { t.Fatal(err) } @@ -666,7 +667,7 @@ func TestDropTable(t *testing.T) { Username: security.RootUserName(), Description: `DROP TABLE t.public.kv`, DescriptorIDs: descpb.IDs{ - tableDesc.ID, + tableDesc.GetID(), }, }); err != nil { t.Fatal(err) @@ -681,11 +682,11 @@ func TestDropTable(t *testing.T) { // asynchronous path. 
tests.CheckKeyCount(t, kvDB, tableSpan, 3*numRows) - if err := descExists(sqlDB, true, tableDesc.ID); err != nil { + if err := descExists(sqlDB, true, tableDesc.GetID()); err != nil { t.Fatal(err) } - if err := zoneExists(sqlDB, &cfg, tableDesc.ID); err != nil { + if err := zoneExists(sqlDB, &cfg, tableDesc.GetID()); err != nil { t.Fatal(err) } } @@ -709,7 +710,7 @@ func TestDropTableDeleteData(t *testing.T) { const numRows = 2*row.TableTruncateChunkSize + 1 const numKeys = 3 * numRows const numTables = 5 - var descs []*tabledesc.Immutable + var descs []catalog.TableDescriptor for i := 0; i < numTables; i++ { tableName := fmt.Sprintf("test%d", i) if err := tests.CreateKVTable(sqlDB, tableName, numRows); err != nil { @@ -742,7 +743,7 @@ func TestDropTableDeleteData(t *testing.T) { // Data hasn't been GC-ed. sqlRun := sqlutils.MakeSQLRunner(sqlDB) for i := 0; i < numTables; i++ { - if err := descExists(sqlDB, true, descs[i].ID); err != nil { + if err := descExists(sqlDB, true, descs[i].GetID()); err != nil { t.Fatal(err) } tableSpan := descs[i].TableSpan(keys.SystemSQLCodec) @@ -752,7 +753,7 @@ func TestDropTableDeleteData(t *testing.T) { Username: security.RootUserName(), Description: fmt.Sprintf(`DROP TABLE t.public.%s`, descs[i].GetName()), DescriptorIDs: descpb.IDs{ - descs[i].ID, + descs[i].GetID(), }, }); err != nil { t.Fatal(err) @@ -761,18 +762,18 @@ func TestDropTableDeleteData(t *testing.T) { // The closure pushes a zone config reducing the TTL to 0 for descriptor i. 
pushZoneCfg := func(i int) { - if _, err := sqltestutils.AddImmediateGCZoneConfig(sqlDB, descs[i].ID); err != nil { + if _, err := sqltestutils.AddImmediateGCZoneConfig(sqlDB, descs[i].GetID()); err != nil { t.Fatal(err) } } checkTableGCed := func(i int) { testutils.SucceedsSoon(t, func() error { - if err := descExists(sqlDB, false, descs[i].ID); err != nil { + if err := descExists(sqlDB, false, descs[i].GetID()); err != nil { return err } - return zoneExists(sqlDB, nil, descs[i].ID) + return zoneExists(sqlDB, nil, descs[i].GetID()) }) tableSpan := descs[i].TableSpan(keys.SystemSQLCodec) tests.CheckKeyCount(t, kvDB, tableSpan, 0) @@ -782,7 +783,7 @@ func TestDropTableDeleteData(t *testing.T) { Username: security.RootUserName(), Description: fmt.Sprintf(`DROP TABLE t.public.%s`, descs[i].GetName()), DescriptorIDs: descpb.IDs{ - descs[i].ID, + descs[i].GetID(), }, }); err != nil { t.Fatal(err) @@ -794,7 +795,7 @@ func TestDropTableDeleteData(t *testing.T) { Username: security.RootUserName(), Description: fmt.Sprintf(`GC for DROP TABLE t.public.%s`, descs[i].GetName()), DescriptorIDs: descpb.IDs{ - descs[i].ID, + descs[i].GetID(), }, }) }) @@ -948,12 +949,12 @@ func TestDropTableInterleavedDeleteData(t *testing.T) { t.Fatalf("different error than expected: %v", err) } - if _, err := sqltestutils.AddImmediateGCZoneConfig(sqlDB, tableDescInterleaved.ID); err != nil { + if _, err := sqltestutils.AddImmediateGCZoneConfig(sqlDB, tableDescInterleaved.GetID()); err != nil { t.Fatal(err) } testutils.SucceedsSoon(t, func() error { - return descExists(sqlDB, false, tableDescInterleaved.ID) + return descExists(sqlDB, false, tableDescInterleaved.GetID()) }) tests.CheckKeyCount(t, kvDB, tableSpan, numRows) @@ -1038,7 +1039,7 @@ func TestDropDatabaseAfterDropTable(t *testing.T) { Username: security.RootUserName(), Description: "DROP TABLE t.public.kv", DescriptorIDs: descpb.IDs{ - tableDesc.ID, + tableDesc.GetID(), }, }); err != nil { t.Fatal(err) diff --git 
a/pkg/sql/execinfrapb/flow_diagram.go b/pkg/sql/execinfrapb/flow_diagram.go index 08b77da81f8e..0e96f410d4cc 100644 --- a/pkg/sql/execinfrapb/flow_diagram.go +++ b/pkg/sql/execinfrapb/flow_diagram.go @@ -148,8 +148,8 @@ func (tr *TableReaderSpec) summary() (string, []string) { if len(tr.Spans) > 0 { tbl := tabledesc.NewImmutable(tr.Table) // only show the first span - idx, _, _ := tbl.FindIndexByIndexIdx(int(tr.IndexIdx)) - valDirs := catalogkeys.IndexKeyValDirs(idx) + idx := tbl.ActiveIndexes()[int(tr.IndexIdx)] + valDirs := catalogkeys.IndexKeyValDirs(idx.IndexDesc()) var spanStr strings.Builder spanStr.WriteString("Spans: ") diff --git a/pkg/sql/gcjob/index_garbage_collection.go b/pkg/sql/gcjob/index_garbage_collection.go index 2ab01a1314bc..e1b75b1616ce 100644 --- a/pkg/sql/gcjob/index_garbage_collection.go +++ b/pkg/sql/gcjob/index_garbage_collection.go @@ -17,9 +17,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/errors" ) @@ -45,7 +45,7 @@ func gcIndexes( return err } - var parentTable *tabledesc.Immutable + var parentTable catalog.TableDescriptor if err := execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { parentTable, err = catalogkv.MustGetTableDescByID(ctx, txn, execCfg.Codec, parentID) return err @@ -82,10 +82,10 @@ func gcIndexes( func clearIndex( ctx context.Context, execCfg *sql.ExecutorConfig, - tableDesc *tabledesc.Immutable, + tableDesc catalog.TableDescriptor, index descpb.IndexDescriptor, ) error { - log.Infof(ctx, "clearing index %d from table %d", 
index.ID, tableDesc.ID) + log.Infof(ctx, "clearing index %d from table %d", index.ID, tableDesc.GetID()) if index.IsInterleaved() { return errors.Errorf("unexpected interleaved index %d", index.ID) } @@ -109,11 +109,11 @@ func clearIndex( func completeDroppedIndex( ctx context.Context, execCfg *sql.ExecutorConfig, - table *tabledesc.Immutable, + table catalog.TableDescriptor, indexID descpb.IndexID, progress *jobspb.SchemaChangeGCProgress, ) error { - if err := updateDescriptorGCMutations(ctx, execCfg, table.ID, indexID); err != nil { + if err := updateDescriptorGCMutations(ctx, execCfg, table.GetID(), indexID); err != nil { return errors.Wrapf(err, "updating GC mutations") } diff --git a/pkg/sql/gcjob/refresh_statuses.go b/pkg/sql/gcjob/refresh_statuses.go index ea647436fe4b..95db21e99ca4 100644 --- a/pkg/sql/gcjob/refresh_statuses.go +++ b/pkg/sql/gcjob/refresh_statuses.go @@ -26,7 +26,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/errors" @@ -145,7 +144,7 @@ func updateTableStatus( execCfg *sql.ExecutorConfig, ttlSeconds int64, protectedtsCache protectedts.Cache, - table *tabledesc.Immutable, + table catalog.TableDescriptor, tableDropTimes map[descpb.ID]int64, progress *jobspb.SchemaChangeGCProgress, ) time.Time { @@ -154,7 +153,7 @@ func updateTableStatus( for i, t := range progress.Tables { droppedTable := &progress.Tables[i] - if droppedTable.ID != table.ID || droppedTable.Status == jobspb.SchemaChangeGCProgress_DELETED { + if droppedTable.ID != table.GetID() || droppedTable.Status == jobspb.SchemaChangeGCProgress_DELETED { continue } @@ -190,7 +189,7 @@ func updateIndexesStatus( ctx 
context.Context, execCfg *sql.ExecutorConfig, tableTTL int32, - table *tabledesc.Immutable, + table catalog.TableDescriptor, protectedtsCache protectedts.Cache, zoneCfg *zonepb.ZoneConfig, indexDropTimes map[descpb.IndexID]int64, @@ -211,19 +210,19 @@ func updateIndexesStatus( deadlineNanos := indexDropTimes[idxProgress.IndexID] + int64(ttlSeconds)*time.Second.Nanoseconds() deadline := timeutil.Unix(0, deadlineNanos) if isProtected(ctx, protectedtsCache, indexDropTimes[idxProgress.IndexID], sp) { - log.Infof(ctx, "a timestamp protection delayed GC of index %d from table %d", idxProgress.IndexID, table.ID) + log.Infof(ctx, "a timestamp protection delayed GC of index %d from table %d", idxProgress.IndexID, table.GetID()) continue } lifetime := time.Until(deadline) if lifetime > 0 { if log.V(2) { - log.Infof(ctx, "index %d from table %d still has %+v until GC", idxProgress.IndexID, table.ID, lifetime) + log.Infof(ctx, "index %d from table %d still has %+v until GC", idxProgress.IndexID, table.GetID(), lifetime) } } if lifetime < 0 { expired = true if log.V(2) { - log.Infof(ctx, "detected expired index %d from table %d", idxProgress.IndexID, table.ID) + log.Infof(ctx, "detected expired index %d from table %d", idxProgress.IndexID, table.GetID()) } idxProgress.Status = jobspb.SchemaChangeGCProgress_DELETING } else if deadline.Before(soonestDeadline) { diff --git a/pkg/sql/gcjob/table_garbage_collection.go b/pkg/sql/gcjob/table_garbage_collection.go index c1c03dcedf06..7d8277cc25ab 100644 --- a/pkg/sql/gcjob/table_garbage_collection.go +++ b/pkg/sql/gcjob/table_garbage_collection.go @@ -22,7 +22,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/timeutil" 
"github.com/cockroachdb/errors" @@ -43,7 +42,7 @@ func gcTables( continue } - var table *tabledesc.Immutable + var table catalog.TableDescriptor if err := execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { var err error table, err = catalogkv.MustGetTableDescByID(ctx, txn, execCfg.Codec, droppedTable.ID) @@ -67,16 +66,16 @@ func gcTables( // First, delete all the table data. if err := ClearTableData(ctx, execCfg.DB, execCfg.DistSender, execCfg.Codec, table); err != nil { - return errors.Wrapf(err, "clearing data for table %d", table.ID) + return errors.Wrapf(err, "clearing data for table %d", table.GetID()) } // Finished deleting all the table data, now delete the table meta data. if err := sql.DeleteTableDescAndZoneConfig(ctx, execCfg.DB, execCfg.Codec, table); err != nil { - return errors.Wrapf(err, "dropping table descriptor for table %d", table.ID) + return errors.Wrapf(err, "dropping table descriptor for table %d", table.GetID()) } // Update the details payload to indicate that the table was dropped. - markTableGCed(ctx, table.ID, progress) + markTableGCed(ctx, table.GetID(), progress) } return nil } @@ -87,20 +86,20 @@ func ClearTableData( db *kv.DB, distSender *kvcoord.DistSender, codec keys.SQLCodec, - table *tabledesc.Immutable, + table catalog.TableDescriptor, ) error { // If DropTime isn't set, assume this drop request is from a version // 1.1 server and invoke legacy code that uses DeleteRange and range GC. // TODO(pbardea): Note that we never set the drop time for interleaved tables, // but this check was added to be more explicit about it. This should get // cleaned up. 
- if table.DropTime == 0 || table.IsInterleaved() { - log.Infof(ctx, "clearing data in chunks for table %d", table.ID) + if table.GetDropTime() == 0 || table.IsInterleaved() { + log.Infof(ctx, "clearing data in chunks for table %d", table.GetID()) return sql.ClearTableDataInChunks(ctx, db, codec, table, false /* traceKV */) } - log.Infof(ctx, "clearing data for table %d", table.ID) + log.Infof(ctx, "clearing data for table %d", table.GetID()) - tableKey := roachpb.RKey(codec.TablePrefix(uint32(table.ID))) + tableKey := roachpb.RKey(codec.TablePrefix(uint32(table.GetID()))) tableSpan := roachpb.RSpan{Key: tableKey, EndKey: tableKey.PrefixEnd()} // ClearRange requests lays down RocksDB range deletion tombstones that have diff --git a/pkg/sql/insert_fast_path.go b/pkg/sql/insert_fast_path.go index f1e0015d182c..0e29d1451a28 100644 --- a/pkg/sql/insert_fast_path.go +++ b/pkg/sql/insert_fast_path.go @@ -18,7 +18,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/opt/exec" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" @@ -87,7 +86,7 @@ type insertFastPathFKSpanInfo struct { type insertFastPathFKCheck struct { exec.InsertFastPathFKCheck - tabDesc *tabledesc.Immutable + tabDesc catalog.TableDescriptor idxDesc *descpb.IndexDescriptor keyPrefix []byte colMap catalog.TableColMap diff --git a/pkg/sql/materialized_view_test.go b/pkg/sql/materialized_view_test.go index 1efca62878aa..8be32b007087 100644 --- a/pkg/sql/materialized_view_test.go +++ b/pkg/sql/materialized_view_test.go @@ -66,14 +66,14 @@ REFRESH MATERIALIZED VIEW t.v; } // Add a zone config to delete all table data. 
- _, err := sqltestutils.AddImmediateGCZoneConfig(sqlDB, descBeforeRefresh.ID) + _, err := sqltestutils.AddImmediateGCZoneConfig(sqlDB, descBeforeRefresh.GetID()) if err != nil { t.Fatal(err) } // The data should be deleted. testutils.SucceedsSoon(t, func() error { - indexPrefix := keys.SystemSQLCodec.IndexPrefix(uint32(descBeforeRefresh.ID), uint32(descBeforeRefresh.GetPrimaryIndexID())) + indexPrefix := keys.SystemSQLCodec.IndexPrefix(uint32(descBeforeRefresh.GetID()), uint32(descBeforeRefresh.GetPrimaryIndexID())) indexEnd := indexPrefix.PrefixEnd() if kvs, err := kvDB.Scan(ctx, indexPrefix, indexEnd, 0); err != nil { t.Fatal(err) @@ -182,7 +182,7 @@ CREATE MATERIALIZED VIEW t.v AS SELECT x FROM t.t; descBeforeRefresh := catalogkv.TestingGetImmutableTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "v") // Add a zone config to delete all table data. - _, err := sqltestutils.AddImmediateGCZoneConfig(sqlDB, descBeforeRefresh.ID) + _, err := sqltestutils.AddImmediateGCZoneConfig(sqlDB, descBeforeRefresh.GetID()) if err != nil { t.Fatal(err) } @@ -193,7 +193,7 @@ CREATE MATERIALIZED VIEW t.v AS SELECT x FROM t.t; } testutils.SucceedsSoon(t, func() error { - tableStart := keys.SystemSQLCodec.TablePrefix(uint32(descBeforeRefresh.ID)) + tableStart := keys.SystemSQLCodec.TablePrefix(uint32(descBeforeRefresh.GetID())) tableEnd := tableStart.PrefixEnd() if kvs, err := kvDB.Scan(ctx, tableStart, tableEnd, 0); err != nil { t.Fatal(err) @@ -228,7 +228,7 @@ CREATE MATERIALIZED VIEW t.v AS SELECT x FROM t.t; `) desc := catalogkv.TestingGetImmutableTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "v") // Add a zone config to delete all table data. - _, err := sqltestutils.AddImmediateGCZoneConfig(sqlRaw, desc.ID) + _, err := sqltestutils.AddImmediateGCZoneConfig(sqlRaw, desc.GetID()) require.NoError(t, err) // Now drop the view. @@ -237,7 +237,7 @@ CREATE MATERIALIZED VIEW t.v AS SELECT x FROM t.t; // All of the table data should be cleaned up. 
testutils.SucceedsSoon(t, func() error { - tableStart := keys.SystemSQLCodec.TablePrefix(uint32(desc.ID)) + tableStart := keys.SystemSQLCodec.TablePrefix(uint32(desc.GetID())) tableEnd := tableStart.PrefixEnd() if kvs, err := kvDB.Scan(ctx, tableStart, tableEnd, 0); err != nil { t.Fatal(err) diff --git a/pkg/sql/old_foreign_key_desc_test.go b/pkg/sql/old_foreign_key_desc_test.go index 240c69a6cc4b..00fd31cae192 100644 --- a/pkg/sql/old_foreign_key_desc_test.go +++ b/pkg/sql/old_foreign_key_desc_test.go @@ -17,6 +17,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" @@ -52,19 +53,19 @@ CREATE INDEX ON t.t1 (x); desc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "t1") desc = catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "t2") // Remember the old foreign keys. - oldInboundFKs := append([]descpb.ForeignKeyConstraint{}, desc.InboundFKs...) + oldInboundFKs := append([]descpb.ForeignKeyConstraint{}, desc.GetInboundFKs()...) // downgradeForeignKey downgrades a table descriptor's foreign key representation // to the pre-19.2 table descriptor format where foreign key information // is stored on the index. - downgradeForeignKey := func(tbl *tabledesc.Immutable) *tabledesc.Immutable { + downgradeForeignKey := func(tbl catalog.TableDescriptor) catalog.TableDescriptor { // Downgrade the outbound foreign keys. 
- for i := range tbl.OutboundFKs { - fk := &tbl.OutboundFKs[i] + for i := range tbl.GetOutboundFKs() { + fk := &tbl.GetOutboundFKs()[i] idx, err := tabledesc.FindFKOriginIndex(tbl, fk.OriginColumnIDs) if err != nil { t.Fatal(err) } - var referencedTbl *tabledesc.Immutable + var referencedTbl catalog.TableDescriptor err = kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { referencedTbl, err = catalogkv.MustGetTableDescByID(ctx, txn, keys.SystemSQLCodec, fk.ReferencedTableID) return err @@ -87,15 +88,15 @@ CREATE INDEX ON t.t1 (x); Match: fk.Match, } } - tbl.OutboundFKs = nil + tbl.TableDesc().OutboundFKs = nil // Downgrade the inbound foreign keys. - for i := range tbl.InboundFKs { - fk := &tbl.InboundFKs[i] + for i := range tbl.GetInboundFKs() { + fk := &tbl.GetInboundFKs()[i] refIdx, err := tabledesc.FindFKReferencedUniqueConstraint(desc, fk.ReferencedColumnIDs) if err != nil { t.Fatal(err) } - var originTbl *tabledesc.Immutable + var originTbl catalog.TableDescriptor if err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { originTbl, err = catalogkv.MustGetTableDescByID(ctx, txn, keys.SystemSQLCodec, fk.OriginTableID) return err @@ -114,13 +115,13 @@ CREATE INDEX ON t.t1 (x); idx := refIdx.(*descpb.IndexDescriptor) idx.ReferencedBy = append(idx.ReferencedBy, fkRef) } - tbl.InboundFKs = nil + tbl.TableDesc().InboundFKs = nil return tbl } err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { b := txn.NewBatch() newDesc := downgradeForeignKey(desc) - if err := catalogkv.WriteDescToBatch(ctx, false, s.ClusterSettings(), b, keys.SystemSQLCodec, desc.ID, newDesc); err != nil { + if err := catalogkv.WriteDescToBatch(ctx, false, s.ClusterSettings(), b, keys.SystemSQLCodec, desc.GetID(), newDesc); err != nil { return err } return txn.Run(ctx, b) @@ -135,13 +136,13 @@ CREATE INDEX ON t.t1 (x); desc = catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "t2") // Remove the validity field on all the descriptors 
for comparison, since // foreign keys on the referenced side's validity is not always updated correctly. - for i := range desc.InboundFKs { - desc.InboundFKs[i].Validity = descpb.ConstraintValidity_Validated + for i := range desc.TableDesc().InboundFKs { + desc.TableDesc().InboundFKs[i].Validity = descpb.ConstraintValidity_Validated } for i := range oldInboundFKs { oldInboundFKs[i].Validity = descpb.ConstraintValidity_Validated } - if !reflect.DeepEqual(desc.InboundFKs, oldInboundFKs) { - t.Error("expected fks", oldInboundFKs, "but found", desc.InboundFKs) + if !reflect.DeepEqual(desc.GetInboundFKs(), oldInboundFKs) { + t.Error("expected fks", oldInboundFKs, "but found", desc.GetInboundFKs()) } } diff --git a/pkg/sql/opt_catalog.go b/pkg/sql/opt_catalog.go index 6171993cd325..8715bfaaec45 100644 --- a/pkg/sql/opt_catalog.go +++ b/pkg/sql/opt_catalog.go @@ -27,7 +27,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/resolver" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" @@ -59,7 +58,7 @@ type optCatalog struct { // repeated calls for the same data source. // Note that the data source object might still need to be recreated if // something outside of the descriptor has changed (e.g. table stats). - dataSources map[*tabledesc.Immutable]cat.DataSource + dataSources map[catalog.TableDescriptor]cat.DataSource // tn is a temporary name used during resolution to avoid heap allocation. tn tree.TableName @@ -72,7 +71,7 @@ var _ cat.Catalog = &optCatalog{} // called for each query. 
func (oc *optCatalog) init(planner *planner) { oc.planner = planner - oc.dataSources = make(map[*tabledesc.Immutable]cat.DataSource) + oc.dataSources = make(map[catalog.TableDescriptor]cat.DataSource) } // reset prepares the optCatalog to be used for a new query. @@ -81,7 +80,7 @@ func (oc *optCatalog) reset() { // This deals with possible edge cases where we do a lot of DDL in a // long-lived session. if len(oc.dataSources) > 100 { - oc.dataSources = make(map[*tabledesc.Immutable]cat.DataSource) + oc.dataSources = make(map[catalog.TableDescriptor]cat.DataSource) } oc.cfg = oc.planner.execCfg.SystemConfig.GetSystemConfig() @@ -276,7 +275,7 @@ func getDescFromCatalogObjectForPermissions(o cat.Object) (catalog.Descriptor, e } } -func getDescForDataSource(o cat.DataSource) (*tabledesc.Immutable, error) { +func getDescForDataSource(o cat.DataSource) (catalog.TableDescriptor, error) { switch t := o.(type) { case *optTable: return t.desc, nil @@ -347,18 +346,18 @@ func (oc *optCatalog) fullyQualifiedNameWithTxn( return cat.DataSourceName{}, err } - dbID := desc.ParentID + dbID := desc.GetParentID() dbDesc, err := catalogkv.MustGetDatabaseDescByID(ctx, txn, oc.codec(), dbID) if err != nil { return cat.DataSourceName{}, err } - return tree.MakeTableName(tree.Name(dbDesc.GetName()), tree.Name(desc.Name)), nil + return tree.MakeTableName(tree.Name(dbDesc.GetName()), tree.Name(desc.GetName())), nil } // dataSourceForDesc returns a data source wrapper for the given descriptor. // The wrapper might come from the cache, or it may be created now. func (oc *optCatalog) dataSourceForDesc( - ctx context.Context, flags cat.Flags, desc *tabledesc.Immutable, name *cat.DataSourceName, + ctx context.Context, flags cat.Flags, desc catalog.TableDescriptor, name *cat.DataSourceName, ) (cat.DataSource, error) { // Because they are backed by physical data, we treat materialized views // as tables for the purposes of planning. 
@@ -390,7 +389,7 @@ func (oc *optCatalog) dataSourceForDesc( // dataSourceForTable returns a table data source wrapper for the given descriptor. // The wrapper might come from the cache, or it may be created now. func (oc *optCatalog) dataSourceForTable( - ctx context.Context, flags cat.Flags, desc *tabledesc.Immutable, name *cat.DataSourceName, + ctx context.Context, flags cat.Flags, desc catalog.TableDescriptor, name *cat.DataSourceName, ) (cat.DataSource, error) { if desc.IsVirtualTable() { // Virtual tables can have multiple effective instances that utilize the @@ -404,7 +403,7 @@ func (oc *optCatalog) dataSourceForTable( var tableStats []*stats.TableStatistic if !flags.NoTableStats { var err error - tableStats, err = oc.planner.execCfg.TableStatsCache.GetTableStats(context.TODO(), desc.ID) + tableStats, err = oc.planner.execCfg.TableStatsCache.GetTableStats(context.TODO(), desc.GetID()) if err != nil { // Ignore any error. We still want to be able to run queries even if we lose // access to the statistics table. @@ -438,14 +437,14 @@ var emptyZoneConfig = &zonepb.ZoneConfig{} // ZoneConfigs are stored in protobuf binary format in the SystemConfig, which // is gossiped around the cluster. Note that the returned ZoneConfig might be // somewhat stale, since it's taken from the gossiped SystemConfig. -func (oc *optCatalog) getZoneConfig(desc *tabledesc.Immutable) (*zonepb.ZoneConfig, error) { +func (oc *optCatalog) getZoneConfig(desc catalog.TableDescriptor) (*zonepb.ZoneConfig, error) { // Lookup table's zone if system config is available (it may not be as node // is starting up and before it's received the gossiped config). If it is // not available, use an empty config that has no zone constraints. 
if oc.cfg == nil || desc.IsVirtualTable() { return emptyZoneConfig, nil } - zone, err := oc.cfg.GetZoneConfigForObject(oc.codec(), uint32(desc.ID)) + zone, err := oc.cfg.GetZoneConfigForObject(oc.codec(), uint32(desc.GetID())) if err != nil { return nil, err } @@ -460,26 +459,26 @@ func (oc *optCatalog) codec() keys.SQLCodec { return oc.planner.ExecCfg().Codec } -// optView is a wrapper around sqlbase.Immutable that implements +// optView is a wrapper around catalog.TableDescriptor that implements // the cat.Object, cat.DataSource, and cat.View interfaces. type optView struct { - desc *tabledesc.Immutable + desc catalog.TableDescriptor } var _ cat.View = &optView{} -func newOptView(desc *tabledesc.Immutable) *optView { +func newOptView(desc catalog.TableDescriptor) *optView { return &optView{desc: desc} } // ID is part of the cat.Object interface. func (ov *optView) ID() cat.StableID { - return cat.StableID(ov.desc.ID) + return cat.StableID(ov.desc.GetID()) } // PostgresDescriptorID is part of the cat.Object interface. func (ov *optView) PostgresDescriptorID() cat.StableID { - return cat.StableID(ov.desc.ID) + return cat.StableID(ov.desc.GetID()) } // Equals is part of the cat.Object interface. @@ -488,12 +487,12 @@ func (ov *optView) Equals(other cat.Object) bool { if !ok { return false } - return ov.desc.ID == otherView.desc.ID && ov.desc.Version == otherView.desc.Version + return ov.desc.GetID() == otherView.desc.GetID() && ov.desc.GetVersion() == otherView.desc.GetVersion() } // Name is part of the cat.View interface. func (ov *optView) Name() tree.Name { - return tree.Name(ov.desc.Name) + return tree.Name(ov.desc.GetName()) } // IsSystemView is part of the cat.View interface. @@ -503,40 +502,40 @@ func (ov *optView) IsSystemView() bool { // Query is part of the cat.View interface. func (ov *optView) Query() string { - return ov.desc.ViewQuery + return ov.desc.GetViewQuery() } // ColumnNameCount is part of the cat.View interface. 
func (ov *optView) ColumnNameCount() int { - return len(ov.desc.Columns) + return len(ov.desc.GetPublicColumns()) } // ColumnName is part of the cat.View interface. func (ov *optView) ColumnName(i int) tree.Name { - return tree.Name(ov.desc.Columns[i].Name) + return tree.Name(ov.desc.GetPublicColumns()[i].Name) } -// optSequence is a wrapper around sqlbase.Immutable that +// optSequence is a wrapper around catalog.TableDescriptor that // implements the cat.Object and cat.DataSource interfaces. type optSequence struct { - desc *tabledesc.Immutable + desc catalog.TableDescriptor } var _ cat.DataSource = &optSequence{} var _ cat.Sequence = &optSequence{} -func newOptSequence(desc *tabledesc.Immutable) *optSequence { +func newOptSequence(desc catalog.TableDescriptor) *optSequence { return &optSequence{desc: desc} } // ID is part of the cat.Object interface. func (os *optSequence) ID() cat.StableID { - return cat.StableID(os.desc.ID) + return cat.StableID(os.desc.GetID()) } // PostgresDescriptorID is part of the cat.Object interface. func (os *optSequence) PostgresDescriptorID() cat.StableID { - return cat.StableID(os.desc.ID) + return cat.StableID(os.desc.GetID()) } // Equals is part of the cat.Object interface. @@ -545,21 +544,21 @@ func (os *optSequence) Equals(other cat.Object) bool { if !ok { return false } - return os.desc.ID == otherSeq.desc.ID && os.desc.Version == otherSeq.desc.Version + return os.desc.GetID() == otherSeq.desc.GetID() && os.desc.GetVersion() == otherSeq.desc.GetVersion() } // Name is part of the cat.Sequence interface. func (os *optSequence) Name() tree.Name { - return tree.Name(os.desc.Name) + return tree.Name(os.desc.GetName()) } // SequenceMarker is part of the cat.Sequence interface. func (os *optSequence) SequenceMarker() {} -// optTable is a wrapper around sqlbase.Immutable that caches +// optTable is a wrapper around catalog.TableDescriptor that caches // index wrappers and maintains a ColumnID => Column mapping for fast lookup. 
type optTable struct { - desc *tabledesc.Immutable + desc catalog.TableDescriptor // columns contains all the columns presented to the catalog. This includes: // - ordinary table columns (those in the table descriptor) @@ -613,7 +612,7 @@ type optTable struct { var _ cat.Table = &optTable{} func newOptTable( - desc *tabledesc.Immutable, + desc catalog.TableDescriptor, codec keys.SQLCodec, stats []*stats.TableStatistic, tblZone *zonepb.ZoneConfig, @@ -637,7 +636,7 @@ func newOptTable( } ot.columns = make([]cat.Column, len(colDescs), numCols) - numOrdinary := len(ot.desc.Columns) + numOrdinary := len(ot.desc.GetPublicColumns()) numWritable := len(ot.desc.WritableColumns()) for ordinal := range colDescs { desc := colDescs[ordinal] @@ -721,9 +720,9 @@ func newOptTable( // Add unique without index constraints. Constraints for implicitly // partitioned unique indexes will be added below. - ot.uniqueConstraints = make([]optUniqueConstraint, 0, len(ot.desc.UniqueWithoutIndexConstraints)) - for i := range ot.desc.UniqueWithoutIndexConstraints { - u := &ot.desc.UniqueWithoutIndexConstraints[i] + ot.uniqueConstraints = make([]optUniqueConstraint, 0, len(ot.desc.GetUniqueWithoutIndexConstraints())) + for i := range ot.desc.GetUniqueWithoutIndexConstraints() { + u := &ot.desc.GetUniqueWithoutIndexConstraints()[i] ot.uniqueConstraints = append(ot.uniqueConstraints, optUniqueConstraint{ name: u.Name, table: ot.ID(), @@ -795,8 +794,8 @@ func newOptTable( } } - for i := range ot.desc.OutboundFKs { - fk := &ot.desc.OutboundFKs[i] + for i := range ot.desc.GetOutboundFKs() { + fk := &ot.desc.GetOutboundFKs()[i] ot.outboundFKs = append(ot.outboundFKs, optForeignKeyConstraint{ name: fk.Name, originTable: ot.ID(), @@ -809,8 +808,8 @@ func newOptTable( updateAction: fk.OnUpdate, }) } - for i := range ot.desc.InboundFKs { - fk := &ot.desc.InboundFKs[i] + for i := range ot.desc.GetInboundFKs() { + fk := &ot.desc.GetInboundFKs()[i] ot.inboundFKs = append(ot.inboundFKs, 
optForeignKeyConstraint{ name: fk.Name, originTable: cat.StableID(fk.OriginTableID), @@ -824,10 +823,10 @@ func newOptTable( }) } - ot.primaryFamily.init(ot, &desc.Families[0]) - ot.families = make([]optFamily, len(desc.Families)-1) + ot.primaryFamily.init(ot, &desc.GetFamilies()[0]) + ot.families = make([]optFamily, len(desc.GetFamilies())-1) for i := range ot.families { - ot.families[i].init(ot, &desc.Families[i+1]) + ot.families[i].init(ot, &desc.GetFamilies()[i+1]) } // Synthesize any check constraints for user defined types. @@ -886,18 +885,18 @@ func newOptTable( // ID is part of the cat.Object interface. func (ot *optTable) ID() cat.StableID { - return cat.StableID(ot.desc.ID) + return cat.StableID(ot.desc.GetID()) } // PostgresDescriptorID is part of the cat.Object interface. func (ot *optTable) PostgresDescriptorID() cat.StableID { - return cat.StableID(ot.desc.ID) + return cat.StableID(ot.desc.GetID()) } // isStale checks if the optTable object needs to be refreshed because the stats, // zone config, or used types have changed. False positives are ok. func (ot *optTable) isStale( - rawDesc *tabledesc.Immutable, tableStats []*stats.TableStatistic, zone *zonepb.ZoneConfig, + rawDesc catalog.TableDescriptor, tableStats []*stats.TableStatistic, zone *zonepb.ZoneConfig, ) bool { // Fast check to verify that the statistics haven't changed: we check the // length and the address of the underlying array. This is not a perfect @@ -929,7 +928,7 @@ func (ot *optTable) Equals(other cat.Object) bool { // Fast path when it is the same object. return true } - if ot.desc.ID != otherTable.desc.ID || ot.desc.Version != otherTable.desc.Version { + if ot.desc.GetID() != otherTable.desc.GetID() || ot.desc.GetVersion() != otherTable.desc.GetVersion() { return false } @@ -969,7 +968,7 @@ func (ot *optTable) Equals(other cat.Object) bool { // Name is part of the cat.Table interface. 
func (ot *optTable) Name() tree.Name { - return tree.Name(ot.desc.Name) + return tree.Name(ot.desc.GetName()) } // IsVirtualTable is part of the cat.Table interface. @@ -1292,7 +1291,7 @@ func (oi *optIndex) Span() roachpb.Span { desc := oi.tab.desc // Tables up to MaxSystemConfigDescID are grouped in a single system config // span. - if desc.ID <= keys.MaxSystemConfigDescID { + if desc.GetID() <= keys.MaxSystemConfigDescID { return keys.SystemConfigSpan } return desc.IndexSpan(oi.tab.codec, oi.desc.ID) @@ -1625,7 +1624,7 @@ func (fk *optForeignKeyConstraint) UpdateReferenceAction() tree.ReferenceAction // optVirtualTable is similar to optTable but is used with virtual tables. type optVirtualTable struct { - desc *tabledesc.Immutable + desc catalog.TableDescriptor // columns contains all the columns presented to the catalog. This includes // the dummy PK column and the columns in the table descriptor. @@ -1664,10 +1663,10 @@ type optVirtualTable struct { var _ cat.Table = &optVirtualTable{} func newOptVirtualTable( - ctx context.Context, oc *optCatalog, desc *tabledesc.Immutable, name *cat.DataSourceName, + ctx context.Context, oc *optCatalog, desc catalog.TableDescriptor, name *cat.DataSourceName, ) (*optVirtualTable, error) { // Calculate the stable ID (see the comment for optVirtualTable.id). - id := cat.StableID(desc.ID) + id := cat.StableID(desc.GetID()) if name.Catalog() != "" { // TODO(radu): it's unfortunate that we have to lookup the schema again. _, prefixI, err := oc.planner.LookupSchema(ctx, name.Catalog(), name.Schema()) @@ -1698,7 +1697,7 @@ func newOptVirtualTable( name: *name, } - ot.columns = make([]cat.Column, len(desc.Columns)+1) + ot.columns = make([]cat.Column, len(desc.GetPublicColumns())+1) // Init dummy PK column. 
ot.columns[0].InitNonVirtual( 0, @@ -1711,8 +1710,8 @@ func newOptVirtualTable( nil, /* defaultExpr */ nil, /* computedExpr */ ) - for i := range desc.Columns { - d := desc.Columns[i] + for i := range desc.GetPublicColumns() { + d := desc.GetPublicColumns()[i] ot.columns[i+1].InitNonVirtual( i+1, cat.StableID(d.ID), @@ -1776,7 +1775,7 @@ func (ot *optVirtualTable) ID() cat.StableID { // PostgresDescriptorID is part of the cat.Object interface. func (ot *optVirtualTable) PostgresDescriptorID() cat.StableID { - return cat.StableID(ot.desc.ID) + return cat.StableID(ot.desc.GetID()) } // Equals is part of the cat.Object interface. @@ -1789,7 +1788,7 @@ func (ot *optVirtualTable) Equals(other cat.Object) bool { // Fast path when it is the same object. return true } - if ot.id != otherTable.id || ot.desc.Version != otherTable.desc.Version { + if ot.id != otherTable.id || ot.desc.GetVersion() != otherTable.desc.GetVersion() { return false } @@ -1823,8 +1822,8 @@ func (ot *optVirtualTable) Column(i int) *cat.Column { // getColDesc is part of optCatalogTableInterface. 
func (ot *optVirtualTable) getColDesc(i int) *descpb.ColumnDescriptor { - if i > 0 && i <= len(ot.desc.Columns) { - return &ot.desc.Columns[i-1] + if i > 0 && i <= len(ot.desc.GetPublicColumns()) { + return &ot.desc.GetPublicColumns()[i-1] } return nil } diff --git a/pkg/sql/opt_exec_factory.go b/pkg/sql/opt_exec_factory.go index 5eec42043a32..c7055b7e4c90 100644 --- a/pkg/sql/opt_exec_factory.go +++ b/pkg/sql/opt_exec_factory.go @@ -26,7 +26,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemaexpr" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/inverted" "github.com/cockroachdb/cockroach/pkg/sql/opt" "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" @@ -135,7 +134,7 @@ func (ef *execFactory) ConstructScan( func generateScanSpans( evalCtx *tree.EvalContext, codec keys.SQLCodec, - tabDesc *tabledesc.Immutable, + tabDesc catalog.TableDescriptor, indexDesc *descpb.IndexDescriptor, params exec.ScanParams, ) (roachpb.Spans, error) { @@ -658,7 +657,7 @@ func (ef *execFactory) constructVirtualTableLookupJoin( return nil, err } tableScan.index = indexDesc - vtableCols := colinfo.ResultColumnsFromColDescs(tableDesc.ID, tableDesc.Columns) + vtableCols := colinfo.ResultColumnsFromColDescs(tableDesc.GetID(), tableDesc.GetPublicColumns()) projectedVtableCols := planColumns(&tableScan) outputCols := make(colinfo.ResultColumns, 0, len(inputCols)+len(projectedVtableCols)) outputCols = append(outputCols, inputCols...) @@ -750,7 +749,7 @@ func (ef *execFactory) ConstructInvertedJoin( // and requested cols. 
func (ef *execFactory) constructScanForZigzag( indexDesc *descpb.IndexDescriptor, - tableDesc *tabledesc.Immutable, + tableDesc catalog.TableDescriptor, cols exec.TableColumnOrdinalSet, ) (*scanNode, error) { @@ -759,7 +758,7 @@ func (ef *execFactory) constructScanForZigzag( } for c, ok := cols.Next(0); ok; c, ok = cols.Next(c + 1) { - colCfg.wantedColumns = append(colCfg.wantedColumns, tree.ColumnID(tableDesc.Columns[c].ID)) + colCfg.wantedColumns = append(colCfg.wantedColumns, tree.ColumnID(tableDesc.GetPublicColumns()[c].ID)) } scan := ef.planner.Scan() @@ -1194,7 +1193,7 @@ func (ef *execFactory) ConstructInsert( // Set the tabColIdxToRetIdx for the mutation. Insert always returns // non-mutation columns in the same order they are defined in the table. - ins.run.tabColIdxToRetIdx = row.ColMapping(tabDesc.Columns, returnColDescs) + ins.run.tabColIdxToRetIdx = row.ColMapping(tabDesc.GetPublicColumns(), returnColDescs) ins.run.rowsNeeded = true } @@ -1269,7 +1268,7 @@ func (ef *execFactory) ConstructInsertFastPath( // Set the tabColIdxToRetIdx for the mutation. Insert always returns // non-mutation columns in the same order they are defined in the table. - ins.run.tabColIdxToRetIdx = row.ColMapping(tabDesc.Columns, returnColDescs) + ins.run.tabColIdxToRetIdx = row.ColMapping(tabDesc.GetPublicColumns(), returnColDescs) ins.run.rowsNeeded = true } @@ -1486,7 +1485,7 @@ func (ef *execFactory) ConstructUpsert( // Update the tabColIdxToRetIdx for the mutation. Upsert returns // non-mutation columns specified, in the same order they are defined // in the table. 
- ups.run.tw.tabColIdxToRetIdx = row.ColMapping(tabDesc.Columns, returnColDescs) + ups.run.tw.tabColIdxToRetIdx = row.ColMapping(tabDesc.GetPublicColumns(), returnColDescs) ups.run.tw.returnCols = returnColDescs ups.run.tw.rowsNeeded = true } @@ -1597,7 +1596,7 @@ func (ef *execFactory) ConstructDeleteRange( if len(interleavedTables) > 0 { dr.interleavedFastPath = true - dr.interleavedDesc = make([]*tabledesc.Immutable, len(interleavedTables)) + dr.interleavedDesc = make([]catalog.TableDescriptor, len(interleavedTables)) for i := range dr.interleavedDesc { dr.interleavedDesc[i] = interleavedTables[i].(*optTable).desc } @@ -1676,13 +1675,13 @@ func (ef *execFactory) ConstructCreateView( if !d.ColumnOrdinals.Empty() { ref.ColumnIDs = make([]descpb.ColumnID, 0, d.ColumnOrdinals.Len()) d.ColumnOrdinals.ForEach(func(ord int) { - ref.ColumnIDs = append(ref.ColumnIDs, desc.Columns[ord].ID) + ref.ColumnIDs = append(ref.ColumnIDs, desc.GetPublicColumns()[ord].ID) }) } - entry := planDeps[desc.ID] + entry := planDeps[desc.GetID()] entry.desc = desc entry.deps = append(entry.deps, ref) - planDeps[desc.ID] = entry + planDeps[desc.GetID()] = entry } return &createViewNode{ diff --git a/pkg/sql/pg_catalog.go b/pkg/sql/pg_catalog.go index ff5fca05b430..349ec9aa9f14 100644 --- a/pkg/sql/pg_catalog.go +++ b/pkg/sql/pg_catalog.go @@ -1162,8 +1162,8 @@ https://www.postgresql.org/docs/9.5/catalog-pg-depend.html`, table catalog.TableDescriptor, tableLookup tableLookupFn, ) error { - pgConstraintTableOid := tableOid(pgConstraintsDesc.ID) - pgClassTableOid := tableOid(pgClassDesc.ID) + pgConstraintTableOid := tableOid(pgConstraintsDesc.GetID()) + pgClassTableOid := tableOid(pgClassDesc.GetID()) if table.IsSequence() && !table.GetSequenceOpts().SequenceOwner.Equal(descpb.TableDescriptor_SequenceOpts_SequenceOwner{}) { refObjID := tableOid(table.GetSequenceOpts().SequenceOwner.OwnerTableID) @@ -1685,7 +1685,7 @@ https://www.postgresql.org/docs/9.6/view-pg-matviews.html`, 
tree.NewDName(desc.GetName()), // matviewname getOwnerName(desc), // matviewowner tree.DNull, // tablespace - tree.MakeDBool(len(desc.TableDesc().Indexes) > 0), // hasindexes + tree.MakeDBool(len(desc.PublicNonPrimaryIndexes()) > 0), // hasindexes tree.DBoolTrue, // ispopulated, tree.NewDString(desc.GetViewQuery()), // definition ) diff --git a/pkg/sql/pgwire_internal_test.go b/pkg/sql/pgwire_internal_test.go index a49bb8b971e6..489cc3c3c8c0 100644 --- a/pkg/sql/pgwire_internal_test.go +++ b/pkg/sql/pgwire_internal_test.go @@ -78,7 +78,7 @@ func TestPGWireConnectionCloseReleasesLeases(t *testing.T) { lm.VisitLeases(func( desc catalog.Descriptor, dropped bool, refCount int, expiration tree.DTimestamp, ) (wantMore bool) { - if desc.GetID() == tableDesc.ID { + if desc.GetID() == tableDesc.GetID() { leases++ } return true @@ -93,7 +93,7 @@ func TestPGWireConnectionCloseReleasesLeases(t *testing.T) { lm.VisitLeases(func( desc catalog.Descriptor, dropped bool, refCount int, expiration tree.DTimestamp, ) (wantMore bool) { - if desc.GetID() == tableDesc.ID { + if desc.GetID() == tableDesc.GetID() { totalRefCount += refCount } return true diff --git a/pkg/sql/physicalplan/BUILD.bazel b/pkg/sql/physicalplan/BUILD.bazel index 47705f8ca994..abb7b58db693 100644 --- a/pkg/sql/physicalplan/BUILD.bazel +++ b/pkg/sql/physicalplan/BUILD.bazel @@ -54,9 +54,9 @@ go_test( "//pkg/security", "//pkg/security/securitytest", "//pkg/server", + "//pkg/sql/catalog", "//pkg/sql/catalog/catalogkeys", "//pkg/sql/catalog/catalogkv", - "//pkg/sql/catalog/tabledesc", "//pkg/sql/distsql", "//pkg/sql/execinfra", "//pkg/sql/execinfrapb", diff --git a/pkg/sql/physicalplan/aggregator_funcs_test.go b/pkg/sql/physicalplan/aggregator_funcs_test.go index c780fdf34bb0..2681984cbb09 100644 --- a/pkg/sql/physicalplan/aggregator_funcs_test.go +++ b/pkg/sql/physicalplan/aggregator_funcs_test.go @@ -20,8 +20,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" 
"github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/distsql" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" @@ -117,13 +117,13 @@ func checkDistAggregationInfo( ctx context.Context, t *testing.T, srv serverutils.TestServerInterface, - tableDesc *tabledesc.Immutable, + tableDesc catalog.TableDescriptor, colIdx int, numRows int, fn execinfrapb.AggregatorSpec_Func, info DistAggregationInfo, ) { - colType := tableDesc.Columns[colIdx].Type + colType := tableDesc.GetPublicColumns()[colIdx].Type makeTableReader := func(startPK, endPK int, streamID int) execinfrapb.ProcessorSpec { tr := execinfrapb.TableReaderSpec{ @@ -463,9 +463,9 @@ func TestDistAggregationTable(t *testing.T) { // We're going to test each aggregation function on every column that can be // used as input for it. foundCol := false - for colIdx := 1; colIdx < len(desc.Columns); colIdx++ { + for colIdx := 1; colIdx < len(desc.GetPublicColumns()); colIdx++ { // See if this column works with this function. 
- _, _, err := execinfrapb.GetAggregateInfo(fn, desc.Columns[colIdx].Type) + _, _, err := execinfrapb.GetAggregateInfo(fn, desc.GetPublicColumns()[colIdx].Type) if err != nil { continue } @@ -477,7 +477,7 @@ func TestDistAggregationTable(t *testing.T) { } foundCol = true for _, numRows := range []int{5, numRows / 10, numRows / 2, numRows} { - name := fmt.Sprintf("%s/%s/%d", fn, desc.Columns[colIdx].Name, numRows) + name := fmt.Sprintf("%s/%s/%d", fn, desc.GetPublicColumns()[colIdx].Name, numRows) t.Run(name, func(t *testing.T) { checkDistAggregationInfo( context.Background(), t, tc.Server(0), desc, colIdx, numRows, fn, info) diff --git a/pkg/sql/physicalplan/span_resolver_test.go b/pkg/sql/physicalplan/span_resolver_test.go index 90ff68eb6204..c9211cff5dd6 100644 --- a/pkg/sql/physicalplan/span_resolver_test.go +++ b/pkg/sql/physicalplan/span_resolver_test.go @@ -23,8 +23,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/server" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/physicalplan" "github.com/cockroachdb/cockroach/pkg/sql/physicalplan/replicaoracle" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" @@ -167,7 +167,7 @@ func populateCache(db *gosql.DB, expectedNumRows int) error { // `CREATE TABLE test (k INT PRIMARY KEY)` at row with value pk (the row will be // the first on the right of the split). 
func splitRangeAtVal( - ts *server.TestServer, tableDesc *tabledesc.Immutable, pk int, + ts *server.TestServer, tableDesc catalog.TableDescriptor, pk int, ) (roachpb.RangeDescriptor, roachpb.RangeDescriptor, error) { if len(tableDesc.PublicNonPrimaryIndexes()) != 0 { return roachpb.RangeDescriptor{}, roachpb.RangeDescriptor{}, @@ -321,7 +321,7 @@ func TestMixedDirections(t *testing.T) { func setupRanges( db *gosql.DB, s *server.TestServer, cdb *kv.DB, t *testing.T, -) ([]roachpb.RangeDescriptor, *tabledesc.Immutable) { +) ([]roachpb.RangeDescriptor, catalog.TableDescriptor) { if _, err := db.Exec(`CREATE DATABASE t`); err != nil { t.Fatal(err) } @@ -451,7 +451,7 @@ func expectResolved(actual [][]rngInfo, expected ...[]rngInfo) error { return nil } -func makeSpan(tableDesc *tabledesc.Immutable, i, j int) roachpb.Span { +func makeSpan(tableDesc catalog.TableDescriptor, i, j int) roachpb.Span { makeKey := func(val int) roachpb.Key { key, err := rowenc.TestingMakePrimaryIndexKey(tableDesc, val) if err != nil { diff --git a/pkg/sql/planhook.go b/pkg/sql/planhook.go index 8522bf961b88..f61af5e7bba6 100644 --- a/pkg/sql/planhook.go +++ b/pkg/sql/planhook.go @@ -15,6 +15,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" @@ -92,7 +93,7 @@ type PlanHookState interface { ctx context.Context, tn *tree.TableName, required bool, requiredType tree.RequiredTableKind, ) (table *tabledesc.Mutable, err error) ShowCreate( - ctx context.Context, dbPrefix string, allDescs []descpb.Descriptor, desc *tabledesc.Immutable, displayOptions ShowCreateDisplayOptions, + ctx context.Context, dbPrefix string, allDescs []descpb.Descriptor, desc catalog.TableDescriptor, 
displayOptions ShowCreateDisplayOptions, ) (string, error) CreateSchemaNamespaceEntry(ctx context.Context, schemaNameKey roachpb.Key, schemaID descpb.ID) error diff --git a/pkg/sql/planner.go b/pkg/sql/planner.go index 41ba1c2bb99f..c387992bbbe4 100644 --- a/pkg/sql/planner.go +++ b/pkg/sql/planner.go @@ -28,7 +28,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/lease" "github.com/cockroachdb/cockroach/pkg/sql/catalog/resolver" "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemaexpr" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/opt/exec" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/querycache" @@ -496,7 +495,7 @@ func (p *planner) ResolveTableName(ctx context.Context, tn *tree.TableName) (tre if err != nil { return 0, err } - return tree.ID(desc.ID), nil + return tree.ID(desc.GetID()), nil } // LookupTableByID looks up a table, by the given descriptor ID. Based on the @@ -506,7 +505,7 @@ func (p *planner) ResolveTableName(ctx context.Context, tn *tree.TableName) (tre // of having its own logic for lookups. 
func (p *planner) LookupTableByID( ctx context.Context, tableID descpb.ID, -) (*tabledesc.Immutable, error) { +) (catalog.TableDescriptor, error) { if entry, err := p.getVirtualTabler().getVirtualTableEntryByID(tableID); err == nil { return entry.desc, nil } diff --git a/pkg/sql/reassign_owned_by.go b/pkg/sql/reassign_owned_by.go index f06db3d6197f..41e82ac010ff 100644 --- a/pkg/sql/reassign_owned_by.go +++ b/pkg/sql/reassign_owned_by.go @@ -14,6 +14,7 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/server/telemetry" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemadesc" @@ -150,10 +151,10 @@ func (n *reassignOwnedByNode) reassignSchemaOwner( } func (n *reassignOwnedByNode) reassignTableOwner( - tbDesc *tabledesc.Immutable, params runParams, + tbDesc catalog.TableDescriptor, params runParams, ) error { mutableTbDesc, err := params.p.Descriptors().GetMutableDescriptorByID( - params.ctx, tbDesc.ID, params.p.txn) + params.ctx, tbDesc.GetID(), params.p.txn) if err != nil { return err } diff --git a/pkg/sql/relocate.go b/pkg/sql/relocate.go index 1adabfcc0eac..40fd2bc6cf24 100644 --- a/pkg/sql/relocate.go +++ b/pkg/sql/relocate.go @@ -17,9 +17,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/log" 
@@ -30,7 +30,7 @@ type relocateNode struct { optColumnsSlot relocateLease bool - tableDesc *tabledesc.Immutable + tableDesc catalog.TableDescriptor index *descpb.IndexDescriptor rows planNode diff --git a/pkg/sql/rename_database.go b/pkg/sql/rename_database.go index 2da355b84cf1..a95b7dbfb68e 100644 --- a/pkg/sql/rename_database.go +++ b/pkg/sql/rename_database.go @@ -18,7 +18,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" @@ -183,14 +182,14 @@ func (n *renameDatabaseNode) startExec(params runParams) error { tree.Name(tbDesc.GetName()), ) var dependentDescQualifiedString string - if dbDesc.GetID() != dependentDesc.ParentID || tbDesc.GetParentSchemaID() != dependentDesc.GetParentSchemaID() { + if dbDesc.GetID() != dependentDesc.GetParentID() || tbDesc.GetParentSchemaID() != dependentDesc.GetParentSchemaID() { descFQName, err := p.getQualifiedTableName(ctx, dependentDesc) if err != nil { log.Warningf( ctx, "unable to retrieve fully-qualified name of %s (id: %d): %v", tbTableName.String(), - dependentDesc.ID, + dependentDesc.GetID(), err, ) return sqlerrors.NewDependentObjectErrorf( @@ -202,7 +201,7 @@ func (n *renameDatabaseNode) startExec(params runParams) error { dependentDescTableName := tree.MakeTableNameWithSchema( tree.Name(dbDesc.GetName()), tree.Name(schema), - tree.Name(dependentDesc.Name), + tree.Name(dependentDesc.GetName()), ) dependentDescQualifiedString = dependentDescTableName.String() } @@ -261,7 +260,7 @@ func isAllowedDependentDescInRenameDatabase( ctx context.Context, dependedOn *descpb.TableDescriptor_Reference, tbDesc 
catalog.TableDescriptor, - dependentDesc *tabledesc.Immutable, + dependentDesc catalog.TableDescriptor, dbName string, ) (bool, string, error) { // If it is a sequence, and it does not contain the database name, then we have @@ -275,7 +274,7 @@ func isAllowedDependentDescInRenameDatabase( colIDs.Add(int(colID)) } - for _, column := range dependentDesc.Columns { + for _, column := range dependentDesc.GetPublicColumns() { if !colIDs.Contains(int(column.ID)) { continue } @@ -285,7 +284,7 @@ func isAllowedDependentDescInRenameDatabase( return false, "", errors.AssertionFailedf( "rename_database: expected column id %d in table id %d to have a default expr", dependedOn.ID, - dependentDesc.ID, + dependentDesc.GetID(), ) } // Try parse the default expression and find the table name direct reference. @@ -320,7 +319,7 @@ func isAllowedDependentDescInRenameDatabase( return false, "", errors.AssertionFailedf( "expected to find column ids %s in table id %d", colIDs.String(), - dependentDesc.ID, + dependentDesc.GetID(), ) } return true, "", nil diff --git a/pkg/sql/rename_table.go b/pkg/sql/rename_table.go index 97e05f846f6d..cf4240a8417b 100644 --- a/pkg/sql/rename_table.go +++ b/pkg/sql/rename_table.go @@ -280,8 +280,8 @@ func (p *planner) dependentViewError( if err != nil { return err } - viewName := viewDesc.Name - if viewDesc.ParentID != parentID { + viewName := viewDesc.GetName() + if viewDesc.GetParentID() != parentID { viewFQName, err := p.getQualifiedTableName(ctx, viewDesc) if err != nil { log.Warningf(ctx, "unable to retrieve name of view %d: %v", viewID, err) diff --git a/pkg/sql/rename_test.go b/pkg/sql/rename_test.go index d82d5106e5c7..642f9a3f91b9 100644 --- a/pkg/sql/rename_test.go +++ b/pkg/sql/rename_test.go @@ -53,10 +53,10 @@ func TestRenameTable(t *testing.T) { // Check the table descriptor. 
tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "foo") - if tableDesc.Name != oldName { + if tableDesc.GetName() != oldName { t.Fatalf("Wrong table name, expected %s, got: %+v", oldName, tableDesc) } - if tableDesc.ParentID != oldDBID { + if tableDesc.GetParentID() != oldDBID { t.Fatalf("Wrong parent ID on table, expected %d, got: %+v", oldDBID, tableDesc) } @@ -75,15 +75,15 @@ func TestRenameTable(t *testing.T) { // Check the table descriptor again. renamedDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "test2", "bar") - if renamedDesc.Name != newName { + if renamedDesc.GetName() != newName { t.Fatalf("Wrong table name, expected %s, got: %+v", newName, tableDesc) } - if renamedDesc.ParentID != newDBID { + if renamedDesc.GetParentID() != newDBID { t.Fatalf("Wrong parent ID on table, expected %d, got: %+v", newDBID, tableDesc) } - if renamedDesc.ID != tableDesc.ID { + if renamedDesc.GetID() != tableDesc.GetID() { t.Fatalf("Wrong ID after rename, got %d, expected %d", - renamedDesc.ID, tableDesc.ID) + renamedDesc.GetID(), tableDesc.GetID()) } } @@ -148,7 +148,7 @@ CREATE TABLE test.t (a INT PRIMARY KEY); tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t") mu.Lock() - waitTableID = tableDesc.ID + waitTableID = tableDesc.GetID() mu.Unlock() txn, err := db.Begin() @@ -210,7 +210,7 @@ CREATE TABLE test.t (a INT PRIMARY KEY); s.LeaseManager().(*lease.Manager).VisitLeases(func( desc catalog.Descriptor, dropped bool, refCount int, expiration tree.DTimestamp, ) (wantMore bool) { - if desc.GetID() == tableDesc.ID && desc.GetName() == "t" { + if desc.GetID() == tableDesc.GetID() && desc.GetName() == "t" { foundLease = true } return true @@ -391,7 +391,7 @@ CREATE TABLE test.t (a INT PRIMARY KEY); // schema changes and one increment for signaling of the completion of the // drain. 
See the above comment for an explanation of why there's only one // expected version update for draining names. - expectedVersion := tableDesc.Version + 3 + expectedVersion := tableDesc.GetVersion() + 3 // Concurrently, rename the table. start := startRename @@ -414,7 +414,7 @@ CREATE TABLE test.t (a INT PRIMARY KEY); // Table rename to t3 was successful. tableDesc = catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t3") - if version := tableDesc.Version; expectedVersion != version { + if version := tableDesc.GetVersion(); expectedVersion != version { t.Fatalf("version mismatch: expected = %d, current = %d", expectedVersion, version) } diff --git a/pkg/sql/resolver.go b/pkg/sql/resolver.go index dee6f84230fb..45a972fd8d99 100644 --- a/pkg/sql/resolver.go +++ b/pkg/sql/resolver.go @@ -122,7 +122,7 @@ func (p *planner) ResolveMutableTableDescriptor( func (p *planner) ResolveUncachedTableDescriptor( ctx context.Context, tn *tree.TableName, required bool, requiredType tree.RequiredTableKind, -) (table *tabledesc.Immutable, err error) { +) (table catalog.TableDescriptor, err error) { p.runWithOptions(resolveFlags{skipCache: true}, func() { lookupFlags := tree.ObjectLookupFlags{ CommonLookupFlags: tree.CommonLookupFlags{Required: required}, @@ -657,7 +657,7 @@ func (p *planner) getTableAndIndex( return nil, nil, err } optIdx := idx.(*optIndex) - return tabledesc.NewExistingMutable(optIdx.tab.desc.TableDescriptor), optIdx.desc, nil + return tabledesc.NewExistingMutable(*optIdx.tab.desc.TableDesc()), optIdx.desc, nil } // expandTableGlob expands pattern into a list of objects represented @@ -723,7 +723,7 @@ type internalLookupCtx struct { schemaDescs map[descpb.ID]*schemadesc.Immutable schemaNames map[descpb.ID]string schemaIDs []descpb.ID - tbDescs map[descpb.ID]*tabledesc.Immutable + tbDescs map[descpb.ID]catalog.TableDescriptor tbIDs []descpb.ID typDescs map[descpb.ID]*typedesc.Immutable typIDs []descpb.ID @@ -811,7 +811,7 @@ func 
newInternalLookupCtx( schemaNames := map[descpb.ID]string{ keys.PublicSchemaID: tree.PublicSchema, } - tbDescs := make(map[descpb.ID]*tabledesc.Immutable) + tbDescs := make(map[descpb.ID]catalog.TableDescriptor) typDescs := make(map[descpb.ID]*typedesc.Immutable) var tbIDs, typIDs, dbIDs, schemaIDs []descpb.ID // Record descriptors for name lookups. @@ -824,11 +824,11 @@ func newInternalLookupCtx( // Only make the database visible for iteration if the prefix was included. dbIDs = append(dbIDs, desc.GetID()) } - case *tabledesc.Immutable: + case catalog.TableDescriptor: tbDescs[desc.GetID()] = desc - if prefix == nil || prefix.GetID() == desc.ParentID { + if prefix == nil || prefix.GetID() == desc.GetParentID() { // Only make the table visible for iteration if the prefix was included. - tbIDs = append(tbIDs, desc.ID) + tbIDs = append(tbIDs, desc.GetID()) } case *typedesc.Immutable: typDescs[desc.GetID()] = desc @@ -1066,7 +1066,7 @@ func (p *planner) ResolveUncachedTableDescriptorEx( name *tree.UnresolvedObjectName, required bool, requiredType tree.RequiredTableKind, -) (table *tabledesc.Immutable, err error) { +) (table catalog.TableDescriptor, err error) { p.runWithOptions(resolveFlags{skipCache: true}, func() { table, err = p.ResolveExistingObjectEx(ctx, name, required, requiredType) }) @@ -1090,7 +1090,7 @@ func (p *planner) ResolveExistingObjectEx( name *tree.UnresolvedObjectName, required bool, requiredType tree.RequiredTableKind, -) (res *tabledesc.Immutable, err error) { +) (res catalog.TableDescriptor, err error) { lookupFlags := tree.ObjectLookupFlags{ CommonLookupFlags: tree.CommonLookupFlags{Required: required}, DesiredObjectKind: tree.TableObject, @@ -1102,7 +1102,7 @@ func (p *planner) ResolveExistingObjectEx( } tn := tree.MakeTableNameFromPrefix(prefix, tree.Name(name.Object())) name.SetAnnotation(&p.semaCtx.Annotations, &tn) - table := desc.(*tabledesc.Immutable) + table := desc.(catalog.TableDescriptor) // Ensure that the user can access the target 
schema. if err := p.canResolveDescUnderSchema(ctx, table.GetParentSchemaID(), table); err != nil { diff --git a/pkg/sql/revert.go b/pkg/sql/revert.go index 8c11a550c58b..504d12c16164 100644 --- a/pkg/sql/revert.go +++ b/pkg/sql/revert.go @@ -15,8 +15,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/errors" @@ -33,13 +33,13 @@ func RevertTables( ctx context.Context, db *kv.DB, execCfg *ExecutorConfig, - tables []*tabledesc.Immutable, + tables []catalog.TableDescriptor, targetTime hlc.Timestamp, batchSize int64, ) error { reverting := make(map[descpb.ID]bool, len(tables)) for i := range tables { - reverting[tables[i].ID] = true + reverting[tables[i].GetID()] = true } spans := make([]roachpb.Span, 0, len(tables)) @@ -47,12 +47,12 @@ func RevertTables( // Check that all the tables are revertable -- i.e. offline and that their // full interleave hierarchy is being reverted. for i := range tables { - if tables[i].State != descpb.DescriptorState_OFFLINE { + if tables[i].GetState() != descpb.DescriptorState_OFFLINE { return errors.New("only offline tables can be reverted") } if !tables[i].IsPhysicalTable() { - return errors.Errorf("cannot revert virtual table %s", tables[i].Name) + return errors.Errorf("cannot revert virtual table %s", tables[i].GetName()) } for _, idx := range tables[i].NonDropIndexes() { for j := 0; j < idx.NumInterleaveAncestors(); j++ { @@ -74,7 +74,7 @@ func RevertTables( for i := range tables { // This is a) rare and b) probably relevant if we are looking at logs so it // probably makes sense to log it without a verbosity filter. 
- log.Infof(ctx, "reverting table %s (%d) to time %v", tables[i].Name, tables[i].ID, targetTime) + log.Infof(ctx, "reverting table %s (%d) to time %v", tables[i].GetName(), tables[i].GetID(), targetTime) } // TODO(dt): pre-split requests up using a rangedesc cache and run batches in diff --git a/pkg/sql/revert_test.go b/pkg/sql/revert_test.go index af7f97fe2165..7678c7921b0e 100644 --- a/pkg/sql/revert_test.go +++ b/pkg/sql/revert_test.go @@ -20,9 +20,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" @@ -79,8 +79,8 @@ func TestRevertTable(t *testing.T) { // Revert the table to ts. desc := catalogkv.TestingGetTableDescriptor(kv, keys.SystemSQLCodec, "test", "test") - desc.State = descpb.DescriptorState_OFFLINE // bypass the offline check. - require.NoError(t, sql.RevertTables(context.Background(), kv, &execCfg, []*tabledesc.Immutable{desc}, targetTime, 10)) + desc.TableDesc().State = descpb.DescriptorState_OFFLINE // bypass the offline check. + require.NoError(t, sql.RevertTables(context.Background(), kv, &execCfg, []catalog.TableDescriptor{desc}, targetTime, 10)) var reverted int db.QueryRow(t, `SELECT xor_agg(k # rev) FROM test`).Scan(&reverted) @@ -105,18 +105,18 @@ func TestRevertTable(t *testing.T) { // Revert the table to ts. 
desc := catalogkv.TestingGetTableDescriptor(kv, keys.SystemSQLCodec, "test", "test") - desc.State = descpb.DescriptorState_OFFLINE + desc.TableDesc().State = descpb.DescriptorState_OFFLINE child := catalogkv.TestingGetTableDescriptor(kv, keys.SystemSQLCodec, "test", "child") - child.State = descpb.DescriptorState_OFFLINE + child.TableDesc().State = descpb.DescriptorState_OFFLINE t.Run("reject only parent", func(t *testing.T) { - require.Error(t, sql.RevertTables(ctx, kv, &execCfg, []*tabledesc.Immutable{desc}, targetTime, 10)) + require.Error(t, sql.RevertTables(ctx, kv, &execCfg, []catalog.TableDescriptor{desc}, targetTime, 10)) }) t.Run("reject only child", func(t *testing.T) { - require.Error(t, sql.RevertTables(ctx, kv, &execCfg, []*tabledesc.Immutable{child}, targetTime, 10)) + require.Error(t, sql.RevertTables(ctx, kv, &execCfg, []catalog.TableDescriptor{child}, targetTime, 10)) }) t.Run("rollback parent and child", func(t *testing.T) { - require.NoError(t, sql.RevertTables(ctx, kv, &execCfg, []*tabledesc.Immutable{desc, child}, targetTime, sql.RevertTableDefaultBatchSize)) + require.NoError(t, sql.RevertTables(ctx, kv, &execCfg, []catalog.TableDescriptor{desc, child}, targetTime, sql.RevertTableDefaultBatchSize)) var reverted, revertedChild int db.QueryRow(t, `SELECT xor_agg(k # rev) FROM test`).Scan(&reverted) diff --git a/pkg/sql/row/BUILD.bazel b/pkg/sql/row/BUILD.bazel index d6c2bbe15e07..22907d3b2def 100644 --- a/pkg/sql/row/BUILD.bazel +++ b/pkg/sql/row/BUILD.bazel @@ -33,7 +33,6 @@ go_library( "//pkg/sql/catalog/colinfo", "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/schemaexpr", - "//pkg/sql/catalog/tabledesc", "//pkg/sql/execinfra", "//pkg/sql/execinfrapb", "//pkg/sql/pgwire/pgcode", diff --git a/pkg/sql/row/deleter.go b/pkg/sql/row/deleter.go index 4ebe2e29232b..29ee48804127 100644 --- a/pkg/sql/row/deleter.go +++ b/pkg/sql/row/deleter.go @@ -18,7 +18,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" 
"github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -42,7 +41,7 @@ type Deleter struct { // FetchCols; otherwise, all columns that are part of the key of any index // (either primary or secondary) are included in FetchCols. func MakeDeleter( - codec keys.SQLCodec, tableDesc *tabledesc.Immutable, requestedCols []descpb.ColumnDescriptor, + codec keys.SQLCodec, tableDesc catalog.TableDescriptor, requestedCols []descpb.ColumnDescriptor, ) Deleter { indexes := tableDesc.DeletableNonPrimaryIndexes() indexDescs := make([]descpb.IndexDescriptor, len(indexes)) diff --git a/pkg/sql/row/expr_walker.go b/pkg/sql/row/expr_walker.go index 3c6294ddf3d7..11c5f3dc3119 100644 --- a/pkg/sql/row/expr_walker.go +++ b/pkg/sql/row/expr_walker.go @@ -19,9 +19,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemaexpr" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/builtins" @@ -102,7 +102,7 @@ func makeBuiltinOverride( // default expressions which use sequences. 
type SequenceMetadata struct { id descpb.ID - seqDesc *tabledesc.Immutable + seqDesc catalog.TableDescriptor instancesPerRow int64 curChunk *jobspb.SequenceValChunk curVal int64 @@ -307,19 +307,19 @@ func (j *SeqChunkProvider) RequestChunk( func incrementSequenceByVal( ctx context.Context, - descriptor *tabledesc.Immutable, + descriptor catalog.TableDescriptor, db *kv.DB, codec keys.SQLCodec, incrementBy int64, ) (int64, error) { - seqOpts := descriptor.SequenceOpts + seqOpts := descriptor.GetSequenceOpts() var val int64 var err error // TODO(adityamaru): Think about virtual sequences. if seqOpts.Virtual { return 0, errors.New("virtual sequences are not supported by IMPORT INTO") } - seqValueKey := codec.SequenceKey(uint32(descriptor.ID)) + seqValueKey := codec.SequenceKey(uint32(descriptor.GetID())) val, err = kv.IncrementValRetryable(ctx, db, seqValueKey, incrementBy) if err != nil { if errors.HasType(err, (*roachpb.IntegerOverflowError)(nil)) { @@ -334,8 +334,8 @@ func incrementSequenceByVal( return val, nil } -func boundsExceededError(descriptor *tabledesc.Immutable) error { - seqOpts := descriptor.SequenceOpts +func boundsExceededError(descriptor catalog.TableDescriptor) error { + seqOpts := descriptor.GetSequenceOpts() isAscending := seqOpts.Increment > 0 var word string @@ -347,10 +347,11 @@ func boundsExceededError(descriptor *tabledesc.Immutable) error { word = "minimum" value = seqOpts.MinValue } + name := descriptor.GetName() return pgerror.Newf( pgcode.SequenceGeneratorLimitExceeded, `reached %s value of sequence %q (%d)`, word, - tree.ErrString((*tree.Name)(&descriptor.Name)), value) + tree.ErrString((*tree.Name)(&name)), value) } // checkForPreviouslyAllocatedChunks checks if a sequence value has already been @@ -378,7 +379,7 @@ func (j *SeqChunkProvider) checkForPreviouslyAllocatedChunks( if chunk.ChunkStartRow <= c.rowID && chunk.NextChunkStartRow > c.rowID { relativeRowIndex := c.rowID - chunk.ChunkStartRow seqMetadata.curVal = chunk.ChunkStartVal 
+ - seqMetadata.seqDesc.SequenceOpts.Increment*(seqMetadata.instancesPerRow*relativeRowIndex) + seqMetadata.seqDesc.GetSequenceOpts().Increment*(seqMetadata.instancesPerRow*relativeRowIndex) found = true return found, nil } @@ -409,7 +410,7 @@ func reserveChunkOfSeqVals( newChunkSize = seqMetadata.instancesPerRow } - incrementValBy := newChunkSize * seqMetadata.seqDesc.SequenceOpts.Increment + incrementValBy := newChunkSize * seqMetadata.seqDesc.GetSequenceOpts().Increment // incrementSequenceByVal keeps retrying until it is able to find a slot // of incrementValBy. seqVal, err := incrementSequenceByVal(evalCtx.Context, seqMetadata.seqDesc, evalCtx.DB, @@ -420,7 +421,7 @@ func reserveChunkOfSeqVals( // Update the sequence metadata to reflect the newly reserved chunk. seqMetadata.curChunk = &jobspb.SequenceValChunk{ - ChunkStartVal: seqVal - incrementValBy + seqMetadata.seqDesc.SequenceOpts.Increment, + ChunkStartVal: seqVal - incrementValBy + seqMetadata.seqDesc.GetSequenceOpts().Increment, ChunkSize: newChunkSize, ChunkStartRow: c.rowID, NextChunkStartRow: c.rowID + (newChunkSize / seqMetadata.instancesPerRow), @@ -449,7 +450,7 @@ func importNextVal(evalCtx *tree.EvalContext, args tree.Datums) (tree.Datum, err } else { // The current chunk of sequence values can be used for the row being // processed. 
- seqMetadata.curVal += seqMetadata.seqDesc.SequenceOpts.Increment + seqMetadata.curVal += seqMetadata.seqDesc.GetSequenceOpts().Increment } return tree.NewDInt(tree.DInt(seqMetadata.curVal)), nil } diff --git a/pkg/sql/row/expr_walker_test.go b/pkg/sql/row/expr_walker_test.go index cfac5083bac6..9cc6876161a3 100644 --- a/pkg/sql/row/expr_walker_test.go +++ b/pkg/sql/row/expr_walker_test.go @@ -18,6 +18,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -37,12 +38,12 @@ func createAndIncrementSeqDescriptor( incrementBy int64, seqOpts descpb.TableDescriptor_SequenceOpts, db *kv.DB, -) tabledesc.Immutable { - desc := tabledesc.MakeImmutable(descpb.TableDescriptor{ +) catalog.TableDescriptor { + desc := tabledesc.NewImmutable(descpb.TableDescriptor{ ID: descpb.ID(id), SequenceOpts: &seqOpts, }) - seqValueKey := codec.SequenceKey(uint32(desc.ID)) + seqValueKey := codec.SequenceKey(uint32(desc.GetID())) _, err := kv.IncrementValRetryable( ctx, db, seqValueKey, incrementBy) require.NoError(t, err) @@ -200,7 +201,7 @@ func TestJobBackedSeqChunkProvider(t *testing.T) { test.incrementBy, test.seqIDToOpts[id], db) seqMetadata := &SequenceMetadata{ id: descpb.ID(id), - seqDesc: &seqDesc, + seqDesc: seqDesc, instancesPerRow: test.instancesPerRow, curChunk: nil, curVal: 0, diff --git a/pkg/sql/row/fetcher.go b/pkg/sql/row/fetcher.go index c17bfdfb7183..59b1e0053fec 100644 --- a/pkg/sql/row/fetcher.go +++ b/pkg/sql/row/fetcher.go @@ -23,7 +23,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" 
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" @@ -162,12 +161,12 @@ type FetcherTableArgs struct { // InitCols initializes the columns in FetcherTableArgs. func (fta *FetcherTableArgs) InitCols( - desc *tabledesc.Immutable, + desc catalog.TableDescriptor, scanVisibility execinfrapb.ScanVisibility, systemColumns []descpb.ColumnDescriptor, virtualColumn *descpb.ColumnDescriptor, ) { - cols := desc.Columns + cols := desc.GetPublicColumns() if scanVisibility == execinfra.ScanVisibilityPublicAndNotPublic { cols = desc.ReadableColumns() } diff --git a/pkg/sql/row/fetcher_mvcc_test.go b/pkg/sql/row/fetcher_mvcc_test.go index 5ff5d707f7ec..5e01fec46763 100644 --- a/pkg/sql/row/fetcher_mvcc_test.go +++ b/pkg/sql/row/fetcher_mvcc_test.go @@ -22,7 +22,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -90,11 +89,11 @@ func TestRowFetcherMVCCMetadata(t *testing.T) { parentDesc := catalogkv.TestingGetImmutableTableDescriptor(kvDB, keys.SystemSQLCodec, `d`, `parent`) childDesc := catalogkv.TestingGetImmutableTableDescriptor(kvDB, keys.SystemSQLCodec, `d`, `child`) var args []row.FetcherTableArgs - for _, desc := range []*tabledesc.Immutable{parentDesc, childDesc} { + for _, desc := range []catalog.TableDescriptor{parentDesc, childDesc} { var colIdxMap catalog.TableColMap var valNeededForCol util.FastIntSet - for 
colIdx := range desc.Columns { - id := desc.Columns[colIdx].ID + for colIdx := range desc.GetPublicColumns() { + id := desc.GetPublicColumns()[colIdx].ID colIdxMap.Set(id, colIdx) valNeededForCol.Add(colIdx) } @@ -104,7 +103,7 @@ func TestRowFetcherMVCCMetadata(t *testing.T) { Index: desc.GetPrimaryIndex().IndexDesc(), ColIdxMap: colIdxMap, IsSecondaryIndex: false, - Cols: desc.Columns, + Cols: desc.GetPublicColumns(), ValNeededForCol: valNeededForCol, }) } diff --git a/pkg/sql/row/fetcher_test.go b/pkg/sql/row/fetcher_test.go index e729546a6f9f..0e379806d881 100644 --- a/pkg/sql/row/fetcher_test.go +++ b/pkg/sql/row/fetcher_test.go @@ -22,9 +22,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" @@ -39,7 +39,7 @@ import ( ) type initFetcherArgs struct { - tableDesc *tabledesc.Immutable + tableDesc catalog.TableDescriptor indexIdx int valNeededForCol util.FastIntSet spans roachpb.Spans @@ -56,7 +56,7 @@ func makeFetcherArgs(entries []initFetcherArgs) []FetcherTableArgs { Index: index.IndexDesc(), ColIdxMap: entry.tableDesc.ColumnIdxMap(), IsSecondaryIndex: !index.Primary(), - Cols: entry.tableDesc.Columns, + Cols: entry.tableDesc.GetPublicColumns(), ValNeededForCol: entry.valNeededForCol, } } @@ -192,10 +192,10 @@ func TestNextRowSingle(t *testing.T) { count++ - if desc.GetID() != tableDesc.ID || index.ID != tableDesc.GetPrimaryIndexID() { + if desc.GetID() != tableDesc.GetID() || index.ID 
!= tableDesc.GetPrimaryIndexID() { t.Fatalf( "unexpected row retrieved from fetcher.\nnexpected: table %s - index %s\nactual: table %s - index %s", - tableDesc.Name, tableDesc.GetPrimaryIndex().GetName(), + tableDesc.GetName(), tableDesc.GetPrimaryIndex().GetName(), desc.GetName(), index.Name, ) } @@ -313,10 +313,10 @@ func TestNextRowBatchLimiting(t *testing.T) { count++ - if desc.GetID() != tableDesc.ID || index.ID != tableDesc.GetPrimaryIndexID() { + if desc.GetID() != tableDesc.GetID() || index.ID != tableDesc.GetPrimaryIndexID() { t.Fatalf( "unexpected row retrieved from fetcher.\nnexpected: table %s - index %s\nactual: table %s - index %s", - tableDesc.Name, tableDesc.GetPrimaryIndex().GetName(), + tableDesc.GetName(), tableDesc.GetPrimaryIndex().GetName(), desc.GetName(), index.Name, ) } @@ -673,10 +673,10 @@ func TestNextRowSecondaryIndex(t *testing.T) { count++ - if desc.GetID() != tableDesc.ID || index.ID != tableDesc.PublicNonPrimaryIndexes()[0].GetID() { + if desc.GetID() != tableDesc.GetID() || index.ID != tableDesc.PublicNonPrimaryIndexes()[0].GetID() { t.Fatalf( "unexpected row retrieved from fetcher.\nnexpected: table %s - index %s\nactual: table %s - index %s", - tableDesc.Name, tableDesc.PublicNonPrimaryIndexes()[0].GetName(), + tableDesc.GetName(), tableDesc.PublicNonPrimaryIndexes()[0].GetName(), desc.GetName(), index.Name, ) } @@ -984,7 +984,7 @@ func TestNextRowInterleaved(t *testing.T) { for i, entry := range entries { tableDesc := catalogkv.TestingGetImmutableTableDescriptor(kvDB, keys.SystemSQLCodec, sqlutils.TestDB, entry.tableName) indexID := tableDesc.ActiveIndexes()[entry.indexIdx].GetID() - idLookups[idLookupKey(tableDesc.ID, indexID)] = entry + idLookups[idLookupKey(tableDesc.GetID(), indexID)] = entry // We take every entry's index span (primary or // secondary) and use it to start our scan. 
diff --git a/pkg/sql/row/inserter.go b/pkg/sql/row/inserter.go index 220535788d90..9e66fa40a0af 100644 --- a/pkg/sql/row/inserter.go +++ b/pkg/sql/row/inserter.go @@ -19,7 +19,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -47,7 +46,7 @@ func MakeInserter( ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, - tableDesc *tabledesc.Immutable, + tableDesc catalog.TableDescriptor, insertCols []descpb.ColumnDescriptor, alloc *rowenc.DatumAlloc, ) (Inserter, error) { diff --git a/pkg/sql/row/row_converter.go b/pkg/sql/row/row_converter.go index 635c7aac6db1..d0ac14ed4df6 100644 --- a/pkg/sql/row/row_converter.go +++ b/pkg/sql/row/row_converter.go @@ -20,7 +20,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemaexpr" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/builtins" "github.com/cockroachdb/cockroach/pkg/sql/sem/transform" @@ -94,7 +93,7 @@ func GenerateInsertRow( insertCols []descpb.ColumnDescriptor, computedColsLookup []descpb.ColumnDescriptor, evalCtx *tree.EvalContext, - tableDesc *tabledesc.Immutable, + tableDesc catalog.TableDescriptor, rowVals tree.Datums, rowContainerForComputedVals *schemaexpr.RowIndexedVarContainer, ) (tree.Datums, error) { @@ -206,7 +205,7 @@ type DatumRowConverter struct { KvBatch KVBatch BatchCap int - tableDesc *tabledesc.Immutable + tableDesc 
catalog.TableDescriptor // Tracks which column indices in the set of visible columns are part of the // user specified target columns. This can be used before populating Datums @@ -272,12 +271,12 @@ func (c *DatumRowConverter) getSequenceAnnotation( return err } - seqOpts := seqDesc.SequenceOpts + seqOpts := seqDesc.GetSequenceOpts() if seqOpts == nil { - return errors.Newf("descriptor %s is not a sequence", seqDesc.Name) + return errors.Newf("descriptor %s is not a sequence", seqDesc.GetName()) } - seqNameToMetadata[seqDesc.Name] = &SequenceMetadata{ + seqNameToMetadata[seqDesc.GetName()] = &SequenceMetadata{ id: seqID, seqDesc: seqDesc, } @@ -290,7 +289,7 @@ func (c *DatumRowConverter) getSequenceAnnotation( // NewDatumRowConverter returns an instance of a DatumRowConverter. func NewDatumRowConverter( ctx context.Context, - tableDesc *tabledesc.Immutable, + tableDesc catalog.TableDescriptor, targetColNames tree.NameList, evalCtx *tree.EvalContext, kvCh chan<- KVBatch, @@ -415,12 +414,12 @@ func NewDatumRowConverter( return nil, errors.New("unexpected hidden column") } - padding := 2 * (len(tableDesc.PublicNonPrimaryIndexes()) + len(tableDesc.Families)) + padding := 2 * (len(tableDesc.PublicNonPrimaryIndexes()) + len(tableDesc.GetFamilies())) c.BatchCap = kvDatumRowConverterBatchSize + padding c.KvBatch.KVs = make([]roachpb.KeyValue, 0, c.BatchCap) - colsOrdered := make([]descpb.ColumnDescriptor, len(c.tableDesc.Columns)) - for _, col := range c.tableDesc.Columns { + colsOrdered := make([]descpb.ColumnDescriptor, len(c.tableDesc.GetPublicColumns())) + for _, col := range c.tableDesc.GetPublicColumns() { // We prefer to have the order of columns that will be sent into // MakeComputedExprs to map that of Datums. 
colsOrdered[ri.InsertColIDtoRowIndex.GetDefault(col.ID)] = col @@ -433,7 +432,7 @@ func NewDatumRowConverter( colsOrdered, c.tableDesc.GetPublicColumns(), c.tableDesc, - tree.NewUnqualifiedTableName(tree.Name(c.tableDesc.Name)), + tree.NewUnqualifiedTableName(tree.Name(c.tableDesc.GetName())), c.EvalCtx, &semaCtx) if err != nil { @@ -442,7 +441,7 @@ func NewDatumRowConverter( c.computedIVarContainer = schemaexpr.RowIndexedVarContainer{ Mapping: ri.InsertColIDtoRowIndex, - Cols: tableDesc.Columns, + Cols: tableDesc.GetPublicColumns(), } return c, nil } @@ -475,7 +474,7 @@ func (c *DatumRowConverter) Row(ctx context.Context, sourceID int32, rowIndex in var computedColsLookup []descpb.ColumnDescriptor if len(c.computedExprs) > 0 { - computedColsLookup = c.tableDesc.Columns + computedColsLookup = c.tableDesc.GetPublicColumns() } insertRow, err := GenerateInsertRow( diff --git a/pkg/sql/row/updater.go b/pkg/sql/row/updater.go index ccedf4ec62ed..b2246e8958d9 100644 --- a/pkg/sql/row/updater.go +++ b/pkg/sql/row/updater.go @@ -20,7 +20,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -85,7 +84,7 @@ func MakeUpdater( ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, - tableDesc *tabledesc.Immutable, + tableDesc catalog.TableDescriptor, updateCols []descpb.ColumnDescriptor, requestedCols []descpb.ColumnDescriptor, updateType rowUpdaterType, diff --git a/pkg/sql/rowenc/client_index_encoding_test.go b/pkg/sql/rowenc/client_index_encoding_test.go index 81baed783953..ce179f665852 100644 --- a/pkg/sql/rowenc/client_index_encoding_test.go +++ b/pkg/sql/rowenc/client_index_encoding_test.go 
@@ -701,7 +701,7 @@ func EncodeTestKey(tb testing.TB, kvDB *kv.DB, codec keys.SQLCodec, keyStr strin // Encode the table ID if the token is a table name. if tableNames[tok] { desc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, sqlutils.TestDB, tok) - key = encoding.EncodeUvarintAscending(key, uint64(desc.ID)) + key = encoding.EncodeUvarintAscending(key, uint64(desc.GetID())) continue } diff --git a/pkg/sql/rowenc/index_encoding_test.go b/pkg/sql/rowenc/index_encoding_test.go index 0f7a11512899..636d16f0171b 100644 --- a/pkg/sql/rowenc/index_encoding_test.go +++ b/pkg/sql/rowenc/index_encoding_test.go @@ -52,7 +52,7 @@ type indexKeyTest struct { secondaryValues []tree.Datum // len must be at least secondaryInterleaveComponents+1 } -func makeTableDescForTest(test indexKeyTest) (*tabledesc.Immutable, catalog.TableColMap) { +func makeTableDescForTest(test indexKeyTest) (catalog.TableDescriptor, catalog.TableColMap) { primaryColumnIDs := make([]descpb.ColumnID, len(test.primaryValues)) secondaryColumnIDs := make([]descpb.ColumnID, len(test.secondaryValues)) columns := make([]descpb.ColumnDescriptor, len(test.primaryValues)+len(test.secondaryValues)) @@ -111,7 +111,7 @@ func makeTableDescForTest(test indexKeyTest) (*tabledesc.Immutable, catalog.Tabl } func decodeIndex( - codec keys.SQLCodec, tableDesc *tabledesc.Immutable, index *descpb.IndexDescriptor, key []byte, + codec keys.SQLCodec, tableDesc catalog.TableDescriptor, index *descpb.IndexDescriptor, key []byte, ) ([]tree.Datum, error) { types, err := colinfo.GetColumnTypes(tableDesc, index.ColumnIDs, nil) if err != nil { @@ -210,11 +210,11 @@ func TestIndexKey(t *testing.T) { colNames []string colIDs descpb.ColumnIDs ) - for _, c := range tableDesc.Columns { + for _, c := range tableDesc.GetPublicColumns() { colNames = append(colNames, c.Name) colIDs = append(colIDs, c.ID) } - tableDesc.Families = []descpb.ColumnFamilyDescriptor{{ + tableDesc.TableDesc().Families = []descpb.ColumnFamilyDescriptor{{ 
Name: "defaultFamily", ID: 0, ColumnNames: colNames, @@ -979,7 +979,7 @@ func TestTableEquivSignatures(t *testing.T) { tc.table.indexKeyArgs.primaryValues = tc.table.values // Setup descriptors and form an index key. desc, _ := makeTableDescForTest(tc.table.indexKeyArgs) - equivSigs, err := TableEquivSignatures(&desc.TableDescriptor, desc.GetPrimaryIndex().IndexDesc()) + equivSigs, err := TableEquivSignatures(desc.TableDesc(), desc.GetPrimaryIndex().IndexDesc()) if err != nil { t.Fatal(err) } @@ -1070,7 +1070,7 @@ func TestEquivSignature(t *testing.T) { } // Extract out the table's equivalence signature. - tempEquivSigs, err := TableEquivSignatures(&desc.TableDescriptor, desc.GetPrimaryIndex().IndexDesc()) + tempEquivSigs, err := TableEquivSignatures(desc.TableDesc(), desc.GetPrimaryIndex().IndexDesc()) if err != nil { t.Fatal(err) } diff --git a/pkg/sql/rowexec/BUILD.bazel b/pkg/sql/rowexec/BUILD.bazel index 35ce3195f656..5736789c3bef 100644 --- a/pkg/sql/rowexec/BUILD.bazel +++ b/pkg/sql/rowexec/BUILD.bazel @@ -147,7 +147,6 @@ go_test( "//pkg/sql/catalog/catalogkv", "//pkg/sql/catalog/colinfo", "//pkg/sql/catalog/descpb", - "//pkg/sql/catalog/tabledesc", "//pkg/sql/execinfra", "//pkg/sql/execinfrapb", "//pkg/sql/flowinfra", diff --git a/pkg/sql/rowexec/backfiller.go b/pkg/sql/rowexec/backfiller.go index 8486faa5d967..5b3eb27b23db 100644 --- a/pkg/sql/rowexec/backfiller.go +++ b/pkg/sql/rowexec/backfiller.go @@ -200,14 +200,12 @@ func GetResumeSpans( // Find the index of the first mutation that is being worked on. const noIndex = -1 mutationIdx := noIndex - if len(tableDesc.Mutations) > 0 { - for i, m := range tableDesc.Mutations { - if m.MutationID != mutationID { - break - } - if mutationIdx == noIndex && filter(m) { - mutationIdx = i - } + for i, m := range tableDesc.GetMutations() { + if m.MutationID != mutationID { + break + } + if mutationIdx == noIndex && filter(m) { + mutationIdx = i } } @@ -218,12 +216,12 @@ func GetResumeSpans( // Find the job. 
var jobID int64 - if len(tableDesc.MutationJobs) > 0 { + if len(tableDesc.GetMutationJobs()) > 0 { // TODO (lucy): We need to get rid of MutationJobs. This is the only place // where we need to get the job where it's not completely straightforward to // remove the use of MutationJobs, since the backfiller doesn't otherwise // know which job it's associated with. - for _, job := range tableDesc.MutationJobs { + for _, job := range tableDesc.GetMutationJobs() { if job.MutationID == mutationID { jobID = job.JobID break diff --git a/pkg/sql/rowexec/bulk_row_writer.go b/pkg/sql/rowexec/bulk_row_writer.go index 9b0f1fe4609a..ffcec8c01863 100644 --- a/pkg/sql/rowexec/bulk_row_writer.go +++ b/pkg/sql/rowexec/bulk_row_writer.go @@ -16,6 +16,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" @@ -38,7 +39,7 @@ type bulkRowWriter struct { flowCtx *execinfra.FlowCtx processorID int32 batchIdxAtomic int64 - tableDesc tabledesc.Immutable + tableDesc catalog.TableDescriptor spec execinfrapb.BulkRowWriterSpec input execinfra.RowSource output execinfra.RowReceiver @@ -59,7 +60,7 @@ func newBulkRowWriterProcessor( flowCtx: flowCtx, processorID: processorID, batchIdxAtomic: 0, - tableDesc: tabledesc.MakeImmutable(spec.Table), + tableDesc: tabledesc.NewImmutable(spec.Table), spec: spec, input: input, output: output, @@ -103,7 +104,7 @@ func (sp *bulkRowWriter) work(ctx context.Context) error { var g ctxgroup.Group conv, err := row.NewDatumRowConverter(ctx, - &sp.tableDesc, nil /* targetColNames */, sp.EvalCtx, kvCh, nil /* seqChunkProvider */) + sp.tableDesc, nil /* targetColNames */, sp.EvalCtx, kvCh, nil /* seqChunkProvider */) if err != nil { 
return err } @@ -127,7 +128,7 @@ func (sp *bulkRowWriter) wrapDupError(ctx context.Context, orig error) error { return orig } v := &roachpb.Value{RawBytes: typed.Value} - return row.NewUniquenessConstraintViolationError(ctx, &sp.tableDesc, typed.Key, v) + return row.NewUniquenessConstraintViolationError(ctx, sp.tableDesc, typed.Key, v) } func (sp *bulkRowWriter) ingestLoop(ctx context.Context, kvCh chan row.KVBatch) error { diff --git a/pkg/sql/rowexec/columnbackfiller.go b/pkg/sql/rowexec/columnbackfiller.go index c1817ae9f1f5..c27d7259092f 100644 --- a/pkg/sql/rowexec/columnbackfiller.go +++ b/pkg/sql/rowexec/columnbackfiller.go @@ -16,6 +16,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/backfill" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" @@ -28,7 +29,7 @@ type columnBackfiller struct { backfill.ColumnBackfiller - desc *tabledesc.Immutable + desc catalog.TableDescriptor } var _ execinfra.Processor = &columnBackfiller{} diff --git a/pkg/sql/rowexec/indexbackfiller.go b/pkg/sql/rowexec/indexbackfiller.go index 2bf95bfab248..ed7013209230 100644 --- a/pkg/sql/rowexec/indexbackfiller.go +++ b/pkg/sql/rowexec/indexbackfiller.go @@ -19,6 +19,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/sql/backfill" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" @@ -42,7 +43,7 @@ type indexBackfiller struct { adder 
kvserverbase.BulkAdder - desc *tabledesc.Immutable + desc catalog.TableDescriptor spec execinfrapb.BackfillerSpec @@ -388,12 +389,11 @@ func (ib *indexBackfiller) wrapDupError(ctx context.Context, orig error) error { } desc, err := ib.desc.MakeFirstMutationPublic(tabledesc.IncludeConstraints) - immutable := tabledesc.NewImmutable(*desc.TableDesc()) if err != nil { return err } v := &roachpb.Value{RawBytes: typed.Value} - return row.NewUniquenessConstraintViolationError(ctx, immutable, typed.Key, v) + return row.NewUniquenessConstraintViolationError(ctx, desc, typed.Key, v) } const indexBackfillProgressReportInterval = 10 * time.Second diff --git a/pkg/sql/rowexec/inverted_joiner.go b/pkg/sql/rowexec/inverted_joiner.go index 0f1c6ecc6b7d..49f634582caf 100644 --- a/pkg/sql/rowexec/inverted_joiner.go +++ b/pkg/sql/rowexec/inverted_joiner.go @@ -64,7 +64,7 @@ type invertedJoiner struct { runningState invertedJoinerState diskMonitor *mon.BytesMonitor - desc tabledesc.Immutable + desc catalog.TableDescriptor // The map from ColumnIDs in the table to the column position. 
colIdxMap catalog.TableColMap index *descpb.IndexDescriptor @@ -185,7 +185,7 @@ func newInvertedJoiner( return nil, errors.AssertionFailedf("unexpected inverted join type %s", spec.Type) } ij := &invertedJoiner{ - desc: tabledesc.MakeImmutable(spec.Table), + desc: tabledesc.NewImmutable(spec.Table), input: input, inputTypes: input.OutputTypes(), prefixEqualityCols: spec.PrefixEqualityColumns, @@ -196,10 +196,11 @@ func newInvertedJoiner( ij.colIdxMap = ij.desc.ColumnIdxMap() var err error - ij.index, _, err = ij.desc.FindIndexByIndexIdx(int(spec.IndexIdx)) - if err != nil { - return nil, err + indexIdx := int(spec.IndexIdx) + if indexIdx >= len(ij.desc.ActiveIndexes()) { + return nil, errors.Errorf("invalid indexIdx %d", indexIdx) } + ij.index = ij.desc.ActiveIndexes()[indexIdx].IndexDesc() ij.invertedColID = ij.index.InvertedColumnID() // Initialize tableRow, indexRow, indexRowTypes, and indexRowToTableRowMap, @@ -219,13 +220,13 @@ func newInvertedJoiner( } tableRowIdx := ij.colIdxMap.GetDefault(colID) ij.indexRowToTableRowMap[indexRowIdx] = tableRowIdx - ij.indexRowTypes[indexRowIdx] = ij.desc.Columns[tableRowIdx].Type + ij.indexRowTypes[indexRowIdx] = ij.desc.GetPublicColumns()[tableRowIdx].Type indexRowIdx++ } outputColCount := len(ij.inputTypes) // Inverted joins are not used for mutations. - rightColTypes := ij.desc.ColumnTypesWithMutations(false /* mutations */) + rightColTypes := ij.desc.ColumnTypes() var includeRightCols bool if ij.joinType == descpb.InnerJoin || ij.joinType == descpb.LeftOuterJoin { outputColCount += len(rightColTypes) @@ -300,7 +301,7 @@ func newInvertedJoiner( // We use ScanVisibilityPublic since inverted joins are not used for mutations, // and so do not need to see in-progress schema changes. 
_, _, err = initRowFetcher( - flowCtx, &fetcher, &ij.desc, int(spec.IndexIdx), ij.colIdxMap, false, /* reverse */ + flowCtx, &fetcher, ij.desc, int(spec.IndexIdx), ij.colIdxMap, false, /* reverse */ allIndexCols, false /* isCheck */, flowCtx.EvalCtx.Mon, &ij.alloc, execinfra.ScanVisibilityPublic, descpb.ScanLockingStrength_FOR_NONE, descpb.ScanLockingWaitPolicy_BLOCK, nil /* systemColumns */, nil, /* virtualColumn */ @@ -321,7 +322,7 @@ func newInvertedJoiner( ij.fetcher = &fetcher } - ij.spanBuilder = span.MakeBuilder(flowCtx.EvalCtx, flowCtx.Codec(), &ij.desc, ij.index) + ij.spanBuilder = span.MakeBuilder(flowCtx.EvalCtx, flowCtx.Codec(), ij.desc, ij.index) ij.spanBuilder.SetNeededColumns(allIndexCols) // Initialize memory monitors and row container for index rows. @@ -447,7 +448,7 @@ func (ij *invertedJoiner) readInput() (invertedJoinerState, *execinfrapb.Produce ij.indexRow[:len(ij.prefixEqualityCols)], ij.indexRowTypes[:len(ij.prefixEqualityCols)], ij.index.ColumnDirections, - &ij.desc, + ij.desc, ij.index, &ij.alloc, nil, /* keyPrefix */ @@ -540,7 +541,7 @@ func (ij *invertedJoiner) performScan() (invertedJoinerState, *execinfrapb.Produ ij.indexRow[:len(ij.prefixEqualityCols)], ij.indexRowTypes[:len(ij.prefixEqualityCols)], ij.index.ColumnDirections, - &ij.desc, + ij.desc, ij.index, &ij.alloc, nil, /* keyPrefix */ diff --git a/pkg/sql/rowexec/joinreader.go b/pkg/sql/rowexec/joinreader.go index 9ae6eaa36e73..8c32281fc8ce 100644 --- a/pkg/sql/rowexec/joinreader.go +++ b/pkg/sql/rowexec/joinreader.go @@ -76,7 +76,7 @@ type joinReader struct { diskMonitor *mon.BytesMonitor - desc tabledesc.Immutable + desc catalog.TableDescriptor index *descpb.IndexDescriptor colIdxMap catalog.TableColMap maintainOrdering bool @@ -188,7 +188,7 @@ func newJoinReader( return nil, errors.Errorf("unsupported joinReaderType") } jr := &joinReader{ - desc: tabledesc.MakeImmutable(spec.Table), + desc: tabledesc.NewImmutable(spec.Table), maintainOrdering: spec.MaintainOrdering, input: 
input, lookupCols: lookupCols, @@ -204,10 +204,13 @@ func newJoinReader( } var err error var isSecondary bool - jr.index, isSecondary, err = jr.desc.FindIndexByIndexIdx(int(spec.IndexIdx)) - if err != nil { - return nil, err + indexIdx := int(spec.IndexIdx) + if indexIdx >= len(jr.desc.ActiveIndexes()) { + return nil, errors.Errorf("invalid indexIdx %d", indexIdx) } + indexI := jr.desc.ActiveIndexes()[indexIdx] + jr.index = indexI.IndexDesc() + isSecondary = !indexI.Primary() returnMutations := spec.Visibility == execinfra.ScanVisibilityPublicAndNotPublic jr.colIdxMap = jr.desc.ColumnIdxMapWithMutations(returnMutations) @@ -289,7 +292,7 @@ func newJoinReader( var fetcher row.Fetcher _, _, err = initRowFetcher( - flowCtx, &fetcher, &jr.desc, int(spec.IndexIdx), jr.colIdxMap, false, /* reverse */ + flowCtx, &fetcher, jr.desc, int(spec.IndexIdx), jr.colIdxMap, false, /* reverse */ rightCols, false /* isCheck */, jr.EvalCtx.Mon, &jr.alloc, spec.Visibility, spec.LockingStrength, spec.LockingWaitPolicy, sysColDescs, nil, /* virtualColumn */ ) @@ -319,7 +322,7 @@ func (jr *joinReader) initJoinReaderStrategy( neededRightCols util.FastIntSet, readerType joinReaderType, ) { - spanBuilder := span.MakeBuilder(flowCtx.EvalCtx, flowCtx.Codec(), &jr.desc, jr.index) + spanBuilder := span.MakeBuilder(flowCtx.EvalCtx, flowCtx.Codec(), jr.desc, jr.index) spanBuilder.SetNeededColumns(neededRightCols) var keyToInputRowIndices map[string][]int diff --git a/pkg/sql/rowexec/joinreader_test.go b/pkg/sql/rowexec/joinreader_test.go index 83f738d3730d..63665d3d25e3 100644 --- a/pkg/sql/rowexec/joinreader_test.go +++ b/pkg/sql/rowexec/joinreader_test.go @@ -28,7 +28,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" 
"github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" @@ -654,7 +653,7 @@ func TestJoinReader(t *testing.T) { ) diskMonitor.Start(ctx, nil /* pool */, mon.MakeStandaloneBudget(math.MaxInt64)) defer diskMonitor.Stop(ctx) - for i, td := range []*tabledesc.Immutable{tdSecondary, tdFamily, tdInterleaved} { + for i, td := range []catalog.TableDescriptor{tdSecondary, tdFamily, tdInterleaved} { for _, c := range testCases { for _, reqOrdering := range []bool{true, false} { // Small and large batches exercise different paths of interest for diff --git a/pkg/sql/rowexec/processors_test.go b/pkg/sql/rowexec/processors_test.go index d2a0089f2a10..798b35341165 100644 --- a/pkg/sql/rowexec/processors_test.go +++ b/pkg/sql/rowexec/processors_test.go @@ -806,7 +806,7 @@ func TestUncertaintyErrorIsReturned(t *testing.T) { for _, tableName := range errorOriginSpec.tableNames { filters[nodeIdx].tableIDsToFilter = append( filters[nodeIdx].tableIDsToFilter, - int(catalogkv.TestingGetTableDescriptor(tc.Server(0).DB(), keys.SystemSQLCodec, "test", tableName).ID), + int(catalogkv.TestingGetTableDescriptor(tc.Server(0).DB(), keys.SystemSQLCodec, "test", tableName).GetID()), ) } filters[nodeIdx].Unlock() diff --git a/pkg/sql/rowexec/rowfetcher.go b/pkg/sql/rowexec/rowfetcher.go index 1de40c06f1a5..9e630b4e5b8f 100644 --- a/pkg/sql/rowexec/rowfetcher.go +++ b/pkg/sql/rowexec/rowfetcher.go @@ -18,7 +18,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/row" @@ -26,6 +25,7 @@ import ( 
"github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/mon" + "github.com/cockroachdb/errors" ) // rowFetcher is an interface used to abstract a row fetcher so that a stat @@ -64,7 +64,7 @@ type rowFetcher interface { func initRowFetcher( flowCtx *execinfra.FlowCtx, fetcher *row.Fetcher, - desc *tabledesc.Immutable, + desc catalog.TableDescriptor, indexIdx int, colIdxMap catalog.TableColMap, reverseScan bool, @@ -78,10 +78,12 @@ func initRowFetcher( systemColumns []descpb.ColumnDescriptor, virtualColumn *descpb.ColumnDescriptor, ) (index *descpb.IndexDescriptor, isSecondaryIndex bool, err error) { - index, isSecondaryIndex, err = desc.FindIndexByIndexIdx(indexIdx) - if err != nil { - return nil, false, err + if indexIdx >= len(desc.ActiveIndexes()) { + return nil, false, errors.Errorf("invalid indexIdx %d", indexIdx) } + indexI := desc.ActiveIndexes()[indexIdx] + index = indexI.IndexDesc() + isSecondaryIndex = !indexI.Primary() tableArgs := row.FetcherTableArgs{ Desc: desc, diff --git a/pkg/sql/rowexec/scrub_tablereader.go b/pkg/sql/rowexec/scrub_tablereader.go index 0f20caad0513..a20623831125 100644 --- a/pkg/sql/rowexec/scrub_tablereader.go +++ b/pkg/sql/rowexec/scrub_tablereader.go @@ -48,7 +48,7 @@ var ScrubTypes = []*types.T{ type scrubTableReader struct { tableReader - tableDesc tabledesc.Immutable + tableDesc catalog.TableDescriptor // fetcherResultToColIdx maps Fetcher results to the column index in // the TableDescriptor. This is only initialized and used during scrub // physical checks. 
@@ -79,7 +79,7 @@ func newScrubTableReader( indexIdx: int(spec.IndexIdx), } - tr.tableDesc = tabledesc.MakeImmutable(spec.Table) + tr.tableDesc = tabledesc.NewImmutable(spec.Table) tr.limitHint = execinfra.LimitHint(spec.LimitHint, post) if err := tr.Init( @@ -125,7 +125,7 @@ func newScrubTableReader( var fetcher row.Fetcher if _, _, err := initRowFetcher( - flowCtx, &fetcher, &tr.tableDesc, int(spec.IndexIdx), tr.tableDesc.ColumnIdxMap(), + flowCtx, &fetcher, tr.tableDesc, int(spec.IndexIdx), tr.tableDesc.ColumnIdxMap(), spec.Reverse, neededColumns, true /* isCheck */, flowCtx.EvalCtx.Mon, &tr.alloc, execinfra.ScanVisibilityPublic, spec.LockingStrength, spec.LockingWaitPolicy, nil /* systemColumns */, nil, /* virtualColumn */ @@ -153,7 +153,7 @@ func (tr *scrubTableReader) generateScrubErrorRow( // Collect all the row values into JSON rowDetails := make(map[string]interface{}) for i, colIdx := range tr.fetcherResultToColIdx { - col := tr.tableDesc.Columns[colIdx] + col := tr.tableDesc.GetPublicColumns()[colIdx] // TODO(joey): We should maybe try to get the underlying type. rowDetails[col.Name] = row[i].String(col.Type) } @@ -193,7 +193,7 @@ func (tr *scrubTableReader) prettyPrimaryKeyValues( } var colIDToRowIdxMap catalog.TableColMap for rowIdx, colIdx := range tr.fetcherResultToColIdx { - colIDToRowIdxMap.Set(tr.tableDesc.Columns[colIdx].ID, rowIdx) + colIDToRowIdxMap.Set(tr.tableDesc.GetPublicColumns()[colIdx].ID, rowIdx) } var primaryKeyValues bytes.Buffer primaryKeyValues.WriteByte('(') diff --git a/pkg/sql/rowexec/tablereader_test.go b/pkg/sql/rowexec/tablereader_test.go index ee1fbd9527e6..215041a1d969 100644 --- a/pkg/sql/rowexec/tablereader_test.go +++ b/pkg/sql/rowexec/tablereader_test.go @@ -433,7 +433,7 @@ func TestLimitScans(t *testing.T) { // specific so that we don't count range resolving requests, and we dedupe // scans from the same key as the DistSender retries scans when it detects // splits. 
- re := regexp.MustCompile(fmt.Sprintf(`querying next range at /Table/%d/1(\S.*)?`, tableDesc.ID)) + re := regexp.MustCompile(fmt.Sprintf(`querying next range at /Table/%d/1(\S.*)?`, tableDesc.GetID())) spans := sp.GetRecording() ranges := make(map[string]struct{}) for _, span := range spans { diff --git a/pkg/sql/rowexec/zigzagjoiner.go b/pkg/sql/rowexec/zigzagjoiner.go index c62d4fe3b938..9655b1f02e72 100644 --- a/pkg/sql/rowexec/zigzagjoiner.go +++ b/pkg/sql/rowexec/zigzagjoiner.go @@ -284,9 +284,9 @@ func newZigzagJoiner( z := &zigzagJoiner{} // TODO(ajwerner): Utilize a cached copy of these tables. - tables := make([]tabledesc.Immutable, len(spec.Tables)) + tables := make([]catalog.TableDescriptor, len(spec.Tables)) for i := range spec.Tables { - tables[i] = tabledesc.MakeImmutable(spec.Tables[i]) + tables[i] = tabledesc.NewImmutable(spec.Tables[i]) } leftColumnTypes := tables[0].ColumnTypes() rightColumnTypes := tables[1].ColumnTypes() @@ -338,7 +338,7 @@ func newZigzagJoiner( if err := z.setupInfo(flowCtx, spec, i, colOffset, tables); err != nil { return nil, err } - colOffset += len(z.infos[i].table.Columns) + colOffset += len(z.infos[i].table.GetPublicColumns()) } z.side = 0 return z, nil @@ -374,7 +374,7 @@ func (z *zigzagJoiner) Start(ctx context.Context) context.Context { type zigzagJoinerInfo struct { fetcher row.Fetcher alloc *rowenc.DatumAlloc - table *tabledesc.Immutable + table catalog.TableDescriptor index *descpb.IndexDescriptor indexTypes []*types.T indexDirs []descpb.IndexDescriptor_Direction @@ -411,13 +411,13 @@ func (z *zigzagJoiner) setupInfo( spec *execinfrapb.ZigzagJoinerSpec, side int, colOffset int, - tables []tabledesc.Immutable, + tables []catalog.TableDescriptor, ) error { z.side = side info := z.infos[side] info.alloc = &rowenc.DatumAlloc{} - info.table = &tables[side] + info.table = tables[side] info.eqColumns = spec.EqColumns[side].Columns indexOrdinal := spec.IndexOrdinals[side] info.index = 
info.table.ActiveIndexes()[indexOrdinal].IndexDesc() @@ -440,7 +440,7 @@ func (z *zigzagJoiner) setupInfo( // Add the outputted columns. neededCols := util.MakeFastIntSet() outCols := z.Out.NeededColumns() - maxCol := colOffset + len(info.table.Columns) + maxCol := colOffset + len(info.table.GetPublicColumns()) for i, ok := outCols.Next(colOffset); ok && i < maxCol; i, ok = outCols.Next(i + 1) { neededCols.Add(i - colOffset) } @@ -653,7 +653,7 @@ func (zi *zigzagJoinerInfo) eqColTypes() []*types.T { func (zi *zigzagJoinerInfo) eqOrdering() (colinfo.ColumnOrdering, error) { ordering := make(colinfo.ColumnOrdering, len(zi.eqColumns)) for i := range zi.eqColumns { - colID := zi.table.Columns[zi.eqColumns[i]].ID + colID := zi.table.GetPublicColumns()[zi.eqColumns[i]].ID // Search the index columns, then the primary keys to find an ordering for // the current column, 'colID'. var direction encoding.Direction diff --git a/pkg/sql/scan.go b/pkg/sql/scan.go index 435c318eb1db..0dd5607d3636 100644 --- a/pkg/sql/scan.go +++ b/pkg/sql/scan.go @@ -12,12 +12,13 @@ package sql import ( "context" + "fmt" "sync" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/opt/exec" @@ -44,7 +45,7 @@ type scanNode struct { // Enforce this using NoCopy. _ util.NoCopy - desc *tabledesc.Immutable + desc catalog.TableDescriptor index *descpb.IndexDescriptor // Set if an index was explicitly specified. 
@@ -202,7 +203,7 @@ func (n *scanNode) limitHint() int64 { func (n *scanNode) initTable( ctx context.Context, p *planner, - desc *tabledesc.Immutable, + desc catalog.TableDescriptor, indexFlags *tree.IndexFlags, colCfg scanColumnsConfig, ) error { @@ -246,9 +247,24 @@ func (n *scanNode) lookupSpecifiedIndex(indexFlags *tree.IndexFlags) error { return nil } +// findReadableColumnByID finds the readable column with specified ID. The +// column may be undergoing a schema change and is marked nullable regardless +// of its configuration. It returns true if the column is undergoing a +// schema change. +func findReadableColumnByID( + desc catalog.TableDescriptor, id descpb.ColumnID, +) (*descpb.ColumnDescriptor, error) { + for _, c := range desc.ReadableColumns() { + if c.ID == id { + return &c, nil + } + } + return nil, fmt.Errorf("column-id \"%d\" does not exist", id) +} + // initColsForScan initializes cols according to desc and colCfg. func initColsForScan( - desc *tabledesc.Immutable, colCfg scanColumnsConfig, + desc catalog.TableDescriptor, colCfg scanColumnsConfig, ) (cols []*descpb.ColumnDescriptor, err error) { if colCfg.wantedColumns == nil { return nil, errors.AssertionFailedf("unexpectedly wantedColumns is nil") @@ -270,7 +286,7 @@ func initColsForScan( if id := descpb.ColumnID(wc); colCfg.visibility == execinfra.ScanVisibilityPublic { c, err = desc.FindActiveColumnByID(id) } else { - c, _, err = desc.FindReadableColumnByID(id) + c, err = findReadableColumnByID(desc, id) } if err != nil { return cols, err @@ -289,8 +305,8 @@ func initColsForScan( } if colCfg.addUnwantedAsHidden { - for i := range desc.Columns { - c := &desc.Columns[i] + for i := range desc.GetPublicColumns() { + c := &desc.GetPublicColumns()[i] found := false for _, wc := range colCfg.wantedColumns { if descpb.ColumnID(wc) == c.ID { diff --git a/pkg/sql/scatter_test.go b/pkg/sql/scatter_test.go index 508086649484..c412a82e1d3a 100644 --- a/pkg/sql/scatter_test.go +++ 
b/pkg/sql/scatter_test.go @@ -136,7 +136,7 @@ func TestScatterResponse(t *testing.T) { } var expectedKey roachpb.Key if i == 0 { - expectedKey = keys.SystemSQLCodec.TablePrefix(uint32(tableDesc.ID)) + expectedKey = keys.SystemSQLCodec.TablePrefix(uint32(tableDesc.GetID())) } else { var err error expectedKey, err = rowenc.TestingMakePrimaryIndexKey(tableDesc, i*10) diff --git a/pkg/sql/schema_changer.go b/pkg/sql/schema_changer.go index 4f862e383fe1..d355bbf88c13 100644 --- a/pkg/sql/schema_changer.go +++ b/pkg/sql/schema_changer.go @@ -342,30 +342,30 @@ func (sc *SchemaChanger) backfillQueryIntoTable( // this writing) this code path is only used for standalone CREATE // TABLE AS statements, which cannot be traced. func (sc *SchemaChanger) maybeBackfillCreateTableAs( - ctx context.Context, table *tabledesc.Immutable, + ctx context.Context, table catalog.TableDescriptor, ) error { if !(table.Adding() && table.IsAs()) { return nil } - log.Infof(ctx, "starting backfill for CREATE TABLE AS with query %q", table.CreateQuery) + log.Infof(ctx, "starting backfill for CREATE TABLE AS with query %q", table.GetCreateQuery()) - return sc.backfillQueryIntoTable(ctx, table.TableDesc(), table.CreateQuery, table.CreateAsOfTime, "ctasBackfill") + return sc.backfillQueryIntoTable(ctx, table.TableDesc(), table.GetCreateQuery(), table.GetCreateAsOfTime(), "ctasBackfill") } func (sc *SchemaChanger) maybeBackfillMaterializedView( - ctx context.Context, table *tabledesc.Immutable, + ctx context.Context, table catalog.TableDescriptor, ) error { if !(table.Adding() && table.MaterializedView()) { return nil } - log.Infof(ctx, "starting backfill for CREATE MATERIALIZED VIEW with query %q", table.ViewQuery) + log.Infof(ctx, "starting backfill for CREATE MATERIALIZED VIEW with query %q", table.GetViewQuery()) - return sc.backfillQueryIntoTable(ctx, table.TableDesc(), table.ViewQuery, table.CreateAsOfTime, "materializedViewBackfill") + return sc.backfillQueryIntoTable(ctx, table.TableDesc(), 
table.GetViewQuery(), table.GetCreateAsOfTime(), "materializedViewBackfill") } // maybe make a table PUBLIC if it's in the ADD state. func (sc *SchemaChanger) maybeMakeAddTablePublic( - ctx context.Context, table *tabledesc.Immutable, + ctx context.Context, table catalog.TableDescriptor, ) error { if !table.Adding() { return nil @@ -380,7 +380,7 @@ func (sc *SchemaChanger) maybeMakeAddTablePublic( } return sc.txn(ctx, func(ctx context.Context, txn *kv.Txn, descsCol *descs.Collection) error { - mut, err := descsCol.GetMutableTableVersionByID(ctx, table.ID, txn) + mut, err := descsCol.GetMutableTableVersionByID(ctx, table.GetID(), txn) if err != nil { return err } @@ -512,7 +512,7 @@ func (sc *SchemaChanger) notFirstInLine(ctx context.Context, desc catalog.Descri // descriptor, it seems possible for a job to be resumed after the mutation // has already been removed. If there's a mutation provided, we should check // whether it actually exists on the table descriptor and exit the job if not. - for i, mutation := range tableDesc.TableDesc().Mutations { + for i, mutation := range tableDesc.GetMutations() { if mutation.MutationID == sc.mutationID { if i != 0 { log.Infof(ctx, @@ -608,7 +608,7 @@ func (sc *SchemaChanger) exec(ctx context.Context) error { return nil } - tableDesc, ok := desc.(*tabledesc.Immutable) + tableDesc, ok := desc.(catalog.TableDescriptor) if !ok { // If our descriptor is not a table, then just drain leases. if err := waitToUpdateLeases(false /* refreshStats */); err != nil { @@ -631,8 +631,8 @@ func (sc *SchemaChanger) exec(ctx context.Context) error { if tableDesc.IsPhysicalTable() { // We've dropped this physical table, let's kick off a GC job. 
dropTime := timeutil.Now().UnixNano() - if tableDesc.TableDesc().DropTime > 0 { - dropTime = tableDesc.TableDesc().DropTime + if tableDesc.GetDropTime() > 0 { + dropTime = tableDesc.GetDropTime() } gcDetails := jobspb.SchemaChangeGCDetails{ Tables: []jobspb.SchemaChangeGCDetails_DroppedID{ @@ -670,7 +670,7 @@ func (sc *SchemaChanger) exec(ctx context.Context) error { if sc.mutationID == descpb.InvalidMutationID { // Nothing more to do. - isCreateTableAs := tableDesc.Adding() && tableDesc.TableDesc().IsAs() + isCreateTableAs := tableDesc.Adding() && tableDesc.IsAs() return waitToUpdateLeases(isCreateTableAs /* refreshStats */) } @@ -784,7 +784,7 @@ func (sc *SchemaChanger) initJobRunningStatus(ctx context.Context) error { } var runStatus jobs.RunningStatus - for _, mutation := range desc.Mutations { + for _, mutation := range desc.GetMutations() { if mutation.MutationID != sc.mutationID { // Mutations are applied in a FIFO order. Only apply the first set of // mutations if they have the mutation ID we're looking for. @@ -1609,7 +1609,7 @@ func (sc *SchemaChanger) maybeReverseMutations(ctx context.Context, causingError return err } - tableDesc := scTable.ImmutableCopy().(*tabledesc.Immutable) + tableDesc := scTable.ImmutableCopy().(catalog.TableDescriptor) // Mark the schema change job as failed and create a rollback job. err = sc.updateJobForRollback(ctx, txn, tableDesc) if err != nil { @@ -1658,12 +1658,12 @@ func (sc *SchemaChanger) maybeReverseMutations(ctx context.Context, causingError // updateJobForRollback updates the schema change job in the case of a rollback. func (sc *SchemaChanger) updateJobForRollback( - ctx context.Context, txn *kv.Txn, tableDesc *tabledesc.Immutable, + ctx context.Context, txn *kv.Txn, tableDesc catalog.TableDescriptor, ) error { // Initialize refresh spans to scan the entire table. 
span := tableDesc.PrimaryIndexSpan(sc.execCfg.Codec) var spanList []jobspb.ResumeSpanList - for _, m := range tableDesc.Mutations { + for _, m := range tableDesc.GetMutations() { if m.MutationID == sc.mutationID { spanList = append(spanList, jobspb.ResumeSpanList{ @@ -2408,9 +2408,9 @@ func (sc *SchemaChanger) queueCleanupJobs( // DeleteTableDescAndZoneConfig removes a table's descriptor and zone config from the KV database. func DeleteTableDescAndZoneConfig( - ctx context.Context, db *kv.DB, codec keys.SQLCodec, tableDesc *tabledesc.Immutable, + ctx context.Context, db *kv.DB, codec keys.SQLCodec, tableDesc catalog.TableDescriptor, ) error { - log.Infof(ctx, "removing table descriptor and zone config for table %d", tableDesc.ID) + log.Infof(ctx, "removing table descriptor and zone config for table %d", tableDesc.GetID()) return db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { if err := txn.SetSystemConfigTrigger(codec.ForSystemTenant()); err != nil { return err @@ -2418,11 +2418,11 @@ func DeleteTableDescAndZoneConfig( b := &kv.Batch{} // Delete the descriptor. - descKey := catalogkeys.MakeDescMetadataKey(codec, tableDesc.ID) + descKey := catalogkeys.MakeDescMetadataKey(codec, tableDesc.GetID()) b.Del(descKey) // Delete the zone config entry for this table, if necessary. 
if codec.ForSystemTenant() { - zoneKeyPrefix := config.MakeZoneKeyPrefix(config.SystemTenantObjectID(tableDesc.ID)) + zoneKeyPrefix := config.MakeZoneKeyPrefix(config.SystemTenantObjectID(tableDesc.GetID())) b.DelRange(zoneKeyPrefix, zoneKeyPrefix.PrefixEnd(), false /* returnKeys */) } return txn.Run(ctx, b) diff --git a/pkg/sql/schema_changer_test.go b/pkg/sql/schema_changer_test.go index fc6e4b82ef72..405b8997d6f8 100644 --- a/pkg/sql/schema_changer_test.go +++ b/pkg/sql/schema_changer_test.go @@ -34,12 +34,12 @@ import ( "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/catalog/lease" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/gcjob" "github.com/cockroachdb/cockroach/pkg/sql/sqltestutils" @@ -151,7 +151,7 @@ INSERT INTO t.test VALUES ('a', 'b'), ('c', 'd'); id, tableDesc.NextMutationID, instance, kvDB, leaseMgr, jobRegistry, &execCfg, cluster.MakeTestingClusterSettings(), ) - tableDesc.Mutations = append(tableDesc.Mutations, descpb.DescriptorMutation{ + tableDesc.TableDesc().Mutations = append(tableDesc.TableDesc().Mutations, descpb.DescriptorMutation{ Descriptor_: &descpb.DescriptorMutation_Index{Index: &index}, Direction: descpb.DescriptorMutation_ADD, State: descpb.DescriptorMutation_DELETE_ONLY, @@ -163,11 +163,11 @@ INSERT INTO t.test VALUES ('a', 'b'), ('c', 'd'); for _, direction := range 
[]descpb.DescriptorMutation_Direction{ descpb.DescriptorMutation_ADD, descpb.DescriptorMutation_DROP, } { - tableDesc.Mutations[0].Direction = direction + tableDesc.GetMutations()[0].Direction = direction expectedVersion++ if err := kvDB.Put( ctx, - catalogkeys.MakeDescMetadataKey(keys.SystemSQLCodec, tableDesc.ID), + catalogkeys.MakeDescMetadataKey(keys.SystemSQLCodec, tableDesc.GetID()), tableDesc.DescriptorProto(), ); err != nil { t.Fatal(err) @@ -189,7 +189,7 @@ INSERT INTO t.test VALUES ('a', 'b'), ('c', 'd'); if newVersion != expectedVersion { t.Fatalf("bad version; e = %d, v = %d", expectedVersion, newVersion) } - state := tableDesc.Mutations[0].State + state := tableDesc.GetMutations()[0].State if state != expectedState { t.Fatalf("bad state; e = %d, v = %d", expectedState, state) } @@ -198,7 +198,7 @@ INSERT INTO t.test VALUES ('a', 'b'), ('c', 'd'); // RunStateMachineBeforeBackfill() doesn't complete the schema change. tableDesc = catalogkv.TestingGetMutableExistingTableDescriptor( kvDB, keys.SystemSQLCodec, "t", "test") - if len(tableDesc.Mutations) == 0 { + if len(tableDesc.GetMutations()) == 0 { t.Fatalf("table expected to have an outstanding schema change: %v", tableDesc) } } @@ -314,7 +314,7 @@ CREATE INDEX foo ON t.test (v) func getTableKeyCount(ctx context.Context, kvDB *kv.DB) (int, error) { tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") - tablePrefix := keys.SystemSQLCodec.TablePrefix(uint32(tableDesc.ID)) + tablePrefix := keys.SystemSQLCodec.TablePrefix(uint32(tableDesc.GetID())) tableEnd := tablePrefix.PrefixEnd() kvs, err := kvDB.Scan(ctx, tablePrefix, tableEnd, 0) return len(kvs), err @@ -791,7 +791,7 @@ CREATE UNIQUE INDEX vidx ON t.test (v); wg.Wait() // Ensure that the table data hasn't been deleted. 
- tablePrefix := keys.SystemSQLCodec.TablePrefix(uint32(tableDesc.ID)) + tablePrefix := keys.SystemSQLCodec.TablePrefix(uint32(tableDesc.GetID())) tableEnd := tablePrefix.PrefixEnd() if kvs, err := kvDB.Scan(ctx, tablePrefix, tableEnd, 0); err != nil { t.Fatal(err) @@ -800,7 +800,7 @@ CREATE UNIQUE INDEX vidx ON t.test (v); } // Check that the table descriptor exists so we know the data will // eventually be deleted. - tbDescKey := catalogkeys.MakeDescMetadataKey(keys.SystemSQLCodec, tableDesc.ID) + tbDescKey := catalogkeys.MakeDescMetadataKey(keys.SystemSQLCodec, tableDesc.GetID()) if gr, err := kvDB.Get(ctx, tbDescKey); err != nil { t.Fatal(err) } else if !gr.Exists() { @@ -990,7 +990,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT); } tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") // Add a zone config for the table. - if _, err := sqltestutils.AddImmediateGCZoneConfig(sqlDB, tableDesc.ID); err != nil { + if _, err := sqltestutils.AddImmediateGCZoneConfig(sqlDB, tableDesc.GetID()); err != nil { t.Fatal(err) } @@ -1220,8 +1220,8 @@ CREATE TABLE t.test ( // Read table descriptor. tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") - if len(tableDesc.Checks) != 3 { - t.Fatalf("Expected 3 checks but got %d ", len(tableDesc.Checks)) + if len(tableDesc.GetChecks()) != 3 { + t.Fatalf("Expected 3 checks but got %d ", len(tableDesc.GetChecks())) } if _, err := sqlDB.Exec("ALTER TABLE t.test DROP v"); err != nil { @@ -1231,16 +1231,16 @@ CREATE TABLE t.test ( // Re-read table descriptor. 
tableDesc = catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") // Only check_ab should remain - if len(tableDesc.Checks) != 1 { + if len(tableDesc.GetChecks()) != 1 { checkExprs := make([]string, 0) - for i := range tableDesc.Checks { - checkExprs = append(checkExprs, tableDesc.Checks[i].Expr) + for i := range tableDesc.GetChecks() { + checkExprs = append(checkExprs, tableDesc.GetChecks()[i].Expr) } - t.Fatalf("Expected 1 check but got %d with CHECK expr %s ", len(tableDesc.Checks), strings.Join(checkExprs, ", ")) + t.Fatalf("Expected 1 check but got %d with CHECK expr %s ", len(tableDesc.GetChecks()), strings.Join(checkExprs, ", ")) } - if tableDesc.Checks[0].Name != "check_ab" { - t.Fatalf("Only check_ab should remain, got: %s ", tableDesc.Checks[0].Name) + if tableDesc.GetChecks()[0].Name != "check_ab" { + t.Fatalf("Only check_ab should remain, got: %s ", tableDesc.GetChecks()[0].Name) } // Test that a constraint being added prevents the column from being dropped. @@ -1415,7 +1415,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT); } tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") - id := tableDesc.ID + id := tableDesc.GetID() ctx := context.Background() upTableVersion = func() { @@ -1556,7 +1556,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT); atomic.StoreUint32(&enableAsyncSchemaChanges, 1) tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") // deal with schema change knob - if _, err := sqltestutils.AddImmediateGCZoneConfig(sqlDB, tableDesc.ID); err != nil { + if _, err := sqltestutils.AddImmediateGCZoneConfig(sqlDB, tableDesc.GetID()); err != nil { t.Fatal(err) } @@ -1572,9 +1572,9 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT); tableDesc = catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") // There is still a DROP INDEX mutation waiting for GC. 
- if e := 1; len(tableDesc.GCMutations) != e { - t.Fatalf("the table has %d instead of %d GC mutations", len(tableDesc.GCMutations), e) - } else if m := tableDesc.GCMutations[0]; m.IndexID != 2 && m.DropTime == 0 && m.JobID == 0 { + if e := 1; len(tableDesc.GetGCMutations()) != e { + t.Fatalf("the table has %d instead of %d GC mutations", len(tableDesc.GetGCMutations()), e) + } else if m := tableDesc.GetGCMutations()[0]; m.IndexID != 2 && m.DropTime == 0 && m.JobID == 0 { t.Fatalf("unexpected GC mutation %v", m) } @@ -1598,8 +1598,8 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT); testutils.SucceedsSoon(t, func() error { tableDesc = catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") - if len(tableDesc.GCMutations) > 0 { - return errors.Errorf("%d GC mutations remaining", len(tableDesc.GCMutations)) + if len(tableDesc.GetGCMutations()) > 0 { + return errors.Errorf("%d GC mutations remaining", len(tableDesc.GetGCMutations())) } return nil }) @@ -1823,8 +1823,8 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT8); // Wait until all the mutations have been processed. testutils.SucceedsSoon(t, func() error { tableDesc = catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") - if len(tableDesc.Mutations) > 0 { - return errors.Errorf("%d mutations remaining", len(tableDesc.Mutations)) + if len(tableDesc.GetMutations()) > 0 { + return errors.Errorf("%d mutations remaining", len(tableDesc.GetMutations())) } return nil }) @@ -1923,14 +1923,14 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT8); } // Add immediate GC TTL to allow index creation purge to complete. 
- if _, err := sqltestutils.AddImmediateGCZoneConfig(sqlDB, tableDesc.ID); err != nil { + if _, err := sqltestutils.AddImmediateGCZoneConfig(sqlDB, tableDesc.GetID()); err != nil { t.Fatal(err) } testutils.SucceedsSoon(t, func() error { tableDesc = catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") - if len(tableDesc.GCMutations) > 0 { - return errors.Errorf("%d gc mutations remaining", len(tableDesc.GCMutations)) + if len(tableDesc.GetGCMutations()) > 0 { + return errors.Errorf("%d gc mutations remaining", len(tableDesc.GetGCMutations())) } return nil }) @@ -1957,7 +1957,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT8); Username: security.RootUserName(), Description: tc.sql, DescriptorIDs: descpb.IDs{ - tableDesc.ID, + tableDesc.GetID(), }, }); err != nil { t.Fatal(err) @@ -1972,7 +1972,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT8); Username: security.RootUserName(), Description: fmt.Sprintf("ROLL BACK JOB %d: %s", jobID, testCases[jobRolledBack].sql), DescriptorIDs: descpb.IDs{ - tableDesc.ID, + tableDesc.GetID(), }, }); err != nil { t.Fatal(err) @@ -2005,7 +2005,7 @@ CREATE TABLE t.test ( t.Fatal(err) } tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") - if tableDesc.Families[0].DefaultColumnID != 0 { + if tableDesc.GetFamilies()[0].DefaultColumnID != 0 { t.Fatalf("default column id not set properly: %s", tableDesc) } @@ -2025,7 +2025,7 @@ CREATE TABLE t.test ( // values. This is done to make the table appear like it were // written in the past when cockroachdb used to write sentinel // values for each table row. 
- startKey := keys.SystemSQLCodec.TablePrefix(uint32(tableDesc.ID)) + startKey := keys.SystemSQLCodec.TablePrefix(uint32(tableDesc.GetID())) kvs, err := kvDB.Scan( ctx, startKey, @@ -2047,7 +2047,7 @@ CREATE TABLE t.test ( t.Fatal(err) } tableDesc = catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") - if tableDesc.Families[0].DefaultColumnID != 2 { + if tableDesc.GetFamilies()[0].DefaultColumnID != 2 { t.Fatalf("default column id not set properly: %s", tableDesc) } @@ -2243,12 +2243,12 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT UNIQUE DEFAULT 23 CREATE FAMILY F3 // TODO(erik): Ignore errors or individually drop indexes in // DELETE_AND_WRITE_ONLY which failed during the creation backfill // as a rollback from a drop. - if e := 1; e != len(tableDesc.Columns) { - t.Fatalf("e = %d, v = %d, columns = %+v", e, len(tableDesc.Columns), tableDesc.Columns) - } else if tableDesc.Columns[0].Name != "k" { - t.Fatalf("columns %+v", tableDesc.Columns) - } else if len(tableDesc.Mutations) != 2 { - t.Fatalf("mutations %+v", tableDesc.Mutations) + if e := 1; e != len(tableDesc.GetPublicColumns()) { + t.Fatalf("e = %d, v = %d, columns = %+v", e, len(tableDesc.GetPublicColumns()), tableDesc.GetPublicColumns()) + } else if tableDesc.GetPublicColumns()[0].Name != "k" { + t.Fatalf("columns %+v", tableDesc.GetPublicColumns()) + } else if len(tableDesc.GetMutations()) != 2 { + t.Fatalf("mutations %+v", tableDesc.GetMutations()) } } @@ -2606,7 +2606,7 @@ CREATE TABLE t.test (k INT NOT NULL, v INT); // GC the old indexes to be dropped after the PK change immediately. 
defer sqltestutils.DisableGCTTLStrictEnforcement(t, sqlDB)() tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") - if _, err := sqltestutils.AddImmediateGCZoneConfig(sqlDB, tableDesc.ID); err != nil { + if _, err := sqltestutils.AddImmediateGCZoneConfig(sqlDB, tableDesc.GetID()); err != nil { t.Fatal(err) } @@ -2818,8 +2818,8 @@ COMMIT; // Ensure that t.test doesn't have any pending mutations // after the primary key change. desc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") - if len(desc.Mutations) != 0 { - t.Fatalf("expected to find 0 mutations, but found %d", len(desc.Mutations)) + if len(desc.GetMutations()) != 0 { + t.Fatalf("expected to find 0 mutations, but found %d", len(desc.GetMutations())) } } @@ -3015,7 +3015,7 @@ ALTER TABLE t.test ALTER PRIMARY KEY USING COLUMNS (v); // Wait for the async schema changer to run. tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") - if _, err := sqltestutils.AddImmediateGCZoneConfig(sqlDB, tableDesc.ID); err != nil { + if _, err := sqltestutils.AddImmediateGCZoneConfig(sqlDB, tableDesc.GetID()); err != nil { t.Fatal(err) } testutils.SucceedsSoon(t, func() error { @@ -3091,8 +3091,8 @@ CREATE TABLE t.test (k INT NOT NULL, v INT); // that the job did not succeed even though it was canceled. 
testutils.SucceedsSoon(t, func() error { tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") - if len(tableDesc.Mutations) != 0 { - return errors.Errorf("expected 0 mutations after cancellation, found %d", len(tableDesc.Mutations)) + if len(tableDesc.GetMutations()) != 0 { + return errors.Errorf("expected 0 mutations after cancellation, found %d", len(tableDesc.GetMutations())) } if tableDesc.GetPrimaryIndex().NumColumns() != 1 || tableDesc.GetPrimaryIndex().GetColumnName(0) != "rowid" { return errors.Errorf("expected primary key change to not succeed after cancellation") @@ -3103,7 +3103,7 @@ CREATE TABLE t.test (k INT NOT NULL, v INT); // Stop any further attempts at cancellation, so the GC jobs don't fail. shouldCancel = false tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") - if _, err := sqltestutils.AddImmediateGCZoneConfig(db, tableDesc.ID); err != nil { + if _, err := sqltestutils.AddImmediateGCZoneConfig(db, tableDesc.GetID()); err != nil { t.Fatal(err) } // Ensure that the writes from the partial new indexes are cleaned up. @@ -3160,7 +3160,7 @@ CREATE TABLE t.test (k INT NOT NULL, v INT); return jobutils.VerifySystemJob(t, sqlRun, 1, jobspb.TypeSchemaChange, jobs.StatusSucceeded, jobs.Record{ Description: "CLEANUP JOB for 'ALTER TABLE t.public.test ALTER PRIMARY KEY USING COLUMNS (k)'", Username: security.RootUserName(), - DescriptorIDs: descpb.IDs{tableDesc.ID}, + DescriptorIDs: descpb.IDs{tableDesc.GetID()}, }) }) } @@ -3393,7 +3393,7 @@ INSERT INTO t.test (k, v, length) VALUES (2, 3, 1); // Wait until both mutations are queued up. 
testutils.SucceedsSoon(t, func() error { tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") - if l := len(tableDesc.Mutations); l != 3 { + if l := len(tableDesc.GetMutations()); l != 3 { return errors.Errorf("number of mutations = %d", l) } return nil @@ -3491,7 +3491,7 @@ INSERT INTO t.test (k, v, length) VALUES (2, 3, 1); } tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") - if l := len(tableDesc.Mutations); l != 3 { + if l := len(tableDesc.GetMutations()); l != 3 { t.Fatalf("number of mutations = %d", l) } @@ -4076,11 +4076,11 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT, pi DECIMAL DEFAULT (DECIMAL '3.14 if err != nil { t.Fatal(err) } - if _, err := sqlDB.Exec(`INSERT INTO system.zones VALUES ($1, $2)`, tableDesc.ID, buf); err != nil { + if _, err := sqlDB.Exec(`INSERT INTO system.zones VALUES ($1, $2)`, tableDesc.GetID(), buf); err != nil { t.Fatal(err) } - if err := zoneExists(sqlDB, &cfg, tableDesc.ID); err != nil { + if err := zoneExists(sqlDB, &cfg, tableDesc.GetID()); err != nil { t.Fatal(err) } @@ -4096,14 +4096,14 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT, pi DECIMAL DEFAULT (DECIMAL '3.14 tableDesc = catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") if tableDesc.Adding() { - t.Fatalf("bad state = %s", tableDesc.State) + t.Fatalf("bad state = %s", tableDesc.GetState()) } - if err := zoneExists(sqlDB, &cfg, tableDesc.ID); err != nil { + if err := zoneExists(sqlDB, &cfg, tableDesc.GetID()); err != nil { t.Fatal(err) } // Ensure that the table data hasn't been deleted. 
- tablePrefix := keys.SystemSQLCodec.TablePrefix(uint32(tableDesc.ID)) + tablePrefix := keys.SystemSQLCodec.TablePrefix(uint32(tableDesc.GetID())) tableEnd := tablePrefix.PrefixEnd() if kvs, err := kvDB.Scan(ctx, tablePrefix, tableEnd, 0); err != nil { t.Fatal(err) @@ -4118,7 +4118,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT, pi DECIMAL DEFAULT (DECIMAL '3.14 return jobutils.VerifySystemJob(t, sqlRun, 0, jobspb.TypeSchemaChangeGC, jobs.StatusRunning, jobs.Record{ Description: "GC for TRUNCATE TABLE t.public.test", Username: security.RootUserName(), - DescriptorIDs: descpb.IDs{tableDesc.ID}, + DescriptorIDs: descpb.IDs{tableDesc.GetID()}, }) }) } @@ -4170,12 +4170,12 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT, pi DECIMAL REFERENCES t.pi (d) DE // Add a zone config. var cfg zonepb.ZoneConfig - cfg, err := sqltestutils.AddImmediateGCZoneConfig(sqlDB, tableDesc.ID) + cfg, err := sqltestutils.AddImmediateGCZoneConfig(sqlDB, tableDesc.GetID()) if err != nil { t.Fatal(err) } - if err := zoneExists(sqlDB, &cfg, tableDesc.ID); err != nil { + if err := zoneExists(sqlDB, &cfg, tableDesc.GetID()); err != nil { t.Fatal(err) } @@ -4212,14 +4212,14 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT, pi DECIMAL REFERENCES t.pi (d) DE // Get the table descriptor after the truncation. newTableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") if newTableDesc.Adding() { - t.Fatalf("bad state = %s", newTableDesc.State) + t.Fatalf("bad state = %s", newTableDesc.GetState()) } - if err := zoneExists(sqlDB, &cfg, newTableDesc.ID); err != nil { + if err := zoneExists(sqlDB, &cfg, newTableDesc.GetID()); err != nil { t.Fatal(err) } // Ensure that the table data has been deleted. 
- tablePrefix := keys.SystemSQLCodec.IndexPrefix(uint32(tableDesc.ID), uint32(tableDesc.GetPrimaryIndexID())) + tablePrefix := keys.SystemSQLCodec.IndexPrefix(uint32(tableDesc.GetID()), uint32(tableDesc.GetPrimaryIndexID())) tableEnd := tablePrefix.PrefixEnd() testutils.SucceedsSoon(t, func() error { if kvs, err := kvDB.Scan(ctx, tablePrefix, tableEnd, 0); err != nil { @@ -4231,7 +4231,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT, pi DECIMAL REFERENCES t.pi (d) DE }) fkTableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "pi") - tablePrefix = keys.SystemSQLCodec.TablePrefix(uint32(fkTableDesc.ID)) + tablePrefix = keys.SystemSQLCodec.TablePrefix(uint32(fkTableDesc.GetID())) tableEnd = tablePrefix.PrefixEnd() if kvs, err := kvDB.Scan(ctx, tablePrefix, tableEnd, 0); err != nil { t.Fatal(err) @@ -4249,7 +4249,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT, pi DECIMAL REFERENCES t.pi (d) DE Username: security.RootUserName(), Description: "TRUNCATE TABLE t.public.test", DescriptorIDs: descpb.IDs{ - tableDesc.ID, + tableDesc.GetID(), }, }); err != nil { t.Fatal(err) @@ -4287,9 +4287,9 @@ INSERT INTO t.child VALUES (1, 2), (2, 3), (3, 4); child := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "child") // Add zone configs for the parent and child tables. - _, err := sqltestutils.AddImmediateGCZoneConfig(sqlDBRaw, parent.ID) + _, err := sqltestutils.AddImmediateGCZoneConfig(sqlDBRaw, parent.GetID()) require.NoError(t, err) - _, err = sqltestutils.AddImmediateGCZoneConfig(sqlDBRaw, child.ID) + _, err = sqltestutils.AddImmediateGCZoneConfig(sqlDBRaw, child.GetID()) require.NoError(t, err) // Truncate the parent now, which should cascade truncate the child. @@ -4303,7 +4303,7 @@ INSERT INTO t.child VALUES (1, 2), (2, 3), (3, 4); testutils.SucceedsSoon(t, func() error { // We only need to scan the parent's table span to verify that // the index data is deleted, since child is interleaved in it. 
- start := keys.SystemSQLCodec.TablePrefix(uint32(parent.ID)) + start := keys.SystemSQLCodec.TablePrefix(uint32(parent.GetID())) end := start.PrefixEnd() kvs, err := kvDB.Scan(ctx, start, end, 0) require.NoError(t, err) @@ -4378,8 +4378,8 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT); // Check that an outstanding schema change exists. tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") - oldID := tableDesc.ID - if lenMutations := len(tableDesc.Mutations); lenMutations != 3 { + oldID := tableDesc.GetID() + if lenMutations := len(tableDesc.GetMutations()); lenMutations != 3 { t.Fatalf("%d outstanding schema change", lenMutations) } @@ -4397,7 +4397,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT); // The new table is truncated. tableDesc = catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") - tablePrefix := keys.SystemSQLCodec.TablePrefix(uint32(tableDesc.ID)) + tablePrefix := keys.SystemSQLCodec.TablePrefix(uint32(tableDesc.GetID())) tableEnd := tablePrefix.PrefixEnd() if kvs, err := kvDB.Scan(context.Background(), tablePrefix, tableEnd, 0); err != nil { t.Fatal(err) @@ -4406,13 +4406,13 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT); } // Col "x" is public and col "v" is dropped. 
- if num := len(tableDesc.Mutations); num > 0 { + if num := len(tableDesc.GetMutations()); num > 0 { t.Fatalf("%d outstanding mutation", num) } - if lenCols := len(tableDesc.Columns); lenCols != 2 { + if lenCols := len(tableDesc.GetPublicColumns()); lenCols != 2 { t.Fatalf("%d columns", lenCols) } - if k, x := tableDesc.Columns[0].Name, tableDesc.Columns[1].Name; k != "k" && x != "x" { + if k, x := tableDesc.GetPublicColumns()[0].Name, tableDesc.GetPublicColumns()[1].Name; k != "k" && x != "x" { t.Fatalf("columns %q, %q in descriptor", k, x) } if checks := tableDesc.AllActiveAndInactiveChecks(); len(checks) != 1 { @@ -4730,8 +4730,8 @@ ALTER TABLE t.test ADD COLUMN c INT AS (v + 4) STORED, ADD COLUMN d INT DEFAULT // The descriptor version hasn't changed. tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") - if tableDesc.Version != 1 { - t.Fatalf("invalid version = %d", tableDesc.Version) + if tableDesc.GetVersion() != 1 { + t.Fatalf("invalid version = %d", tableDesc.GetVersion()) } } @@ -4841,7 +4841,7 @@ func TestCancelSchemaChange(t *testing.T) { Username: security.RootUserName(), Description: tc.sql, DescriptorIDs: descpb.IDs{ - tableDesc.ID, + tableDesc.GetID(), }, }); err != nil { t.Fatal(err) @@ -4852,7 +4852,7 @@ func TestCancelSchemaChange(t *testing.T) { Username: security.RootUserName(), Description: fmt.Sprintf("ROLL BACK JOB %d: %s", jobID, tc.sql), DescriptorIDs: descpb.IDs{ - tableDesc.ID, + tableDesc.GetID(), }, } var err error @@ -4870,7 +4870,7 @@ func TestCancelSchemaChange(t *testing.T) { Username: security.RootUserName(), Description: tc.sql, DescriptorIDs: descpb.IDs{ - tableDesc.ID, + tableDesc.GetID(), }, }); err != nil { t.Fatal(err) @@ -4913,7 +4913,7 @@ func TestCancelSchemaChange(t *testing.T) { // Verify that the data from the canceled CREATE INDEX is cleaned up. 
atomic.StoreUint32(&enableAsyncSchemaChanges, 1) // TODO (lucy): when this test is no longer canceled, have it correctly handle doing GC immediately - if _, err := sqltestutils.AddImmediateGCZoneConfig(db, tableDesc.ID); err != nil { + if _, err := sqltestutils.AddImmediateGCZoneConfig(db, tableDesc.GetID()); err != nil { t.Fatal(err) } testutils.SucceedsSoon(t, func() error { @@ -5200,7 +5200,7 @@ func TestIndexBackfillValidation(t *testing.T) { const maxValue = 1000 backfillCount := int64(0) var db *kv.DB - var tableDesc *tabledesc.Immutable + var tableDesc catalog.TableDescriptor params.Knobs = base.TestingKnobs{ SQLSchemaChanger: &sql.SchemaChangerTestingKnobs{ BackfillChunkSize: maxValue / 5, @@ -5210,7 +5210,7 @@ func TestIndexBackfillValidation(t *testing.T) { count := atomic.AddInt64(&backfillCount, 1) if count == 2 { // drop an index value before validation. - key := keys.SystemSQLCodec.IndexPrefix(uint32(tableDesc.ID), uint32(tableDesc.NextIndexID)) + key := keys.SystemSQLCodec.IndexPrefix(uint32(tableDesc.GetID()), uint32(tableDesc.GetNextIndexID())) kv, err := db.Scan(context.Background(), key, key.PrefixEnd(), 1) if err != nil { t.Error(err) @@ -5257,8 +5257,8 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT); } tableDesc = catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") - if len(tableDesc.PublicNonPrimaryIndexes()) > 0 || len(tableDesc.Mutations) > 0 { - t.Fatalf("descriptor broken %d, %d", len(tableDesc.PublicNonPrimaryIndexes()), len(tableDesc.Mutations)) + if len(tableDesc.PublicNonPrimaryIndexes()) > 0 || len(tableDesc.GetMutations()) > 0 { + t.Fatalf("descriptor broken %d, %d", len(tableDesc.PublicNonPrimaryIndexes()), len(tableDesc.GetMutations())) } } @@ -5271,7 +5271,7 @@ func TestInvertedIndexBackfillValidation(t *testing.T) { const maxValue = 1000 backfillCount := int64(0) var db *kv.DB - var tableDesc *tabledesc.Immutable + var tableDesc catalog.TableDescriptor params.Knobs = base.TestingKnobs{ SQLSchemaChanger: 
&sql.SchemaChangerTestingKnobs{ BackfillChunkSize: maxValue / 5, @@ -5281,7 +5281,7 @@ func TestInvertedIndexBackfillValidation(t *testing.T) { count := atomic.AddInt64(&backfillCount, 1) if count == 2 { // drop an index value before validation. - key := keys.SystemSQLCodec.IndexPrefix(uint32(tableDesc.ID), uint32(tableDesc.NextIndexID)) + key := keys.SystemSQLCodec.IndexPrefix(uint32(tableDesc.GetID()), uint32(tableDesc.GetNextIndexID())) kv, err := db.Scan(context.Background(), key, key.PrefixEnd(), 1) if err != nil { t.Error(err) @@ -5331,8 +5331,8 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v JSON); } tableDesc = catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") - if len(tableDesc.PublicNonPrimaryIndexes()) > 0 || len(tableDesc.Mutations) > 0 { - t.Fatalf("descriptor broken %d, %d", len(tableDesc.PublicNonPrimaryIndexes()), len(tableDesc.Mutations)) + if len(tableDesc.PublicNonPrimaryIndexes()) > 0 || len(tableDesc.GetMutations()) > 0 { + t.Fatalf("descriptor broken %d, %d", len(tableDesc.PublicNonPrimaryIndexes()), len(tableDesc.GetMutations())) } } @@ -5997,7 +5997,7 @@ INSERT INTO t.test (k, v) VALUES (1, 99), (2, 100); Username: security.RootUserName(), Description: "ALTER TABLE t.public.test ADD COLUMN a INT8 AS (v - 1) STORED, ADD CHECK ((a < v) AND (a IS NOT NULL))", DescriptorIDs: descpb.IDs{ - tableDesc.ID, + tableDesc.GetID(), }, }) } @@ -6130,27 +6130,27 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT8); } tableDesc = catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") - if e := 1; e != len(tableDesc.GCMutations) { - t.Fatalf("e = %d, v = %d", e, len(tableDesc.GCMutations)) + if e := 1; e != len(tableDesc.GetGCMutations()) { + t.Fatalf("e = %d, v = %d", e, len(tableDesc.GetGCMutations())) } // Delete the associated job. 
- jobID := tableDesc.GCMutations[0].JobID + jobID := tableDesc.GetGCMutations()[0].JobID if _, err := sqlDB.Exec(fmt.Sprintf("DELETE FROM system.jobs WHERE id=%d", jobID)); err != nil { t.Fatal(err) } // Ensure the GCMutations has not yet been completed. tableDesc = catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") - if e := 1; e != len(tableDesc.GCMutations) { - t.Fatalf("e = %d, v = %d", e, len(tableDesc.GCMutations)) + if e := 1; e != len(tableDesc.GetGCMutations()) { + t.Fatalf("e = %d, v = %d", e, len(tableDesc.GetGCMutations())) } // Enable async schema change processing for purged schema changes. atomic.StoreUint32(&enableAsyncSchemaChanges, 1) // Add immediate GC TTL to allow index creation purge to complete. - if _, err := sqltestutils.AddImmediateGCZoneConfig(sqlDB, tableDesc.ID); err != nil { + if _, err := sqltestutils.AddImmediateGCZoneConfig(sqlDB, tableDesc.GetID()); err != nil { t.Fatal(err) } @@ -6158,8 +6158,8 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT8); // cleared. testutils.SucceedsSoon(t, func() error { tableDesc = catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") - if len(tableDesc.GCMutations) > 0 { - return errors.Errorf("%d gc mutations remaining", len(tableDesc.GCMutations)) + if len(tableDesc.GetGCMutations()) > 0 { + return errors.Errorf("%d gc mutations remaining", len(tableDesc.GetGCMutations())) } return nil }) @@ -6274,7 +6274,7 @@ INSERT INTO t.test VALUES (1, 2), (2, 2); require.NoError(t, err) tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") // Add a zone config for the table. - _, err = sqltestutils.AddImmediateGCZoneConfig(sqlDB, tableDesc.ID) + _, err = sqltestutils.AddImmediateGCZoneConfig(sqlDB, tableDesc.GetID()) require.NoError(t, err) // Try to create a unique index which won't be valid and will need a rollback. 
diff --git a/pkg/sql/scrub.go b/pkg/sql/scrub.go index e6e5f7416da9..4e80b9a8060e 100644 --- a/pkg/sql/scrub.go +++ b/pkg/sql/scrub.go @@ -15,11 +15,11 @@ import ( "fmt" "strings" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/resolver" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/rowcontainer" @@ -193,7 +193,7 @@ func (n *scrubNode) startScrubDatabase(ctx context.Context, p *planner, name *tr if err != nil { return err } - tableDesc := objDesc.(*tabledesc.Immutable) + tableDesc := objDesc.(catalog.TableDescriptor) // Skip non-tables and don't throw an error if we encounter one. if !tableDesc.IsTable() { continue @@ -206,7 +206,7 @@ func (n *scrubNode) startScrubDatabase(ctx context.Context, p *planner, name *tr } func (n *scrubNode) startScrubTable( - ctx context.Context, p *planner, tableDesc *tabledesc.Immutable, tableName *tree.TableName, + ctx context.Context, p *planner, tableDesc catalog.TableDescriptor, tableName *tree.TableName, ) error { ts, hasTS, err := p.getTimestamp(ctx, n.n.AsOf) if err != nil { @@ -284,7 +284,7 @@ func (n *scrubNode) startScrubTable( // getPrimaryColIdxs returns a list of the primary index columns and // their corresponding index in the columns list. 
func getPrimaryColIdxs( - tableDesc *tabledesc.Immutable, columns []*descpb.ColumnDescriptor, + tableDesc catalog.TableDescriptor, columns []*descpb.ColumnDescriptor, ) (primaryColIdxs []int, err error) { for i := 0; i < tableDesc.GetPrimaryIndex().NumColumns(); i++ { colID := tableDesc.GetPrimaryIndex().GetColumnID(i) @@ -344,7 +344,7 @@ func pairwiseOp(left []string, right []string, op string) []string { // createPhysicalCheckOperations will return the physicalCheckOperation // for all indexes on a table. func createPhysicalCheckOperations( - tableDesc *tabledesc.Immutable, tableName *tree.TableName, + tableDesc catalog.TableDescriptor, tableName *tree.TableName, ) (checks []checkOperation) { for _, idx := range tableDesc.ActiveIndexes() { checks = append(checks, newPhysicalCheckOperation(tableName, tableDesc, idx.IndexDesc())) @@ -360,7 +360,7 @@ func createPhysicalCheckOperations( // first invalid index. func createIndexCheckOperations( indexNames tree.NameList, - tableDesc *tabledesc.Immutable, + tableDesc catalog.TableDescriptor, tableName *tree.TableName, asOf hlc.Timestamp, ) (results []checkOperation, err error) { @@ -404,7 +404,7 @@ func createIndexCheckOperations( } return nil, pgerror.Newf(pgcode.UndefinedObject, "specified indexes to check that do not exist on table %q: %v", - tableDesc.Name, strings.Join(missingIndexNames, ", ")) + tableDesc.GetName(), strings.Join(missingIndexNames, ", ")) } return results, nil } @@ -418,7 +418,7 @@ func createConstraintCheckOperations( ctx context.Context, p *planner, constraintNames tree.NameList, - tableDesc *tabledesc.Immutable, + tableDesc catalog.TableDescriptor, tableName *tree.TableName, asOf hlc.Timestamp, ) (results []checkOperation, err error) { @@ -437,7 +437,7 @@ func createConstraintCheckOperations( wantedConstraints[string(constraintName)] = v } else { return nil, pgerror.Newf(pgcode.UndefinedObject, - "constraint %q of relation %q does not exist", constraintName, tableDesc.Name) + "constraint %q of 
relation %q does not exist", constraintName, tableDesc.GetName()) } } constraints = wantedConstraints diff --git a/pkg/sql/scrub_constraint.go b/pkg/sql/scrub_constraint.go index 62c2c09d903a..d605d6a21ff7 100644 --- a/pkg/sql/scrub_constraint.go +++ b/pkg/sql/scrub_constraint.go @@ -15,6 +15,7 @@ import ( "go/constant" "time" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/parser" @@ -27,7 +28,7 @@ import ( // CHECK constraint on a table. type sqlCheckConstraintCheckOperation struct { tableName *tree.TableName - tableDesc *tabledesc.Immutable + tableDesc catalog.TableDescriptor checkDesc *descpb.TableDescriptor_CheckConstraint asOf hlc.Timestamp @@ -51,7 +52,7 @@ type sqlCheckConstraintCheckRun struct { func newSQLCheckConstraintCheckOperation( tableName *tree.TableName, - tableDesc *tabledesc.Immutable, + tableDesc catalog.TableDescriptor, checkDesc *descpb.TableDescriptor_CheckConstraint, asOf hlc.Timestamp, ) *sqlCheckConstraintCheckOperation { @@ -79,7 +80,7 @@ func (o *sqlCheckConstraintCheckOperation) Start(params runParams) error { tn.ExplicitCatalog = true tn.ExplicitSchema = true sel := &tree.SelectClause{ - Exprs: tabledesc.ColumnsSelectors(o.tableDesc.Columns), + Exprs: tabledesc.ColumnsSelectors(o.tableDesc.GetPublicColumns()), From: tree.From{ Tables: tree.TableExprs{&tn}, }, @@ -108,8 +109,8 @@ func (o *sqlCheckConstraintCheckOperation) Start(params runParams) error { o.run.rows = rows // Collect all the columns. - for i := range o.tableDesc.Columns { - o.columns = append(o.columns, &o.tableDesc.Columns[i]) + for i := range o.tableDesc.GetPublicColumns() { + o.columns = append(o.columns, &o.tableDesc.GetPublicColumns()[i]) } // Find the row indexes for all of the primary index columns. 
o.primaryColIdxs, err = getPrimaryColIdxs(o.tableDesc, o.columns) diff --git a/pkg/sql/scrub_fk.go b/pkg/sql/scrub_fk.go index ceba8cbff41e..ff4a71e4d62e 100644 --- a/pkg/sql/scrub_fk.go +++ b/pkg/sql/scrub_fk.go @@ -25,8 +25,8 @@ import ( // sqlForeignKeyCheckOperation is a check on an indexes physical data. type sqlForeignKeyCheckOperation struct { tableName *tree.TableName - tableDesc *tabledesc.Immutable - referencedTableDesc *tabledesc.Immutable + tableDesc catalog.TableDescriptor + referencedTableDesc catalog.TableDescriptor constraint *descpb.ConstraintDetail asOf hlc.Timestamp @@ -45,7 +45,7 @@ type sqlForeignKeyConstraintCheckRun struct { func newSQLForeignKeyCheckOperation( tableName *tree.TableName, - tableDesc *tabledesc.Immutable, + tableDesc catalog.TableDescriptor, constraint descpb.ConstraintDetail, asOf hlc.Timestamp, ) *sqlForeignKeyCheckOperation { @@ -106,9 +106,9 @@ func (o *sqlForeignKeyCheckOperation) Start(params runParams) error { // columns and extra columns in the secondary index used for foreign // key referencing. This also implicitly includes all primary index // columns. - columnsByID := make(map[descpb.ColumnID]*descpb.ColumnDescriptor, len(o.tableDesc.Columns)) - for i := range o.tableDesc.Columns { - columnsByID[o.tableDesc.Columns[i].ID] = &o.tableDesc.Columns[i] + columnsByID := make(map[descpb.ColumnID]*descpb.ColumnDescriptor, len(o.tableDesc.GetPublicColumns())) + for i := range o.tableDesc.GetPublicColumns() { + columnsByID[o.tableDesc.GetPublicColumns()[i].ID] = &o.tableDesc.GetPublicColumns()[i] } // Get primary key columns not included in the FK. 
diff --git a/pkg/sql/scrub_index.go b/pkg/sql/scrub_index.go index 6a1d566e7c1b..8063288d65b3 100644 --- a/pkg/sql/scrub_index.go +++ b/pkg/sql/scrub_index.go @@ -18,7 +18,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/scrub" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/util/hlc" @@ -33,7 +32,7 @@ import ( // that refers to a primary index key that cannot be found. type indexCheckOperation struct { tableName *tree.TableName - tableDesc *tabledesc.Immutable + tableDesc catalog.TableDescriptor indexDesc *descpb.IndexDescriptor asOf hlc.Timestamp @@ -58,7 +57,7 @@ type indexCheckRun struct { func newIndexCheckOperation( tableName *tree.TableName, - tableDesc *tabledesc.Immutable, + tableDesc catalog.TableDescriptor, indexDesc *descpb.IndexDescriptor, asOf hlc.Timestamp, ) *indexCheckOperation { @@ -76,8 +75,8 @@ func (o *indexCheckOperation) Start(params runParams) error { ctx := params.ctx var colToIdx catalog.TableColMap - for i := range o.tableDesc.Columns { - id := o.tableDesc.Columns[i].ID + for i := range o.tableDesc.GetPublicColumns() { + id := o.tableDesc.GetPublicColumns()[i].ID colToIdx.Set(id, i) } @@ -85,7 +84,7 @@ func (o *indexCheckOperation) Start(params runParams) error { for i := 0; i < o.tableDesc.GetPrimaryIndex().NumColumns(); i++ { colID := o.tableDesc.GetPrimaryIndex().GetColumnID(i) - col := &o.tableDesc.Columns[colToIdx.GetDefault(colID)] + col := &o.tableDesc.GetPublicColumns()[colToIdx.GetDefault(colID)] pkColumns = append(pkColumns, col) colToIdx.Set(colID, -1) } @@ -96,7 +95,7 @@ func (o *indexCheckOperation) Start(params runParams) error { // Skip PK column. 
return } - col := &o.tableDesc.Columns[pos] + col := &o.tableDesc.GetPublicColumns()[pos] otherColumns = append(otherColumns, col) } @@ -122,7 +121,7 @@ func (o *indexCheckOperation) Start(params runParams) error { } checkQuery := createIndexCheckQuery( - colNames(pkColumns), colNames(otherColumns), o.tableDesc.ID, o.indexDesc.ID, + colNames(pkColumns), colNames(otherColumns), o.tableDesc.GetID(), o.indexDesc.ID, ) rows, err := params.extendedEvalCtx.ExecCfg.InternalExecutor.Query( diff --git a/pkg/sql/scrub_physical.go b/pkg/sql/scrub_physical.go index 11cfa9f062be..6fb263a32244 100644 --- a/pkg/sql/scrub_physical.go +++ b/pkg/sql/scrub_physical.go @@ -16,7 +16,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/rowcontainer" "github.com/cockroachdb/cockroach/pkg/sql/rowexec" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -29,7 +28,7 @@ var _ checkOperation = &physicalCheckOperation{} // physicalCheckOperation is a check on an indexes physical data. 
type physicalCheckOperation struct { tableName *tree.TableName - tableDesc *tabledesc.Immutable + tableDesc catalog.TableDescriptor indexDesc *descpb.IndexDescriptor // columns is a list of the columns returned in the query result @@ -51,7 +50,7 @@ type physicalCheckRun struct { } func newPhysicalCheckOperation( - tableName *tree.TableName, tableDesc *tabledesc.Immutable, indexDesc *descpb.IndexDescriptor, + tableName *tree.TableName, tableDesc catalog.TableDescriptor, indexDesc *descpb.IndexDescriptor, ) *physicalCheckOperation { return &physicalCheckOperation{ tableName: tableName, @@ -69,14 +68,14 @@ func (o *physicalCheckOperation) Start(params runParams) error { var columnIDs []tree.ColumnID var colIDToIdx catalog.TableColMap columns := make([]*descpb.ColumnDescriptor, len(columnIDs)) - for i := range o.tableDesc.Columns { - colIDToIdx.Set(o.tableDesc.Columns[i].ID, i) + for i := range o.tableDesc.GetPublicColumns() { + colIDToIdx.Set(o.tableDesc.GetPublicColumns()[i].ID, i) } // Collect all of the columns being scanned. if o.indexDesc.ID == o.tableDesc.GetPrimaryIndexID() { - for i := range o.tableDesc.Columns { - columnIDs = append(columnIDs, tree.ColumnID(o.tableDesc.Columns[i].ID)) + for i := range o.tableDesc.GetPublicColumns() { + columnIDs = append(columnIDs, tree.ColumnID(o.tableDesc.GetPublicColumns()[i].ID)) } } else { for _, id := range o.indexDesc.ColumnIDs { @@ -92,7 +91,7 @@ func (o *physicalCheckOperation) Start(params runParams) error { for i := range columnIDs { idx := colIDToIdx.GetDefault(descpb.ColumnID(columnIDs[i])) - columns = append(columns, &o.tableDesc.Columns[idx]) + columns = append(columns, &o.tableDesc.GetPublicColumns()[idx]) } // Find the row indexes for all of the primary index columns. 
diff --git a/pkg/sql/scrub_test.go b/pkg/sql/scrub_test.go index 5cb5cb2da662..268f23b686d3 100644 --- a/pkg/sql/scrub_test.go +++ b/pkg/sql/scrub_test.go @@ -63,8 +63,8 @@ INSERT INTO t."tEst" VALUES (10, 20); secondaryIndex := tableDesc.PublicNonPrimaryIndexes()[0] var colIDtoRowIndex catalog.TableColMap - colIDtoRowIndex.Set(tableDesc.Columns[0].ID, 0) - colIDtoRowIndex.Set(tableDesc.Columns[1].ID, 1) + colIDtoRowIndex.Set(tableDesc.GetPublicColumns()[0].ID, 0) + colIDtoRowIndex.Set(tableDesc.GetPublicColumns()[1].ID, 1) // Construct the secondary index key that is currently in the // database. @@ -132,8 +132,8 @@ CREATE INDEX secondary ON t.test (v); secondaryIndex := tableDesc.PublicNonPrimaryIndexes()[0] var colIDtoRowIndex catalog.TableColMap - colIDtoRowIndex.Set(tableDesc.Columns[0].ID, 0) - colIDtoRowIndex.Set(tableDesc.Columns[1].ID, 1) + colIDtoRowIndex.Set(tableDesc.GetPublicColumns()[0].ID, 0) + colIDtoRowIndex.Set(tableDesc.GetPublicColumns()[1].ID, 1) // Construct datums and secondary k/v for our row values (k, v). values := []tree.Datum{tree.NewDInt(10), tree.NewDInt(314)} @@ -226,9 +226,9 @@ INSERT INTO t.test VALUES (10, 20, 1337); secondaryIndex := tableDesc.PublicNonPrimaryIndexes()[0] var colIDtoRowIndex catalog.TableColMap - colIDtoRowIndex.Set(tableDesc.Columns[0].ID, 0) - colIDtoRowIndex.Set(tableDesc.Columns[1].ID, 1) - colIDtoRowIndex.Set(tableDesc.Columns[2].ID, 2) + colIDtoRowIndex.Set(tableDesc.GetPublicColumns()[0].ID, 0) + colIDtoRowIndex.Set(tableDesc.GetPublicColumns()[1].ID, 1) + colIDtoRowIndex.Set(tableDesc.GetPublicColumns()[2].ID, 2) // Generate the existing secondary index key. 
values := []tree.Datum{tree.NewDInt(10), tree.NewDInt(20), tree.NewDInt(1337)} @@ -345,8 +345,8 @@ INSERT INTO t.test VALUES (10, 2); tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") var colIDtoRowIndex catalog.TableColMap - colIDtoRowIndex.Set(tableDesc.Columns[0].ID, 0) - colIDtoRowIndex.Set(tableDesc.Columns[1].ID, 1) + colIDtoRowIndex.Set(tableDesc.GetPublicColumns()[0].ID, 0) + colIDtoRowIndex.Set(tableDesc.GetPublicColumns()[1].ID, 1) // Create the primary index key. values := []tree.Datum{tree.NewDInt(10), tree.NewDInt(2)} @@ -359,7 +359,7 @@ INSERT INTO t.test VALUES (10, 2); } // Add the family suffix to the key. - family := tableDesc.Families[0] + family := tableDesc.GetFamilies()[0] primaryIndexKey = keys.MakeFamilyKey(primaryIndexKey, uint32(family.ID)) // Generate a k/v that has a different value that violates the @@ -367,7 +367,7 @@ INSERT INTO t.test VALUES (10, 2); values = []tree.Datum{tree.NewDInt(10), tree.NewDInt(0)} // Encode the column value. valueBuf, err := rowenc.EncodeTableValue( - []byte(nil), tableDesc.Columns[1].ID, values[1], []byte(nil)) + []byte(nil), tableDesc.GetPublicColumns()[1].ID, values[1], []byte(nil)) if err != nil { t.Fatalf("unexpected error: %s", err) } @@ -448,8 +448,8 @@ func TestScrubFKConstraintFKMissing(t *testing.T) { secondaryIndex := tableDesc.PublicNonPrimaryIndexes()[0] var colIDtoRowIndex catalog.TableColMap - colIDtoRowIndex.Set(tableDesc.Columns[0].ID, 0) - colIDtoRowIndex.Set(tableDesc.Columns[1].ID, 1) + colIDtoRowIndex.Set(tableDesc.GetPublicColumns()[0].ID, 0) + colIDtoRowIndex.Set(tableDesc.GetPublicColumns()[1].ID, 1) // Construct the secondary index key entry as it exists in the // database. 
@@ -586,8 +586,8 @@ INSERT INTO t.test VALUES (217, 314); values := []tree.Datum{tree.NewDInt(217), tree.NewDInt(314)} var colIDtoRowIndex catalog.TableColMap - colIDtoRowIndex.Set(tableDesc.Columns[0].ID, 0) - colIDtoRowIndex.Set(tableDesc.Columns[1].ID, 1) + colIDtoRowIndex.Set(tableDesc.GetPublicColumns()[0].ID, 0) + colIDtoRowIndex.Set(tableDesc.GetPublicColumns()[1].ID, 1) // Create the primary index key primaryIndexKeyPrefix := rowenc.MakeIndexKeyPrefix( @@ -599,7 +599,7 @@ INSERT INTO t.test VALUES (217, 314); } // Add the family suffix to the key. - family := tableDesc.Families[0] + family := tableDesc.GetFamilies()[0] primaryIndexKey = keys.MakeFamilyKey(primaryIndexKey, uint32(family.ID)) // Create an empty sentinel value. @@ -668,9 +668,9 @@ INSERT INTO t.test VALUES (217, 314, 1337); values := []tree.Datum{tree.NewDInt(217), tree.NewDInt(314), tree.NewDInt(1337)} var colIDtoRowIndex catalog.TableColMap - colIDtoRowIndex.Set(tableDesc.Columns[0].ID, 0) - colIDtoRowIndex.Set(tableDesc.Columns[1].ID, 1) - colIDtoRowIndex.Set(tableDesc.Columns[2].ID, 2) + colIDtoRowIndex.Set(tableDesc.GetPublicColumns()[0].ID, 0) + colIDtoRowIndex.Set(tableDesc.GetPublicColumns()[1].ID, 1) + colIDtoRowIndex.Set(tableDesc.GetPublicColumns()[2].ID, 2) // Create the primary index key primaryIndexKeyPrefix := rowenc.MakeIndexKeyPrefix( @@ -683,12 +683,12 @@ INSERT INTO t.test VALUES (217, 314, 1337); // Add the family suffix to the key, in particular we care about the // second column family. - family := tableDesc.Families[1] + family := tableDesc.GetFamilies()[1] primaryIndexKey = keys.MakeFamilyKey(primaryIndexKey, uint32(family.ID)) // Encode the second column value. 
valueBuf, err := rowenc.EncodeTableValue( - []byte(nil), tableDesc.Columns[1].ID, values[1], []byte(nil)) + []byte(nil), tableDesc.GetPublicColumns()[1].ID, values[1], []byte(nil)) if err != nil { t.Fatalf("unexpected error: %s", err) } @@ -774,8 +774,8 @@ CREATE TABLE t.test ( values := []tree.Datum{tree.NewDInt(217), tree.NewDInt(314)} var colIDtoRowIndex catalog.TableColMap - colIDtoRowIndex.Set(tableDesc.Columns[0].ID, 0) - colIDtoRowIndex.Set(tableDesc.Columns[1].ID, 1) + colIDtoRowIndex.Set(tableDesc.GetPublicColumns()[0].ID, 0) + colIDtoRowIndex.Set(tableDesc.GetPublicColumns()[1].ID, 1) // Create the primary index key primaryIndexKeyPrefix := rowenc.MakeIndexKeyPrefix( @@ -787,11 +787,11 @@ CREATE TABLE t.test ( } // Add the correct family suffix to the key. - primaryIndexKeyWithFamily := keys.MakeFamilyKey(primaryIndexKey, uint32(tableDesc.Families[1].ID)) + primaryIndexKeyWithFamily := keys.MakeFamilyKey(primaryIndexKey, uint32(tableDesc.GetFamilies()[1].ID)) // Encode the second column value. valueBuf, err := rowenc.EncodeTableValue( - []byte(nil), tableDesc.Columns[1].ID, values[1], []byte(nil)) + []byte(nil), tableDesc.GetPublicColumns()[1].ID, values[1], []byte(nil)) if err != nil { t.Fatalf("unexpected error: %s", err) } @@ -805,11 +805,11 @@ CREATE TABLE t.test ( // Create a k/v with an incorrect family suffix to the key. primaryIndexKeyWithFamily = keys.MakeFamilyKey(primaryIndexKey, - uint32(oldTableDesc.Families[1].ID)) + uint32(oldTableDesc.GetFamilies()[1].ID)) // Encode the second column value. 
valueBuf, err = rowenc.EncodeTableValue( - []byte(nil), tableDesc.Columns[1].ID, values[1], []byte(nil)) + []byte(nil), tableDesc.GetPublicColumns()[1].ID, values[1], []byte(nil)) if err != nil { t.Fatalf("unexpected error: %s", err) } @@ -878,9 +878,9 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v1 INT, v2 INT); values := []tree.Datum{tree.NewDInt(217), tree.NewDInt(314), tree.NewDInt(1337)} var colIDtoRowIndex catalog.TableColMap - colIDtoRowIndex.Set(tableDesc.Columns[0].ID, 0) - colIDtoRowIndex.Set(tableDesc.Columns[1].ID, 1) - colIDtoRowIndex.Set(tableDesc.Columns[2].ID, 2) + colIDtoRowIndex.Set(tableDesc.GetPublicColumns()[0].ID, 0) + colIDtoRowIndex.Set(tableDesc.GetPublicColumns()[1].ID, 1) + colIDtoRowIndex.Set(tableDesc.GetPublicColumns()[2].ID, 2) // Create the primary index key primaryIndexKeyPrefix := rowenc.MakeIndexKeyPrefix( @@ -891,12 +891,12 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v1 INT, v2 INT); t.Fatalf("unexpected error: %s", err) } // Add the default family suffix to the key. - primaryIndexKey = keys.MakeFamilyKey(primaryIndexKey, uint32(tableDesc.Families[0].ID)) + primaryIndexKey = keys.MakeFamilyKey(primaryIndexKey, uint32(tableDesc.GetFamilies()[0].ID)) // Encode the second column values. The second column is encoded with // a garbage colIDDiff. valueBuf, err := rowenc.EncodeTableValue( - []byte(nil), tableDesc.Columns[1].ID, values[1], []byte(nil)) + []byte(nil), tableDesc.GetPublicColumns()[1].ID, values[1], []byte(nil)) if err != nil { t.Fatalf("unexpected error: %s", err) } diff --git a/pkg/sql/sequence.go b/pkg/sql/sequence.go index fc6a849a028c..15c036ef7e51 100644 --- a/pkg/sql/sequence.go +++ b/pkg/sql/sequence.go @@ -44,7 +44,7 @@ func (p *planner) GetSerialSequenceNameFromColumn( if err != nil { return nil, err } - for _, col := range tableDesc.Columns { + for _, col := range tableDesc.GetPublicColumns() { if col.ColName() == columnName { // Seems like we have no way of detecting whether this was done using "SERIAL". 
// Guess by assuming it is SERIAL it it uses only one sequence. @@ -86,13 +86,13 @@ func (p *planner) IncrementSequence(ctx context.Context, seqName *tree.TableName return 0, err } - seqOpts := descriptor.SequenceOpts + seqOpts := descriptor.GetSequenceOpts() var val int64 if seqOpts.Virtual { rowid := builtins.GenerateUniqueInt(p.EvalContext().NodeID.SQLInstanceID()) val = int64(rowid) } else { - seqValueKey := p.ExecCfg().Codec.SequenceKey(uint32(descriptor.ID)) + seqValueKey := p.ExecCfg().Codec.SequenceKey(uint32(descriptor.GetID())) val, err = kv.IncrementValRetryable( ctx, p.txn.DB(), seqValueKey, seqOpts.Increment) if err != nil { @@ -106,13 +106,13 @@ func (p *planner) IncrementSequence(ctx context.Context, seqName *tree.TableName } } - p.ExtendedEvalContext().SessionMutator.RecordLatestSequenceVal(uint32(descriptor.ID), val) + p.ExtendedEvalContext().SessionMutator.RecordLatestSequenceVal(uint32(descriptor.GetID()), val) return val, nil } -func boundsExceededError(descriptor *tabledesc.Immutable) error { - seqOpts := descriptor.SequenceOpts +func boundsExceededError(descriptor catalog.TableDescriptor) error { + seqOpts := descriptor.GetSequenceOpts() isAscending := seqOpts.Increment > 0 var word string @@ -124,10 +124,11 @@ func boundsExceededError(descriptor *tabledesc.Immutable) error { word = "minimum" value = seqOpts.MinValue } + name := descriptor.GetName() return pgerror.Newf( pgcode.SequenceGeneratorLimitExceeded, `reached %s value of sequence %q (%d)`, word, - tree.ErrString((*tree.Name)(&descriptor.Name)), value) + tree.ErrString((*tree.Name)(&name)), value) } // GetLatestValueInSessionForSequence implements the tree.SequenceOperators interface. 
@@ -140,7 +141,7 @@ func (p *planner) GetLatestValueInSessionForSequence( return 0, err } - val, ok := p.SessionData().SequenceState.GetLastValueByID(uint32(descriptor.ID)) + val, ok := p.SessionData().SequenceState.GetLastValueByID(uint32(descriptor.GetID())) if !ok { return 0, pgerror.Newf( pgcode.ObjectNotInPrerequisiteState, @@ -167,7 +168,7 @@ func (p *planner) SetSequenceValue( return err } - if descriptor.SequenceOpts.Virtual { + if descriptor.GetSequenceOpts().Virtual { // TODO(knz): we currently return an error here, but if/when // CockroachDB grows to automatically make sequences virtual when // clients don't expect it, we may need to make this a no-op @@ -211,12 +212,12 @@ func MakeSequenceKeyVal( // GetSequenceValue returns the current value of the sequence. func (p *planner) GetSequenceValue( - ctx context.Context, codec keys.SQLCodec, desc *tabledesc.Immutable, + ctx context.Context, codec keys.SQLCodec, desc catalog.TableDescriptor, ) (int64, error) { - if desc.SequenceOpts == nil { + if desc.GetSequenceOpts() == nil { return 0, errors.New("descriptor is not a sequence") } - keyValue, err := p.txn.Get(ctx, codec.SequenceKey(uint32(desc.ID))) + keyValue, err := p.txn.Get(ctx, codec.SequenceKey(uint32(desc.GetID()))) if err != nil { return 0, err } diff --git a/pkg/sql/sequence_select.go b/pkg/sql/sequence_select.go index f2c5bf678d26..c9187c4086d1 100644 --- a/pkg/sql/sequence_select.go +++ b/pkg/sql/sequence_select.go @@ -13,7 +13,7 @@ package sql import ( "context" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/errors" ) @@ -21,7 +21,7 @@ import ( type sequenceSelectNode struct { optColumnsSlot - desc *tabledesc.Immutable + desc catalog.TableDescriptor val int64 done bool @@ -29,8 +29,8 @@ type sequenceSelectNode struct { var _ planNode = &sequenceSelectNode{} -func (p 
*planner) SequenceSelectNode(desc *tabledesc.Immutable) (planNode, error) { - if desc.SequenceOpts == nil { +func (p *planner) SequenceSelectNode(desc catalog.TableDescriptor) (planNode, error) { + if desc.GetSequenceOpts() == nil { return nil, errors.New("descriptor is not a sequence") } return &sequenceSelectNode{ diff --git a/pkg/sql/sequence_test.go b/pkg/sql/sequence_test.go index 8f523ccba7a9..25f84ebe329e 100644 --- a/pkg/sql/sequence_test.go +++ b/pkg/sql/sequence_test.go @@ -18,9 +18,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" @@ -163,8 +163,8 @@ func assertColumnOwnsSequences( t *testing.T, kvDB *kv.DB, dbName string, tbName string, colIdx int, seqNames []string, ) { tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, dbName, tbName) - col := tableDesc.GetColumns()[colIdx] - var seqDescs []*tabledesc.Immutable + col := tableDesc.GetPublicColumns()[colIdx] + var seqDescs []catalog.TableDescriptor for _, seqName := range seqNames { seqDescs = append( seqDescs, @@ -184,8 +184,8 @@ func assertColumnOwnsSequences( t.Fatalf("unexpected sequence id. 
expected %d got %d", seqDescs[i].GetID(), seqID) } - ownerTableID := seqDescs[i].SequenceOpts.SequenceOwner.OwnerTableID - ownerColID := seqDescs[i].SequenceOpts.SequenceOwner.OwnerColumnID + ownerTableID := seqDescs[i].GetSequenceOpts().SequenceOwner.OwnerTableID + ownerColID := seqDescs[i].GetSequenceOpts().SequenceOwner.OwnerColumnID if ownerTableID != tableDesc.GetID() || ownerColID != col.ID { t.Fatalf( "unexpected sequence owner. expected table id %d, got: %d; expected column id %d, got :%d", @@ -363,7 +363,7 @@ func addOwnedSequence( kvDB, keys.SystemSQLCodec, dbName, tableName) tableDesc.GetColumns()[colIdx].OwnsSequenceIds = append( - tableDesc.GetColumns()[colIdx].OwnsSequenceIds, seqDesc.ID) + tableDesc.GetColumns()[colIdx].OwnsSequenceIds, seqDesc.GetID()) err := kvDB.Put( context.Background(), @@ -385,12 +385,12 @@ func breakOwnershipMapping( for colIdx := range tableDesc.GetColumns() { for i := range tableDesc.GetColumns()[colIdx].OwnsSequenceIds { - if tableDesc.GetColumns()[colIdx].OwnsSequenceIds[i] == seqDesc.ID { + if tableDesc.GetColumns()[colIdx].OwnsSequenceIds[i] == seqDesc.GetID() { tableDesc.GetColumns()[colIdx].OwnsSequenceIds[i] = math.MaxInt32 } } } - seqDesc.SequenceOpts.SequenceOwner.OwnerTableID = math.MaxInt32 + seqDesc.GetSequenceOpts().SequenceOwner.OwnerTableID = math.MaxInt32 err := kvDB.Put( context.Background(), diff --git a/pkg/sql/show_create.go b/pkg/sql/show_create.go index bf0bf3a09403..111cfb7b2b08 100644 --- a/pkg/sql/show_create.go +++ b/pkg/sql/show_create.go @@ -18,7 +18,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/catformat" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemaexpr" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" 
"github.com/cockroachdb/cockroach/pkg/sql/sessiondata" @@ -235,12 +234,12 @@ func (p *planner) ShowCreate( ctx context.Context, dbPrefix string, allDescs []descpb.Descriptor, - desc *tabledesc.Immutable, + desc catalog.TableDescriptor, displayOptions ShowCreateDisplayOptions, ) (string, error) { var stmt string var err error - tn := tree.MakeUnqualifiedTableName(tree.Name(desc.Name)) + tn := tree.MakeUnqualifiedTableName(tree.Name(desc.GetName())) if desc.IsView() { stmt, err = ShowCreateView(ctx, &tn, desc) } else if desc.IsSequence() { diff --git a/pkg/sql/show_create_clauses.go b/pkg/sql/show_create_clauses.go index cecc947d1c63..251bb6df991f 100644 --- a/pkg/sql/show_create_clauses.go +++ b/pkg/sql/show_create_clauses.go @@ -269,7 +269,7 @@ func showFamilyClause(desc catalog.TableDescriptor, f *tree.FmtCtx) { // showCreateLocality creates the LOCALITY clauses for a CREATE statement, writing them // to tree.FmtCtx f. func showCreateLocality(desc catalog.TableDescriptor, f *tree.FmtCtx) error { - if c := desc.TableDesc().LocalityConfig; c != nil { + if c := desc.GetLocalityConfig(); c != nil { f.WriteString(" LOCALITY ") return tabledesc.FormatTableLocalityConfig(c, f) } diff --git a/pkg/sql/show_fingerprints.go b/pkg/sql/show_fingerprints.go index 3472fe97afc3..e69110af0de7 100644 --- a/pkg/sql/show_fingerprints.go +++ b/pkg/sql/show_fingerprints.go @@ -16,8 +16,8 @@ import ( "strings" "github.com/cockroachdb/cockroach/pkg/security" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/privilege" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" @@ -28,7 +28,7 @@ import ( type showFingerprintsNode struct { optColumnsSlot - tableDesc *tabledesc.Immutable + tableDesc 
catalog.TableDescriptor indexes []*descpb.IndexDescriptor run showFingerprintsRun @@ -96,7 +96,7 @@ func (n *showFingerprintsNode) Next(params runParams) (bool, error) { } index := n.indexes[n.run.rowIdx] - cols := make([]string, 0, len(n.tableDesc.Columns)) + cols := make([]string, 0, len(n.tableDesc.GetPublicColumns())) addColumn := func(col *descpb.ColumnDescriptor) { // TODO(dan): This is known to be a flawed way to fingerprint. Any datum // with the same string representation is fingerprinted the same, even @@ -110,13 +110,13 @@ func (n *showFingerprintsNode) Next(params runParams) (bool, error) { } if index.ID == n.tableDesc.GetPrimaryIndexID() { - for i := range n.tableDesc.Columns { - addColumn(&n.tableDesc.Columns[i]) + for i := range n.tableDesc.GetPublicColumns() { + addColumn(&n.tableDesc.GetPublicColumns()[i]) } } else { colsByID := make(map[descpb.ColumnID]*descpb.ColumnDescriptor) - for i := range n.tableDesc.Columns { - col := &n.tableDesc.Columns[i] + for i := range n.tableDesc.GetPublicColumns() { + col := &n.tableDesc.GetPublicColumns()[i] colsByID[col.ID] = col } colIDs := append(append(index.ColumnIDs, index.ExtraColumnIDs...), index.StoreColumnIDs...) @@ -140,7 +140,7 @@ func (n *showFingerprintsNode) Next(params runParams) (bool, error) { sql := fmt.Sprintf(`SELECT xor_agg(fnv64(%s))::string AS fingerprint FROM [%d AS t]@{FORCE_INDEX=[%d]} - `, strings.Join(cols, `,`), n.tableDesc.ID, index.ID) + `, strings.Join(cols, `,`), n.tableDesc.GetID(), index.ID) // If were'in in an AOST context, propagate it to the inner statement so that // the inner statement gets planned with planner.avoidCachedDescriptors set, // like the outter one. 
diff --git a/pkg/sql/show_stats.go b/pkg/sql/show_stats.go index 996b54312758..6361738c2e54 100644 --- a/pkg/sql/show_stats.go +++ b/pkg/sql/show_stats.go @@ -14,9 +14,9 @@ import ( "context" encjson "encoding/json" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/stats" "github.com/cockroachdb/cockroach/pkg/sql/types" @@ -79,7 +79,7 @@ func (p *planner) ShowTableStats(ctx context.Context, n *tree.ShowTableStats) (p FROM system.table_statistics WHERE "tableID" = $1 ORDER BY "createdAt"`, - desc.ID, + desc.GetID(), ) if err != nil { return nil, err @@ -189,7 +189,7 @@ func (p *planner) ShowTableStats(ctx context.Context, n *tree.ShowTableStats) (p }, nil } -func statColumnString(desc *tabledesc.Immutable, colID tree.Datum) string { +func statColumnString(desc catalog.TableDescriptor, colID tree.Datum) string { id := descpb.ColumnID(*colID.(*tree.DInt)) colDesc, err := desc.FindColumnByID(id) if err != nil { diff --git a/pkg/sql/span/BUILD.bazel b/pkg/sql/span/BUILD.bazel index 7ab3ebd4a2ea..7119a2607433 100644 --- a/pkg/sql/span/BUILD.bazel +++ b/pkg/sql/span/BUILD.bazel @@ -10,7 +10,6 @@ go_library( "//pkg/roachpb", "//pkg/sql/catalog", "//pkg/sql/catalog/descpb", - "//pkg/sql/catalog/tabledesc", "//pkg/sql/inverted", "//pkg/sql/opt/constraint", "//pkg/sql/opt/exec", diff --git a/pkg/sql/span/span_builder.go b/pkg/sql/span/span_builder.go index 934e7abbe363..ad9649cf514c 100644 --- a/pkg/sql/span/span_builder.go +++ b/pkg/sql/span/span_builder.go @@ -17,7 +17,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog" 
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/inverted" "github.com/cockroachdb/cockroach/pkg/sql/opt/constraint" "github.com/cockroachdb/cockroach/pkg/sql/opt/exec" @@ -34,7 +33,7 @@ import ( type Builder struct { evalCtx *tree.EvalContext codec keys.SQLCodec - table *tabledesc.Immutable + table catalog.TableDescriptor index *descpb.IndexDescriptor indexColTypes []*types.T indexColDirs []descpb.IndexDescriptor_Direction @@ -60,7 +59,7 @@ var _ = (*Builder).UnsetNeededFamilies func MakeBuilder( evalCtx *tree.EvalContext, codec keys.SQLCodec, - table *tabledesc.Immutable, + table catalog.TableDescriptor, index *descpb.IndexDescriptor, ) *Builder { s := &Builder{ @@ -78,7 +77,7 @@ func MakeBuilder( s.indexColTypes = make([]*types.T, len(columnIDs)) for i, colID := range columnIDs { // TODO (rohany): do I need to look at table columns with mutations here as well? - for _, col := range table.Columns { + for _, col := range table.GetPublicColumns() { if col.ID == colID { s.indexColTypes[i] = col.Type break @@ -102,7 +101,7 @@ func MakeBuilder( s.interstices[sharedPrefixLen]) } s.interstices[sharedPrefixLen] = rowenc.EncodePartialTableIDIndexID( - s.interstices[sharedPrefixLen], table.ID, index.ID) + s.interstices[sharedPrefixLen], table.GetID(), index.ID) } return s @@ -198,8 +197,8 @@ func (s *Builder) CanSplitSpanIntoSeparateFamilies( // * The table is not a special system table. (System tables claim to have // column families, but actually do not, since they're written to with // raw KV puts in a "legacy" way.) 
- isSystemTable := s.table.ID > 0 && s.table.ID < keys.MaxReservedDescID - return !isSystemTable && s.index.Unique && len(s.table.Families) > 1 && + isSystemTable := s.table.GetID() > 0 && s.table.GetID() < keys.MaxReservedDescID + return !isSystemTable && s.index.Unique && len(s.table.GetFamilies()) > 1 && (s.index.ID == s.table.GetPrimaryIndexID() || // Secondary index specific checks. (s.index.Version >= descpb.SecondaryIndexFamilyFormatVersion && @@ -207,7 +206,7 @@ func (s *Builder) CanSplitSpanIntoSeparateFamilies( len(s.index.StoreColumnIDs) > 0 && s.index.Type == descpb.IndexDescriptor_FORWARD)) && prefixLen == len(s.index.ColumnIDs) && - numNeededFamilies < len(s.table.Families) + numNeededFamilies < len(s.table.GetFamilies()) } // Functions for optimizer related span generation are below. diff --git a/pkg/sql/span/span_builder_test.go b/pkg/sql/span/span_builder_test.go index 6775029fc465..5d99a13ed92c 100644 --- a/pkg/sql/span/span_builder_test.go +++ b/pkg/sql/span/span_builder_test.go @@ -23,7 +23,7 @@ func TestSpanBuilderDoesNotSplitSystemTableFamilySpans(t *testing.T) { st := cluster.MakeTestingClusterSettings() evalCtx := tree.MakeTestingEvalContext(st) builder := MakeBuilder(&evalCtx, keys.SystemSQLCodec, systemschema.DescriptorTable, - &systemschema.DescriptorTable.PrimaryIndex) + systemschema.DescriptorTable.GetPrimaryIndex().IndexDesc()) if res := builder.CanSplitSpanIntoSeparateFamilies( 1, 1, false); res { diff --git a/pkg/sql/split.go b/pkg/sql/split.go index 7096d87f0e36..04ec4c4cd660 100644 --- a/pkg/sql/split.go +++ b/pkg/sql/split.go @@ -17,7 +17,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" 
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" @@ -29,7 +28,7 @@ import ( type splitNode struct { optColumnsSlot - tableDesc *tabledesc.Immutable + tableDesc catalog.TableDescriptor index *descpb.IndexDescriptor rows planNode run splitRun diff --git a/pkg/sql/stats/automatic_stats_test.go b/pkg/sql/stats/automatic_stats_test.go index 8a33877d7532..58ccad109906 100644 --- a/pkg/sql/stats/automatic_stats_test.go +++ b/pkg/sql/stats/automatic_stats_test.go @@ -72,34 +72,34 @@ func TestMaybeRefreshStats(t *testing.T) { refresher := MakeRefresher(st, executor, cache, time.Microsecond /* asOfTime */) // There should not be any stats yet. - if err := checkStatsCount(ctx, cache, descA.ID, 0 /* expected */); err != nil { + if err := checkStatsCount(ctx, cache, descA.GetID(), 0 /* expected */); err != nil { t.Fatal(err) } // There are no stats yet, so this must refresh the statistics on table t // even though rowsAffected=0. refresher.maybeRefreshStats( - ctx, s.Stopper(), descA.ID, 0 /* rowsAffected */, time.Microsecond, /* asOf */ + ctx, s.Stopper(), descA.GetID(), 0 /* rowsAffected */, time.Microsecond, /* asOf */ ) - if err := checkStatsCount(ctx, cache, descA.ID, 1 /* expected */); err != nil { + if err := checkStatsCount(ctx, cache, descA.GetID(), 1 /* expected */); err != nil { t.Fatal(err) } // Try to refresh again. With rowsAffected=0, the probability of a refresh // is 0, so refreshing will not succeed. refresher.maybeRefreshStats( - ctx, s.Stopper(), descA.ID, 0 /* rowsAffected */, time.Microsecond, /* asOf */ + ctx, s.Stopper(), descA.GetID(), 0 /* rowsAffected */, time.Microsecond, /* asOf */ ) - if err := checkStatsCount(ctx, cache, descA.ID, 1 /* expected */); err != nil { + if err := checkStatsCount(ctx, cache, descA.GetID(), 1 /* expected */); err != nil { t.Fatal(err) } // With rowsAffected=10, refreshing should work. 
Since there are more rows // updated than exist in the table, the probability of a refresh is 100%. refresher.maybeRefreshStats( - ctx, s.Stopper(), descA.ID, 10 /* rowsAffected */, time.Microsecond, /* asOf */ + ctx, s.Stopper(), descA.GetID(), 10 /* rowsAffected */, time.Microsecond, /* asOf */ ) - if err := checkStatsCount(ctx, cache, descA.ID, 2 /* expected */); err != nil { + if err := checkStatsCount(ctx, cache, descA.GetID(), 2 /* expected */); err != nil { t.Fatal(err) } @@ -108,7 +108,7 @@ func TestMaybeRefreshStats(t *testing.T) { // TODO(rytaft): Should not enqueue views to begin with. descVW := catalogkv.TestingGetTableDescriptor(s.DB(), keys.SystemSQLCodec, "t", "vw") refresher.maybeRefreshStats( - ctx, s.Stopper(), descVW.ID, 0 /* rowsAffected */, time.Microsecond, /* asOf */ + ctx, s.Stopper(), descVW.GetID(), 0 /* rowsAffected */, time.Microsecond, /* asOf */ ) select { case <-refresher.mutations: @@ -138,7 +138,7 @@ func TestAverageRefreshTime(t *testing.T) { INSERT INTO t.a VALUES (1);`) executor := s.InternalExecutor().(sqlutil.InternalExecutor) - tableID := catalogkv.TestingGetTableDescriptor(s.DB(), keys.SystemSQLCodec, "t", "a").ID + tableID := catalogkv.TestingGetTableDescriptor(s.DB(), keys.SystemSQLCodec, "t", "a").GetID() cache := NewTableStatisticsCache( 10, /* cacheSize */ gossip.MakeOptionalGossip(s.GossipI().(*gossip.Gossip)), diff --git a/pkg/sql/stats/gossip_invalidation_test.go b/pkg/sql/stats/gossip_invalidation_test.go index 13ab8a8054be..ac16e9415b49 100644 --- a/pkg/sql/stats/gossip_invalidation_test.go +++ b/pkg/sql/stats/gossip_invalidation_test.go @@ -56,7 +56,7 @@ func TestGossipInvalidation(t *testing.T) { sr0.Exec(t, "INSERT INTO test.t VALUES (1, 1), (2, 2), (3, 3)") tableDesc := catalogkv.TestingGetTableDescriptor(tc.Server(0).DB(), keys.SystemSQLCodec, "test", "t") - tableID := tableDesc.ID + tableID := tableDesc.GetID() expectNStats := func(n int) error { stats, err := sc.GetTableStats(ctx, tableID) diff --git 
a/pkg/sql/stats/stats_cache_test.go b/pkg/sql/stats/stats_cache_test.go index 61d3ced39351..efeee670e840 100644 --- a/pkg/sql/stats/stats_cache_test.go +++ b/pkg/sql/stats/stats_cache_test.go @@ -355,7 +355,7 @@ CREATE STATISTICS s FROM tt; tbl := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "tt") // Get stats for our table. We are ensuring here that the access to the stats // for tt properly hydrates the user defined type t before access. - _, err := sc.GetTableStats(ctx, tbl.ID) + _, err := sc.GetTableStats(ctx, tbl.GetID()) if err != nil { t.Fatal(err) } diff --git a/pkg/sql/table_ref_test.go b/pkg/sql/table_ref_test.go index def879d5350d..a8df080a3080 100644 --- a/pkg/sql/table_ref_test.go +++ b/pkg/sql/table_ref_test.go @@ -47,10 +47,10 @@ CREATE INDEX bc ON test.t(b, c); // Retrieve the numeric descriptors. tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t") - tID := tableDesc.ID + tID := tableDesc.GetID() var aID, bID, cID descpb.ColumnID - for i := range tableDesc.Columns { - c := &tableDesc.Columns[i] + for i := range tableDesc.GetPublicColumns() { + c := &tableDesc.GetPublicColumns()[i] switch c.Name { case "a": aID = c.ID @@ -65,10 +65,10 @@ CREATE INDEX bc ON test.t(b, c); // Retrieve the numeric descriptors. 
tableDesc = catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "hidden") - tIDHidden := tableDesc.ID + tIDHidden := tableDesc.GetID() var rowIDHidden descpb.ColumnID - for i := range tableDesc.Columns { - c := &tableDesc.Columns[i] + for i := range tableDesc.GetPublicColumns() { + c := &tableDesc.GetPublicColumns()[i] switch c.Name { case "rowid": rowIDHidden = c.ID diff --git a/pkg/sql/table_test.go b/pkg/sql/table_test.go index c504131e2cb3..0f6c726c8717 100644 --- a/pkg/sql/table_test.go +++ b/pkg/sql/table_test.go @@ -447,19 +447,19 @@ func TestSerializedUDTsInTableDescriptor(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - getDefault := func(desc *tabledesc.Immutable) string { - return *desc.Columns[0].DefaultExpr + getDefault := func(desc catalog.TableDescriptor) string { + return *desc.GetPublicColumns()[0].DefaultExpr } - getComputed := func(desc *tabledesc.Immutable) string { - return *desc.Columns[0].ComputeExpr + getComputed := func(desc catalog.TableDescriptor) string { + return *desc.GetPublicColumns()[0].ComputeExpr } - getCheck := func(desc *tabledesc.Immutable) string { - return desc.Checks[0].Expr + getCheck := func(desc catalog.TableDescriptor) string { + return desc.GetChecks()[0].Expr } testdata := []struct { colSQL string expectedExpr string - getExpr func(desc *tabledesc.Immutable) string + getExpr func(desc catalog.TableDescriptor) string }{ // Test a simple UDT as the default value. 
{ diff --git a/pkg/sql/temporary_schema.go b/pkg/sql/temporary_schema.go index 5fb385245b18..8643b5f477c1 100644 --- a/pkg/sql/temporary_schema.go +++ b/pkg/sql/temporary_schema.go @@ -249,17 +249,17 @@ func cleanupSchemaObjects( return err } - tblDescsByID[desc.ID] = desc - tblNamesByID[desc.ID] = tbName + tblDescsByID[desc.GetID()] = desc + tblNamesByID[desc.GetID()] = tbName - databaseIDToTempSchemaID[uint32(desc.ParentID)] = uint32(desc.GetParentSchemaID()) + databaseIDToTempSchemaID[uint32(desc.GetParentID())] = uint32(desc.GetParentSchemaID()) - if desc.SequenceOpts != nil { - sequences = append(sequences, desc.ID) - } else if desc.ViewQuery != "" { - views = append(views, desc.ID) + if desc.GetSequenceOpts() != nil { + sequences = append(sequences, desc.GetID()) + } else if desc.GetViewQuery() != "" { + views = append(views, desc.GetID()) } else { - tables = append(tables, desc.ID) + tables = append(tables, desc.GetID()) } } @@ -320,12 +320,12 @@ func cleanupSchemaObjects( for _, colID := range d.ColumnIDs { dependentColIDs.Add(int(colID)) } - for _, col := range dTableDesc.Columns { + for _, col := range dTableDesc.GetPublicColumns() { if dependentColIDs.Contains(int(col.ID)) { tbName := tree.MakeTableNameWithSchema( tree.Name(db.GetName()), tree.Name(schema), - tree.Name(dTableDesc.Name), + tree.Name(dTableDesc.GetName()), ) _, err = ie.ExecEx( ctx, diff --git a/pkg/sql/tests/BUILD.bazel b/pkg/sql/tests/BUILD.bazel index cda3c774a423..8819f72886ca 100644 --- a/pkg/sql/tests/BUILD.bazel +++ b/pkg/sql/tests/BUILD.bazel @@ -61,11 +61,11 @@ go_test( "//pkg/security/securitytest", "//pkg/server", "//pkg/sql", + "//pkg/sql/catalog", "//pkg/sql/catalog/bootstrap", "//pkg/sql/catalog/catalogkv", "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/systemschema", - "//pkg/sql/catalog/tabledesc", "//pkg/sql/parser", "//pkg/sql/pgwire/pgcode", "//pkg/sql/privilege", diff --git a/pkg/sql/tests/hash_sharded_test.go b/pkg/sql/tests/hash_sharded_test.go index 
588edd51347d..6330d7a74f5f 100644 --- a/pkg/sql/tests/hash_sharded_test.go +++ b/pkg/sql/tests/hash_sharded_test.go @@ -16,9 +16,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" @@ -28,7 +28,7 @@ import ( // getShardColumnID fetches the id of the shard column associated with the given sharded // index. func getShardColumnID( - t *testing.T, tableDesc *tabledesc.Immutable, shardedIndexName string, + t *testing.T, tableDesc catalog.TableDescriptor, shardedIndexName string, ) descpb.ColumnID { idx, err := tableDesc.FindIndexWithName(shardedIndexName) if err != nil { @@ -47,7 +47,7 @@ func getShardColumnID( // 2. A hidden check constraint was created on the aforementioned shard column. // 3. The first column in the index set is the aforementioned shard column. 
func verifyTableDescriptorState( - t *testing.T, tableDesc *tabledesc.Immutable, shardedIndexName string, + t *testing.T, tableDesc catalog.TableDescriptor, shardedIndexName string, ) { idx, err := tableDesc.FindIndexWithName(shardedIndexName) if err != nil { diff --git a/pkg/sql/tests/system_table_test.go b/pkg/sql/tests/system_table_test.go index 98a97d1d6565..fe110ada1484 100644 --- a/pkg/sql/tests/system_table_test.go +++ b/pkg/sql/tests/system_table_test.go @@ -22,10 +22,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/bootstrap" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/systemschema" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/privilege" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" @@ -162,7 +162,7 @@ func TestSystemTableLiterals(t *testing.T) { type testcase struct { id descpb.ID schema string - pkg *tabledesc.Immutable + pkg catalog.TableDescriptor } for _, test := range []testcase{ @@ -192,7 +192,7 @@ func TestSystemTableLiterals(t *testing.T) { {keys.ScheduledJobsTableID, systemschema.ScheduledJobsTableSchema, systemschema.ScheduledJobsTable}, {keys.SqllivenessID, systemschema.SqllivenessTableSchema, systemschema.SqllivenessTable}, } { - privs := *test.pkg.Privileges + privs := *test.pkg.GetPrivileges() gen, err := sql.CreateTestTableDescriptor( context.Background(), keys.SystemDatabaseID, @@ -208,7 +208,7 @@ func TestSystemTableLiterals(t *testing.T) { if !test.pkg.TableDesc().Equal(gen.TableDesc()) { diff := strings.Join(pretty.Diff(test.pkg.TableDesc(), gen.TableDesc()), "\n") 
t.Errorf("%s table descriptor generated from CREATE TABLE statement does not match "+ - "hardcoded table descriptor:\n%s", test.pkg.Name, diff) + "hardcoded table descriptor:\n%s", test.pkg.GetName(), diff) } } } diff --git a/pkg/sql/truncate.go b/pkg/sql/truncate.go index 9028c7343998..8c7321e982b2 100644 --- a/pkg/sql/truncate.go +++ b/pkg/sql/truncate.go @@ -315,7 +315,11 @@ func (p *planner) truncateTable( // can even eliminate the need to use a transaction for each chunk at a later // stage if it proves inefficient). func ClearTableDataInChunks( - ctx context.Context, db *kv.DB, codec keys.SQLCodec, tableDesc *tabledesc.Immutable, traceKV bool, + ctx context.Context, + db *kv.DB, + codec keys.SQLCodec, + tableDesc catalog.TableDescriptor, + traceKV bool, ) error { const chunkSize = row.TableTruncateChunkSize var resume roachpb.Span @@ -323,7 +327,7 @@ func ClearTableDataInChunks( for rowIdx, done := 0, false; !done; rowIdx += chunkSize { resumeAt := resume if traceKV { - log.VEventf(ctx, 2, "table %s truncate at row: %d, span: %s", tableDesc.Name, rowIdx, resume) + log.VEventf(ctx, 2, "table %s truncate at row: %d, span: %s", tableDesc.GetName(), rowIdx, resume) } if err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { rd := row.MakeDeleter(codec, tableDesc, nil /* requestedCols */) diff --git a/pkg/sql/unsplit.go b/pkg/sql/unsplit.go index d4e94aac072e..c8faf3fd54ed 100644 --- a/pkg/sql/unsplit.go +++ b/pkg/sql/unsplit.go @@ -17,7 +17,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/errors" @@ -26,7 +25,7 @@ import ( type unsplitNode struct { optColumnsSlot - tableDesc 
*tabledesc.Immutable + tableDesc catalog.TableDescriptor index *descpb.IndexDescriptor run unsplitRun rows planNode diff --git a/pkg/sql/views.go b/pkg/sql/views.go index 1243c135d836..6d479cb69568 100644 --- a/pkg/sql/views.go +++ b/pkg/sql/views.go @@ -16,7 +16,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -28,7 +27,7 @@ import ( type planDependencyInfo struct { // desc is a reference to the descriptor for the table being // depended on. - desc *tabledesc.Immutable + desc catalog.TableDescriptor // deps is the list of ways in which the current plan depends on // that table. There can be more than one entries when the same // table is used in different places. 
The entries can also be @@ -49,7 +48,8 @@ type planDependencies map[descpb.ID]planDependencyInfo func (d planDependencies) String() string { var buf bytes.Buffer for id, deps := range d { - fmt.Fprintf(&buf, "%d (%q):", id, tree.ErrNameStringP(&deps.desc.Name)) + name := deps.desc.GetName() + fmt.Fprintf(&buf, "%d (%q):", id, tree.ErrNameStringP(&name)) for _, dep := range deps.deps { buf.WriteString(" [") if dep.IndexID != 0 { diff --git a/pkg/sql/virtual_schema.go b/pkg/sql/virtual_schema.go index acc812b3cd7b..e2f652199677 100644 --- a/pkg/sql/virtual_schema.go +++ b/pkg/sql/virtual_schema.go @@ -376,7 +376,7 @@ func (v *virtualSchemaEntry) GetObjectByName( type virtualDefEntry struct { virtualDef virtualSchemaDef - desc *tabledesc.Immutable + desc catalog.TableDescriptor comment string validWithNoDatabaseContext bool } @@ -427,9 +427,9 @@ func (e *virtualDefEntry) validateRow(datums tree.Datums, columns colinfo.Result col := &columns[i] datum := datums[i] if datum == tree.DNull { - if !e.desc.Columns[i].Nullable { + if !e.desc.GetPublicColumns()[i].Nullable { return errors.AssertionFailedf("column %s.%s not nullable, but found NULL value", - e.desc.Name, col.Name) + e.desc.GetName(), col.Name) } } else if !datum.ResolvedType().Equivalent(col.Typ) { return errors.AssertionFailedf("datum column %q expected to be type %s; found type %s", @@ -449,8 +449,8 @@ func (e *virtualDefEntry) getPlanInfo( idxConstraint *constraint.Constraint, ) (colinfo.ResultColumns, virtualTableConstructor) { var columns colinfo.ResultColumns - for i := range e.desc.Columns { - col := &e.desc.Columns[i] + for i := range e.desc.GetPublicColumns() { + col := &e.desc.GetPublicColumns()[i] columns = append(columns, colinfo.ResultColumn{ Name: col.Name, Typ: col.Type, @@ -507,7 +507,7 @@ func (e *virtualDefEntry) getPlanInfo( if index.ID == 1 { return nil, errors.AssertionFailedf( - "programming error: can't constrain scan on primary virtual index of table %s", e.desc.Name) + "programming 
error: can't constrain scan on primary virtual index of table %s", e.desc.GetName()) } // Figure out the ordinal position of the column that we're filtering on. @@ -737,7 +737,7 @@ func (vs *VirtualSchemaHolder) getVirtualTableEntryByID(id descpb.ID) (*virtualD // VirtualTabler is used to fetch descriptors for virtual tables and databases. type VirtualTabler interface { - getVirtualTableDesc(tn *tree.TableName) (*tabledesc.Immutable, error) + getVirtualTableDesc(tn *tree.TableName) (catalog.TableDescriptor, error) getVirtualSchemaEntry(name string) (*virtualSchemaEntry, bool) getVirtualTableEntry(tn *tree.TableName) (*virtualDefEntry, error) getVirtualTableEntryByID(id descpb.ID) (*virtualDefEntry, error) @@ -750,7 +750,7 @@ type VirtualTabler interface { // getVirtualTableDesc is part of the VirtualTabler interface. func (vs *VirtualSchemaHolder) getVirtualTableDesc( tn *tree.TableName, -) (*tabledesc.Immutable, error) { +) (catalog.TableDescriptor, error) { t, err := vs.getVirtualTableEntry(tn) if err != nil || t == nil { return nil, err diff --git a/pkg/sql/virtual_table.go b/pkg/sql/virtual_table.go index 52ed3e1c0957..6123d9e70bc4 100644 --- a/pkg/sql/virtual_table.go +++ b/pkg/sql/virtual_table.go @@ -14,10 +14,10 @@ import ( "context" "sync" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/opt/constraint" "github.com/cockroachdb/cockroach/pkg/sql/opt/exec" "github.com/cockroachdb/cockroach/pkg/sql/rowcontainer" @@ -203,7 +203,7 @@ type vTableLookupJoinNode struct { dbName string db *dbdesc.Immutable - table *tabledesc.Immutable + table catalog.TableDescriptor index *descpb.IndexDescriptor // eqCol is the single 
equality column ordinal into the lookup table. Virtual // indexes only support a single indexed column currently. diff --git a/pkg/sql/zone_config.go b/pkg/sql/zone_config.go index db8aa866cba5..4f040fbf86b7 100644 --- a/pkg/sql/zone_config.go +++ b/pkg/sql/zone_config.go @@ -264,7 +264,7 @@ func (p *planner) resolveTableForZone( res = mutRes } } else if zs.TargetsTable() { - var immutRes *tabledesc.Immutable + var immutRes catalog.TableDescriptor p.runWithOptions(resolveFlags{skipCache: true}, func() { flags := tree.ObjectLookupFlagsWithRequiredTableKind(tree.ResolveAnyTableKind) flags.IncludeOffline = true diff --git a/pkg/sqlmigrations/migrations.go b/pkg/sqlmigrations/migrations.go index 58e6282a5337..8868a0032439 100644 --- a/pkg/sqlmigrations/migrations.go +++ b/pkg/sqlmigrations/migrations.go @@ -825,7 +825,7 @@ func (m *Manager) migrateSystemNamespace( q := fmt.Sprintf( `SELECT "parentID", name, id FROM [%d AS namespace_deprecated] WHERE id NOT IN (SELECT id FROM [%d AS namespace]) LIMIT %d`, - systemschema.DeprecatedNamespaceTable.ID, systemschema.NamespaceTable.ID, batchSize+1) + systemschema.DeprecatedNamespaceTable.GetID(), systemschema.NamespaceTable.GetID(), batchSize+1) rows, err := r.sqlExecutor.QueryEx( ctx, "read-deprecated-namespace-table", txn, sessiondata.InternalExecutorOverride{ @@ -947,7 +947,7 @@ func createNewSystemNamespaceDescriptor(ctx context.Context, r runner) error { if err != nil { return err } - descpb.TableFromDescriptor(deprecatedDesc, ts).Name = systemschema.DeprecatedNamespaceTable.Name + descpb.TableFromDescriptor(deprecatedDesc, ts).Name = systemschema.DeprecatedNamespaceTable.GetName() b.Put(deprecatedKey, deprecatedDesc) // The 19.2 namespace table contains an entry for "namespace" which maps to diff --git a/pkg/sqlmigrations/migrations_test.go b/pkg/sqlmigrations/migrations_test.go index de64f5a86c15..683e1f23a83d 100644 --- a/pkg/sqlmigrations/migrations_test.go +++ b/pkg/sqlmigrations/migrations_test.go @@ -545,7 +545,7 
@@ func TestCreateSystemTable(t *testing.T) { defer leaktest.AfterTest(t)() ctx := context.Background() - table := tabledesc.NewExistingMutable(systemschema.NamespaceTable.TableDescriptor) + table := tabledesc.NewExistingMutable(*systemschema.NamespaceTable.TableDesc()) table.ID = keys.MaxReservedDescID prevPrivileges, ok := descpb.SystemAllowedPrivileges[table.ID] @@ -799,16 +799,16 @@ func TestMigrateNamespaceTableDescriptors(t *testing.T) { ts, err := txn.GetProtoTs(ctx, key, desc) require.NoError(t, err) table := descpb.TableFromDescriptor(desc, ts) - table.CreateAsOfTime = systemschema.NamespaceTable.CreateAsOfTime - table.ModificationTime = systemschema.NamespaceTable.ModificationTime + table.CreateAsOfTime = systemschema.NamespaceTable.GetCreateAsOfTime() + table.ModificationTime = systemschema.NamespaceTable.GetModificationTime() require.True(t, table.Equal(systemschema.NamespaceTable.TableDesc())) } { ts, err := txn.GetProtoTs(ctx, deprecatedKey, desc) require.NoError(t, err) table := descpb.TableFromDescriptor(desc, ts) - table.CreateAsOfTime = systemschema.DeprecatedNamespaceTable.CreateAsOfTime - table.ModificationTime = systemschema.DeprecatedNamespaceTable.ModificationTime + table.CreateAsOfTime = systemschema.DeprecatedNamespaceTable.GetCreateAsOfTime() + table.ModificationTime = systemschema.DeprecatedNamespaceTable.GetModificationTime() require.True(t, table.Equal(systemschema.DeprecatedNamespaceTable.TableDesc())) } return nil @@ -840,7 +840,7 @@ CREATE TABLE system.jobs ( keys.SystemDatabaseID, keys.JobsTableID, oldJobsTableSchema, - systemschema.JobsTable.Privileges, + systemschema.JobsTable.GetPrivileges(), ) require.NoError(t, err) @@ -880,14 +880,14 @@ CREATE TABLE system.jobs ( newJobsTable := catalogkv.TestingGetTableDescriptor( mt.kvDB, keys.SystemSQLCodec, "system", "jobs") - require.Equal(t, 7, len(newJobsTable.Columns)) - require.Equal(t, "created_by_type", newJobsTable.Columns[5].Name) - require.Equal(t, "created_by_id", 
newJobsTable.Columns[6].Name) - require.Equal(t, 2, len(newJobsTable.Families)) + require.Equal(t, 7, len(newJobsTable.GetPublicColumns())) + require.Equal(t, "created_by_type", newJobsTable.GetPublicColumns()[5].Name) + require.Equal(t, "created_by_id", newJobsTable.GetPublicColumns()[6].Name) + require.Equal(t, 2, len(newJobsTable.GetFamilies())) // Ensure we keep old family name. - require.Equal(t, primaryFamilyName, newJobsTable.Families[0].Name) + require.Equal(t, primaryFamilyName, newJobsTable.GetFamilies()[0].Name) // Make sure our primary family has new columns added to it. - require.Equal(t, newPrimaryFamilyColumns, newJobsTable.Families[0].ColumnNames) + require.Equal(t, newPrimaryFamilyColumns, newJobsTable.GetFamilies()[0].ColumnNames) // Run the migration again -- it should be a no-op. require.NoError(t, mt.runMigration(ctx, migration)) @@ -925,7 +925,7 @@ func TestVersionAlterSystemJobsAddSqllivenessColumnsAddNewSystemSqllivenessTable keys.SystemDatabaseID, keys.JobsTableID, oldJobsTableSchema, - systemschema.JobsTable.Privileges, + systemschema.JobsTable.GetPrivileges(), ) require.NoError(t, err) @@ -962,15 +962,15 @@ func TestVersionAlterSystemJobsAddSqllivenessColumnsAddNewSystemSqllivenessTable newJobsTable := catalogkv.TestingGetTableDescriptor( mt.kvDB, keys.SystemSQLCodec, "system", "jobs") - require.Equal(t, 9, len(newJobsTable.Columns)) - require.Equal(t, "claim_session_id", newJobsTable.Columns[7].Name) - require.Equal(t, "claim_instance_id", newJobsTable.Columns[8].Name) - require.Equal(t, 3, len(newJobsTable.Families)) + require.Equal(t, 9, len(newJobsTable.GetPublicColumns())) + require.Equal(t, "claim_session_id", newJobsTable.GetPublicColumns()[7].Name) + require.Equal(t, "claim_instance_id", newJobsTable.GetPublicColumns()[8].Name) + require.Equal(t, 3, len(newJobsTable.GetFamilies())) // Ensure we keep old family names. 
- require.Equal(t, "fam_0_id_status_created_payload", newJobsTable.Families[0].Name) - require.Equal(t, "progress", newJobsTable.Families[1].Name) + require.Equal(t, "fam_0_id_status_created_payload", newJobsTable.GetFamilies()[0].Name) + require.Equal(t, "progress", newJobsTable.GetFamilies()[1].Name) // ... and that the new one is here. - require.Equal(t, "claim", newJobsTable.Families[2].Name) + require.Equal(t, "claim", newJobsTable.GetFamilies()[2].Name) // Run the migration again -- it should be a no-op. require.NoError(t, mt.runMigration(ctx, migration)) diff --git a/pkg/testutils/testcluster/testcluster_test.go b/pkg/testutils/testcluster/testcluster_test.go index 9947a9abca4d..0e3dabd66dd7 100644 --- a/pkg/testutils/testcluster/testcluster_test.go +++ b/pkg/testutils/testcluster/testcluster_test.go @@ -64,7 +64,7 @@ func TestManualReplication(t *testing.T) { kvDB := tc.Servers[0].DB() tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") - tableStartKey := keys.SystemSQLCodec.TablePrefix(uint32(tableDesc.ID)) + tableStartKey := keys.SystemSQLCodec.TablePrefix(uint32(tableDesc.GetID())) leftRangeDesc, tableRangeDesc, err := tc.SplitRange(tableStartKey) if err != nil { t.Fatal(err)