sql: delete TestGCJobWaitsForProtectedTimestamps
Informs cockroachdb#85876

In the legacy GC path, we explicitly check protected timestamps.
The new GC path does not need this since the MVCC GC queue handles
protected timestamps, and we can remove the responsibility of
testing the PTS logic since KV is handling it.

Release note: None
chengxiong-ruan committed Jul 6, 2023
1 parent 972c379 commit 5276934
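
For context, the check being removed can be stated compactly. Below is a minimal, hypothetical Go sketch of the protection rule the legacy path enforced and that the MVCC GC queue now owns; the names protectionPolicy and gcBlocked are illustrative stand-ins, not CockroachDB's actual API:

package main

import "fmt"

// protectionPolicy is an illustrative stand-in for a protected-timestamp
// record: readers at its WallTime must continue to see the data it covers.
type protectionPolicy struct {
	WallTime int64 // nanoseconds, mirroring hlc.Timestamp.WallTime
}

// gcBlocked reports whether GC of an object dropped at dropTime must wait.
// A policy protecting a timestamp earlier than the drop time could still
// read the dropped data, so it blocks GC; policies protecting later
// timestamps never saw the data and do not apply.
func gcBlocked(dropTime int64, policies []protectionPolicy) bool {
	for _, p := range policies {
		if p.WallTime < dropTime {
			return true
		}
	}
	return false
}

func main() {
	// Mirrors the deleted test below: the object is dropped at ts@10 while
	// the entire keyspace is protected at (ts@8, ts@23, ts@30).
	fmt.Println(gcBlocked(10, []protectionPolicy{{8}, {23}, {30}})) // true: ts@8 blocks GC
	// Once the pre-drop timestamp is removed, GC can proceed.
	fmt.Println(gcBlocked(10, []protectionPolicy{{23}, {30}})) // false
}

In the new GC path this decision is made inside KV's MVCC GC queue, driven by the same span-config state the deleted test manipulates, which is why the SQL-level coverage is redundant.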
Showing 2 changed files with 0 additions and 324 deletions.
4 changes: 0 additions & 4 deletions pkg/ccl/testccl/sqlccl/BUILD.bazel
@@ -37,9 +37,6 @@ go_test(
"//pkg/settings/cluster",
"//pkg/spanconfig",
"//pkg/sql",
"//pkg/sql/catalog",
"//pkg/sql/catalog/descpb",
"//pkg/sql/catalog/desctestutils",
"//pkg/sql/gcjob",
"//pkg/sql/isql",
"//pkg/sql/lexbase",
@@ -49,7 +46,6 @@ go_test(
"//pkg/sql/tests",
"//pkg/testutils",
"//pkg/testutils/serverutils",
"//pkg/testutils/skip",
"//pkg/testutils/sqlutils",
"//pkg/testutils/testcluster",
"//pkg/util/hlc",
320 changes: 0 additions & 320 deletions pkg/ccl/testccl/sqlccl/tenant_gc_test.go
@@ -26,20 +26,15 @@ import (
"github.com/cockroachdb/cockroach/pkg/security/username"
"github.com/cockroachdb/cockroach/pkg/spanconfig"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils"
"github.com/cockroachdb/cockroach/pkg/sql/gcjob"
"github.com/cockroachdb/cockroach/pkg/sql/isql"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/skip"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/cockroachdb/errors"
@@ -150,321 +145,6 @@ func TestGCTenantRemovesSpanConfigs(t *testing.T) {
require.Equal(t, len(records), beforeDelete-2)
}

// TestGCTableOrIndexWaitsForProtectedTimestamps is an integration test to
// ensure that the GC job responsible for clearing a dropped table or
// index's data respects protected timestamps. We run two variants -- one for
// tables and another for indexes. Moreover, these two variants are run for
// both the host tenant and a secondary tenant. The sketch is as follows:
// Setup of PTSes on system span configs:
// Entire keyspace: (ts@8, ts@23, ts@30)
// Host on secondary tenant: (ts@6, ts@28, ts@35)
// Tenant on tenant: (ts@2, ts@40)
// Host on another secondary tenant not being tested: (ts@3, ts@4, ts@5)
// We drop the object at ts@10. We then start updating system span configs or
// entirely removing them to unblock GC in the order above. For the host, we
// ensure only the PTS from the entire keyspace target applies. For the
// secondary tenant variant, we ensure that the first 3 system span configs
// all have effect.
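// (A PTS earlier than the drop time blocks GC, since reads at that
// timestamp could still see the dropped data; PTSes later than the drop
// time do not apply to this GC job.)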
func TestGCTableOrIndexWaitsForProtectedTimestamps(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
skip.WithIssue(t, 85876)
defer gcjob.SetSmallMaxGCIntervalForTest()()

var mu struct {
syncutil.Mutex

isSet bool
isProtected bool
jobID jobspb.JobID
}

ctx := context.Background()
ts, sqlDBRaw, _ := serverutils.StartServer(t, base.TestServerArgs{
Knobs: base.TestingKnobs{
JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals(),
GCJob: &sql.GCJobTestingKnobs{
RunAfterIsProtectedCheck: func(jobID jobspb.JobID, isProtected bool) {
mu.Lock()
defer mu.Unlock()

if jobID != mu.jobID { // not the job we're interested in
return
}
mu.isSet = true
mu.isProtected = isProtected
},
},
},
})
defer ts.Stopper().Stop(ctx)
sqlDB := sqlutils.MakeSQLRunner(sqlDBRaw)

sqlDB.Exec(t, "SET CLUSTER SETTING kv.closed_timestamp.target_duration = '100ms'")
tsKVAccessor := ts.SpanConfigKVAccessor().(spanconfig.KVAccessor)

tenantID := roachpb.MustMakeTenantID(10)

tt, ttSQLDBRaw := serverutils.StartTenant(
t, ts, base.TestTenantArgs{
TenantID: tenantID,
TestingKnobs: base.TestingKnobs{
GCJob: &sql.GCJobTestingKnobs{
RunAfterIsProtectedCheck: func(jobID jobspb.JobID, isProtected bool) {
mu.Lock()
defer mu.Unlock()

if jobID != mu.jobID { // not the job we're interested in
return
}
mu.isSet = true
mu.isProtected = isProtected
},
},
},
Stopper: ts.Stopper(),
},
)
ttSQLDB := sqlutils.MakeSQLRunner(ttSQLDBRaw)
ttKVAccessor := ts.SpanConfigKVAccessor().(spanconfig.KVAccessor)

makeIndexGCJobRecord := func(dropTime int64, indexID descpb.IndexID, parentID descpb.ID) jobs.Record {
return jobs.Record{
Details: jobspb.SchemaChangeGCDetails{
Indexes: []jobspb.SchemaChangeGCDetails_DroppedIndex{
{
IndexID: indexID,
DropTime: dropTime,
},
},
ParentID: parentID,
},
Progress: jobspb.SchemaChangeGCProgress{},
Username: username.TestUserName(),
}
}
makeTableGCJobRecord := func(dropTime int64, id descpb.ID) jobs.Record {
return jobs.Record{
Details: jobspb.SchemaChangeGCDetails{
Tables: []jobspb.SchemaChangeGCDetails_DroppedID{
{
ID: id,
DropTime: dropTime,
},
},
},
Progress: jobspb.SchemaChangeGCProgress{},
Username: username.TestUserName(),
}
}

resetTestingKnob := func() {
mu.Lock()
defer mu.Unlock()
mu.isSet = false
}

makeSystemSpanConfig := func(nanosTimestamps ...int) roachpb.SpanConfig {
var protectionPolicies []roachpb.ProtectionPolicy
for _, nanos := range nanosTimestamps {
protectionPolicies = append(protectionPolicies, roachpb.ProtectionPolicy{
ProtectedTimestamp: hlc.Timestamp{
WallTime: int64(nanos),
},
})
}
return roachpb.SpanConfig{
GCPolicy: roachpb.GCPolicy{
ProtectionPolicies: protectionPolicies,
},
}
}

ensureGCBlockedByPTS := func(t *testing.T, registry *jobs.Registry, sj *jobs.StartableJob) {
testutils.SucceedsSoon(t, func() error {
job, err := registry.LoadJob(ctx, sj.ID())
require.NoError(t, err)
require.Equal(t, jobs.StatusRunning, job.Status())

mu.Lock()
defer mu.Unlock()
if !mu.isSet {
return errors.Newf("waiting for isProtected status to be set")
}
require.True(t, mu.isProtected)
return nil
})
}

ensureGCSucceeded := func(t *testing.T, gcIndex bool, registry *jobs.Registry, sj *jobs.StartableJob) {
// Await job completion, ensure protection status was checked, and it was
// determined safe to GC.
require.NoError(t, sj.AwaitCompletion(ctx))
job, err := registry.LoadJob(ctx, sj.ID())
require.NoError(t, err)
require.Equal(t, jobs.StatusSucceeded, job.Status())
progress := job.Progress()

if gcIndex {
require.Equal(t, jobspb.SchemaChangeGCProgress_CLEARED, progress.GetSchemaChangeGC().Indexes[0].Status)
} else {
require.Equal(t, jobspb.SchemaChangeGCProgress_CLEARED, progress.GetSchemaChangeGC().Tables[0].Status)
}

mu.Lock()
defer mu.Unlock()
require.Equal(t, false, mu.isProtected)
}

testutils.RunTrueAndFalse(t, "gc-index", func(t *testing.T, gcIndex bool) {
testutils.RunTrueAndFalse(t, "secondary-tenant", func(t *testing.T, forSecondaryTenant bool) {
resetTestingKnob()
registry := ts.JobRegistry().(*jobs.Registry)
execCfg := ts.ExecutorConfig().(sql.ExecutorConfig)
sqlRunner := sqlDB
if forSecondaryTenant {
registry = tt.JobRegistry().(*jobs.Registry)
execCfg = tt.ExecutorConfig().(sql.ExecutorConfig)
sqlRunner = ttSQLDB
}
sqlRunner.Exec(t, "DROP DATABASE IF EXISTS db") // start with a clean slate
sqlRunner.Exec(t, "CREATE DATABASE db")
sqlRunner.Exec(t, "CREATE TABLE db.t(i INT, j INT)")
sqlRunner.Exec(t, "CREATE INDEX t_idx ON db.t(j)")

tableDesc := desctestutils.TestingGetTableDescriptor(execCfg.DB, execCfg.Codec, "db", "public", "t")
tableID := tableDesc.GetID()
idx, err := catalog.MustFindIndexByName(tableDesc, "t_idx")
require.NoError(t, err)
indexID := idx.GetID()

// Depending on which version of the test we're running (GC-ing an index
// or GC-ing a table), DROP the index or the table. We'll also create
// another GC job for the table or index with an injected drop time (so
// that it's considered "expired").
if gcIndex {
sqlRunner.Exec(t, "DROP INDEX db.t@t_idx")
} else {
sqlRunner.Exec(t, "DROP TABLE db.t")
}

// Write a PTS over the entire keyspace. We'll include multiple
// timestamps, one of which is before the drop time.
r1, err := spanconfig.MakeRecord(
spanconfig.MakeTargetFromSystemTarget(spanconfig.MakeEntireKeyspaceTarget()),
makeSystemSpanConfig(8, 23, 30),
)
require.NoError(t, err)

// We do something similar for a PTS set by the host over the tenant's
// keyspace.
hostOnTenant, err := spanconfig.MakeTenantKeyspaceTarget(roachpb.SystemTenantID, tenantID)
require.NoError(t, err)
r2, err := spanconfig.MakeRecord(
spanconfig.MakeTargetFromSystemTarget(hostOnTenant),
makeSystemSpanConfig(6, 28, 35),
)
require.NoError(t, err)

// Lastly, we'll also add a PTS set by the host over a different
// secondary tenant's keyspace. This should have no bearing on our test.
hostOnTenant20, err := spanconfig.MakeTenantKeyspaceTarget(
roachpb.SystemTenantID, roachpb.MustMakeTenantID(20),
)
require.NoError(t, err)
r3, err := spanconfig.MakeRecord(
spanconfig.MakeTargetFromSystemTarget(hostOnTenant20),
makeSystemSpanConfig(3, 4, 5),
)

require.NoError(t, err)
err = tsKVAccessor.UpdateSpanConfigRecords(
ctx, nil, []spanconfig.Record{r1, r2, r3}, hlc.MinTimestamp, hlc.MaxTimestamp,
)
require.NoError(t, err)

// One more, this time set by the tenant itself on its entire keyspace.
// Note that we must do this upsert using the tenant's KVAccessor.
tenantOnTenant, err := spanconfig.MakeTenantKeyspaceTarget(tenantID, tenantID)
require.NoError(t, err)
r4, err := spanconfig.MakeRecord(
spanconfig.MakeTargetFromSystemTarget(tenantOnTenant),
makeSystemSpanConfig(2, 40),
)
require.NoError(t, err)
err = ttKVAccessor.UpdateSpanConfigRecords(
ctx, nil, []spanconfig.Record{r4}, hlc.MinTimestamp, hlc.MaxTimestamp,
)
require.NoError(t, err)

// Create a GC-job for the table/index depending on which version of the
// test we're running. We inject a very low drop time so that the object's
// TTL is considered expired.
var record jobs.Record
if gcIndex {
record = makeIndexGCJobRecord(10, indexID, tableID)
} else {
record = makeTableGCJobRecord(10, tableID)
}
jobID := registry.MakeJobID()
mu.Lock()
mu.jobID = jobID
mu.Unlock()
sj, err := jobs.TestingCreateAndStartJob(
ctx, registry, execCfg.InternalDB, record, jobs.WithJobID(jobID),
)
require.NoError(t, err)

ensureGCBlockedByPTS(t, registry, sj)

// Update PTS state that applies to the entire keyspace -- we only
// remove the timestamp before the drop time. We should expect GC to
// succeed if we're testing for the system tenant at this point.
r1, err = spanconfig.MakeRecord(
spanconfig.MakeTargetFromSystemTarget(spanconfig.MakeEntireKeyspaceTarget()),
makeSystemSpanConfig(23, 30),
)
require.NoError(t, err)
err = tsKVAccessor.UpdateSpanConfigRecords(
ctx, nil, []spanconfig.Record{r1}, hlc.MinTimestamp, hlc.MaxTimestamp,
)
require.NoError(t, err)

resetTestingKnob()

// GC should succeed on the system tenant. Exit the test early, we're done.
if !forSecondaryTenant {
ensureGCSucceeded(t, gcIndex, registry, sj)
return
}

// Ensure GC is still blocked for secondary tenants.
ensureGCBlockedByPTS(t, registry, sj)

// Next, we'll remove the host set system span config on our secondary
// tenant.
err = tsKVAccessor.UpdateSpanConfigRecords(
ctx, []spanconfig.Target{r2.GetTarget()}, nil, hlc.MinTimestamp, hlc.MaxTimestamp,
)
require.NoError(t, err)

resetTestingKnob()
ensureGCBlockedByPTS(t, registry, sj)

// The only remaining PTS is from the system span config applied by
// the secondary tenant itself.
err = ttKVAccessor.UpdateSpanConfigRecords(
ctx, []spanconfig.Target{r4.GetTarget()}, nil, hlc.MinTimestamp, hlc.MaxTimestamp,
)
require.NoError(t, err)

// At this point, GC should succeed.
resetTestingKnob()
ensureGCSucceeded(t, gcIndex, registry, sj)
})
})
}

// TestGCTenantJobWaitsForProtectedTimestamps tests that the GC job responsible
// for clearing a dropped tenant's data respects protected timestamp records
// written by the system tenant on the secondary tenant's keyspace.
