Skip to content

Commit

Permalink
Browse files Browse the repository at this point in the history
116241: sqlstats: read limit test outputs debug info on failure r=abarganier a=dhartunian

Adds output of per-shard stats counts when we fail to observe the correct limits.

Also removed the check loops for cluster settings. This is a test with a single node, and settings are applied synchronously.

Resolves: cockroachdb#115885
Epic: None

Release note: None

Co-authored-by: David Hartunian <[email protected]>
  • Loading branch information
craig[bot] and dhartunian committed Jan 19, 2024
2 parents 2a4e6b8 + f9901ce commit 981d187
Showing 1 changed file with 17 additions and 19 deletions.
36 changes: 17 additions & 19 deletions pkg/sql/sqlstats/persistedsqlstats/flush_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -680,15 +680,6 @@ func TestSQLStatsReadLimitSizeOnLockedTable(t *testing.T) {

// Set sql.stats.persisted_rows.max
sqlConn.Exec(t, fmt.Sprintf("SET CLUSTER SETTING sql.stats.persisted_rows.max=%d", maxNumPersistedRows))
testutils.SucceedsSoon(t, func() error {
var appliedSetting int
row := sqlConn.QueryRow(t, "SHOW CLUSTER SETTING sql.stats.persisted_rows.max")
row.Scan(&appliedSetting)
if appliedSetting != maxNumPersistedRows {
return errors.Newf("waiting for sql.stats.persisted_rows.max to be applied")
}
return nil
})

// We need SucceedsSoon here for the follower read timestamp to catch up
// enough for this state to be reached.
Expand All @@ -711,15 +702,6 @@ func TestSQLStatsReadLimitSizeOnLockedTable(t *testing.T) {
// Set table size check interval to .0000001 second. So the next check doesn't
// use the cached value.
sqlConn.Exec(t, "SET CLUSTER SETTING sql.stats.limit_table_size_check.interval='.0000001s'")
testutils.SucceedsSoon(t, func() error {
var appliedSetting string
row := sqlConn.QueryRow(t, "SHOW CLUSTER SETTING sql.stats.limit_table_size_check.interval")
row.Scan(&appliedSetting)
if appliedSetting != "00:00:00" {
return errors.Newf("waiting for sql.stats.limit_table_size_check.interval to be applied: %s", appliedSetting)
}
return nil
})

// Begin a transaction.
sqlConn.Exec(t, "BEGIN")
Expand All @@ -733,7 +715,23 @@ func TestSQLStatsReadLimitSizeOnLockedTable(t *testing.T) {
for i := 0; i < 3; i++ {
limitReached, err = pss.StmtsLimitSizeReached(ctx)
require.NoError(t, err)
require.True(t, limitReached, "limitReached should be true. Loop :%d", i)
if !limitReached {
readStmt := `SELECT crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_plan_hash_transaction_fingerprint_id_shard_8, count(*)
FROM system.statement_statistics
AS OF SYSTEM TIME follower_read_timestamp()
GROUP BY crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_plan_hash_transaction_fingerprint_id_shard_8`

sqlConn2 := sqlutils.MakeSQLRunner(s.SQLConn(t))
rows := sqlConn2.Query(t, readStmt)
shard := make([]int64, 8)
count := make([]int64, 8)
for j := 0; rows.Next(); {
err := rows.Scan(&shard[j], &count[j])
require.NoError(t, err)
j += 1
}
t.Fatalf("limitReached should be true. loop: %d; shards: %d counts: %d", i, shard, count)
}
}

// Close the transaction.
Expand Down

0 comments on commit 981d187

Please sign in to comment.