From 8e4578b0e205020ded494b2274185ad3a6c84029 Mon Sep 17 00:00:00 2001 From: Evan Wall Date: Tue, 18 Jul 2023 14:18:29 -0400 Subject: [PATCH 1/7] sql: fix `CREATE TABLE AS` sourcing `SHOW` job failures Fixes #106260 Previously `CREATE TABLE AS`/`CREATE MATERIALIZED VIEW AS` sourcing from `SHOW`
generated a failing schema change job with a `relation "tbl" does not exist` error because the SHOW source table was not fully qualified. This PR fixes this by respecting the `Builder.qualifyDataSourceNamesInAST` flag in `delegate.TryDelegate()` which implements the failing SHOW commands. Release note (bug fix): Fix failing schema change job when CREATE TABLE AS or CREATE MATERIALIZED VIEW AS sources from a SHOW command: 1. CREATE TABLE t AS SELECT * FROM [SHOW CREATE TABLE tbl]; 2. CREATE TABLE t AS SELECT * FROM [SHOW INDEXES FROM tbl]; 3. CREATE TABLE t AS SELECT * FROM [SHOW COLUMNS FROM tbl]; 4. CREATE TABLE t AS SELECT * FROM [SHOW CONSTRAINTS FROM tbl]; 5. CREATE TABLE t AS SELECT * FROM [SHOW PARTITIONS FROM TABLE tbl]; 6. CREATE TABLE t AS SELECT * FROM [SHOW PARTITIONS FROM INDEX tbl@tbl_pkey]; --- pkg/sql/create_as_test.go | 18 ------- pkg/sql/delegate/delegate.go | 81 ++++++++++++++++++++++++++--- pkg/sql/delegate/show_partitions.go | 27 ++-------- pkg/sql/delegate/show_table.go | 12 +---- pkg/sql/opt/optbuilder/builder.go | 8 ++- 5 files changed, 87 insertions(+), 59 deletions(-) diff --git a/pkg/sql/create_as_test.go b/pkg/sql/create_as_test.go index cbb77a507be1..4c2835b648bb 100644 --- a/pkg/sql/create_as_test.go +++ b/pkg/sql/create_as_test.go @@ -137,9 +137,6 @@ func TestCreateAsShow(t *testing.T) { { sql: "SHOW CREATE TABLE show_create_tbl", setup: "CREATE TABLE show_create_tbl (id int PRIMARY KEY)", - // TODO(sql-foundations): Fix `relation "show_create_tbl" does not exist` error in job. - // See https://github.com/cockroachdb/cockroach/issues/106260. - skip: true, }, { sql: "SHOW CREATE FUNCTION show_create_fn", @@ -157,23 +154,14 @@ func TestCreateAsShow(t *testing.T) { { sql: "SHOW INDEXES FROM show_indexes_tbl", setup: "CREATE TABLE show_indexes_tbl (id int PRIMARY KEY)", - // TODO(sql-foundations): Fix `relation "show_indexes_tbl" does not exist` error in job. - // See https://github.com/cockroachdb/cockroach/issues/106260. - skip: true, }, { sql: "SHOW COLUMNS FROM show_columns_tbl", setup: "CREATE TABLE show_columns_tbl (id int PRIMARY KEY)", - // TODO(sql-foundations): Fix `relation "show_columns_tbl" does not exist` error in job. - // See https://github.com/cockroachdb/cockroach/issues/106260. - skip: true, }, { sql: "SHOW CONSTRAINTS FROM show_constraints_tbl", setup: "CREATE TABLE show_constraints_tbl (id int PRIMARY KEY)", - // TODO(sql-foundations): Fix `relation "show_constraints_tbl" does not exist` error in job. - // See https://github.com/cockroachdb/cockroach/issues/106260. - skip: true, }, { sql: "SHOW PARTITIONS FROM DATABASE defaultdb", @@ -181,16 +169,10 @@ func TestCreateAsShow(t *testing.T) { { sql: "SHOW PARTITIONS FROM TABLE show_partitions_tbl", setup: "CREATE TABLE show_partitions_tbl (id int PRIMARY KEY)", - // TODO(sql-foundations): Fix `relation "show_partitions_tbl" does not exist` error in job. - // See https://github.com/cockroachdb/cockroach/issues/106260. - skip: true, }, { sql: "SHOW PARTITIONS FROM INDEX show_partitions_idx_tbl@show_partitions_idx_tbl_pkey", setup: "CREATE TABLE show_partitions_idx_tbl (id int PRIMARY KEY)", - // TODO(sql-foundations): Fix `relation "show_partitions_idx_tbl" does not exist` error in job. - // See https://github.com/cockroachdb/cockroach/issues/106260.
- skip: true, }, { sql: "SHOW GRANTS", diff --git a/pkg/sql/delegate/delegate.go b/pkg/sql/delegate/delegate.go index 6edd7c0aa1f0..e7926d197200 100644 --- a/pkg/sql/delegate/delegate.go +++ b/pkg/sql/delegate/delegate.go @@ -33,12 +33,17 @@ import ( // can be rewritten as a lower level query. If it can, returns a new AST which // is equivalent to the original statement. Otherwise, returns nil. func TryDelegate( - ctx context.Context, catalog cat.Catalog, evalCtx *eval.Context, stmt tree.Statement, + ctx context.Context, + catalog cat.Catalog, + evalCtx *eval.Context, + stmt tree.Statement, + qualifyDataSourceNamesInAST bool, ) (tree.Statement, error) { d := delegator{ - ctx: ctx, - catalog: catalog, - evalCtx: evalCtx, + ctx: ctx, + catalog: catalog, + evalCtx: evalCtx, + qualifyDataSourceNamesInAST: qualifyDataSourceNamesInAST, } switch t := stmt.(type) { case *tree.ShowClusterSettingList: @@ -197,9 +202,10 @@ func TryDelegate( } type delegator struct { - ctx context.Context - catalog cat.Catalog - evalCtx *eval.Context + ctx context.Context + catalog cat.Catalog + evalCtx *eval.Context + qualifyDataSourceNamesInAST bool } func (d *delegator) parse(sql string) (tree.Statement, error) { @@ -210,3 +216,64 @@ func (d *delegator) parse(sql string) (tree.Statement, error) { d.evalCtx.Planner.MaybeReallocateAnnotations(s.NumAnnotations) return s.AST, err } + +// We avoid the cache so that we can observe the details without +// taking a lease, like other SHOW commands. +var resolveFlags = cat.Flags{AvoidDescriptorCaches: true, NoTableStats: true} + +// resolveAndModifyUnresolvedObjectName may modify the name input +// if d.qualifyDataSourceNamesInAST == true +func (d *delegator) resolveAndModifyUnresolvedObjectName( + name *tree.UnresolvedObjectName, +) (cat.DataSource, cat.DataSourceName, error) { + tn := name.ToTableName() + dataSource, resName, err := d.catalog.ResolveDataSource(d.ctx, resolveFlags, &tn) + if err != nil { + return nil, cat.DataSourceName{}, err + } + if err := d.catalog.CheckAnyPrivilege(d.ctx, dataSource); err != nil { + return nil, cat.DataSourceName{}, err + } + // Use qualifyDataSourceNamesInAST similarly to the Builder so that + // CREATE TABLE AS can source from a delegated expression. + // For example: CREATE TABLE t2 AS SELECT * FROM [SHOW CREATE t1]; + if d.qualifyDataSourceNamesInAST { + resName.ExplicitSchema = true + resName.ExplicitCatalog = true + *name = *resName.ToUnresolvedObjectName() + } + return dataSource, resName, nil +} + +// resolveAndModifyTableIndexName may modify the name input +// if d.qualifyDataSourceNamesInAST == true +func (d *delegator) resolveAndModifyTableIndexName( + name *tree.TableIndexName, +) (cat.DataSource, cat.DataSourceName, error) { + + tn := name.Table + dataSource, resName, err := d.catalog.ResolveDataSource(d.ctx, resolveFlags, &tn) + if err != nil { + return nil, cat.DataSourceName{}, err + } + + if err := d.catalog.CheckAnyPrivilege(d.ctx, dataSource); err != nil { + return nil, cat.DataSourceName{}, err + } + + // Force resolution of the index. + _, _, err = cat.ResolveTableIndex(d.ctx, d.catalog, resolveFlags, name) + if err != nil { + return nil, cat.DataSourceName{}, err + } + + // Use qualifyDataSourceNamesInAST similarly to the Builder so that + // CREATE TABLE AS can source from a delegated expression. 
+ // For example: CREATE TABLE t2 AS SELECT * FROM [SHOW PARTITIONS FROM INDEX t1@t1_pkey]; + if d.qualifyDataSourceNamesInAST { + resName.ExplicitSchema = true + resName.ExplicitCatalog = true + (*name).Table = resName.ToUnresolvedObjectName().ToTableName() + } + return dataSource, resName, nil +} diff --git a/pkg/sql/delegate/show_partitions.go b/pkg/sql/delegate/show_partitions.go index 4eb8c38ae2a7..536f6d61ea7c 100644 --- a/pkg/sql/delegate/show_partitions.go +++ b/pkg/sql/delegate/show_partitions.go @@ -14,7 +14,6 @@ import ( "fmt" "github.com/cockroachdb/cockroach/pkg/sql/lexbase" - "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -25,16 +24,10 @@ import ( func (d *delegator) delegateShowPartitions(n *tree.ShowPartitions) (tree.Statement, error) { sqltelemetry.IncrementShowCounter(sqltelemetry.Partitions) if n.IsTable { - flags := cat.Flags{AvoidDescriptorCaches: true, NoTableStats: true} - tn := n.Table.ToTableName() - - dataSource, resName, err := d.catalog.ResolveDataSource(d.ctx, flags, &tn) + _, resName, err := d.resolveAndModifyUnresolvedObjectName(n.Table) if err != nil { return nil, err } - if err := d.catalog.CheckAnyPrivilege(d.ctx, dataSource); err != nil { - return nil, err - } // We use the raw_config_sql from the partition_lookup result to get the // official zone config for the partition, and use the full_config_sql from the zones table @@ -108,28 +101,16 @@ func (d *delegator) delegateShowPartitions(n *tree.ShowPartitions) (tree.Stateme return d.parse(fmt.Sprintf(showDatabasePartitionsQuery, n.Database.String(), lexbase.EscapeSQLString(string(n.Database)))) } - flags := cat.Flags{AvoidDescriptorCaches: true, NoTableStats: true} - tn := n.Index.Table - // Throw a more descriptive error if the user did not use the index hint syntax. - if tn.ObjectName == "" { + tableIndexName := &n.Index + if tableIndexName.Table.ObjectName == "" { err := errors.New("no table specified") err = pgerror.WithCandidateCode(err, pgcode.InvalidParameterValue) err = errors.WithHint(err, "Specify a table using the hint syntax of table@index.") return nil, err } - dataSource, resName, err := d.catalog.ResolveDataSource(d.ctx, flags, &tn) - if err != nil { - return nil, err - } - - if err := d.catalog.CheckAnyPrivilege(d.ctx, dataSource); err != nil { - return nil, err - } - - // Force resolution of the index. 
- _, _, err = cat.ResolveTableIndex(d.ctx, d.catalog, flags, &n.Index) + _, resName, err := d.resolveAndModifyTableIndexName(tableIndexName) if err != nil { return nil, err } diff --git a/pkg/sql/delegate/show_table.go b/pkg/sql/delegate/show_table.go index cdd7049223e8..36ed85aaa2bf 100644 --- a/pkg/sql/delegate/show_table.go +++ b/pkg/sql/delegate/show_table.go @@ -14,7 +14,6 @@ import ( "fmt" "github.com/cockroachdb/cockroach/pkg/sql/lexbase" - "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry" "github.com/cockroachdb/errors" ) @@ -297,18 +296,11 @@ func (d *delegator) delegateShowCreateAllTables() (tree.Statement, error) { func (d *delegator) showTableDetails( name *tree.UnresolvedObjectName, query string, ) (tree.Statement, error) { - // We avoid the cache so that we can observe the details without - // taking a lease, like other SHOW commands. - flags := cat.Flags{AvoidDescriptorCaches: true, NoTableStats: true} - tn := name.ToTableName() - dataSource, resName, err := d.catalog.ResolveDataSource(d.ctx, flags, &tn) + + dataSource, resName, err := d.resolveAndModifyUnresolvedObjectName(name) if err != nil { return nil, err } - if err := d.catalog.CheckAnyPrivilege(d.ctx, dataSource); err != nil { - return nil, err - } - fullQuery := fmt.Sprintf(query, lexbase.EscapeSQLString(resName.Catalog()), lexbase.EscapeSQLString(resName.Table()), diff --git a/pkg/sql/opt/optbuilder/builder.go b/pkg/sql/opt/optbuilder/builder.go index 59a0ab1640d2..3211ada94334 100644 --- a/pkg/sql/opt/optbuilder/builder.go +++ b/pkg/sql/opt/optbuilder/builder.go @@ -423,7 +423,13 @@ func (b *Builder) buildStmt( default: // See if this statement can be rewritten to another statement using the // delegate functionality. - newStmt, err := delegate.TryDelegate(b.ctx, b.catalog, b.evalCtx, stmt) + newStmt, err := delegate.TryDelegate( + b.ctx, + b.catalog, + b.evalCtx, + stmt, + b.qualifyDataSourceNamesInAST, + ) if err != nil { panic(err) } From db4be30fa87a2f04b338d7d9308bca13efbf6b37 Mon Sep 17 00:00:00 2001 From: maryliag Date: Wed, 19 Jul 2023 10:41:07 -0400 Subject: [PATCH 2/7] ui: fix index creation syntax on console With an update to make the table name fully qualified, the index name was also using the fully qualified name, which contains ".", and that caused invalid syntax, since index names can't contain periods. The qualified name is not important for the index name itself, so this commit fixes the issue by ignoring that part and using just the table name, as it did previously. It also fixes invalid syntax when there were spaces in the generated name. It also adds a little more observability into indexes being created with a STORING clause, adding that to the suffix of the generated index name. Fixes #107168 Release note (bug fix): Index recommendation on the UI no longer uses the fully qualified name of a table to create an index name, allowing the creation of indexes directly from the Console to work.
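The fix itself reduces to a small string transformation on the recommended index name. Below is a minimal sketch of that logic, written in Go to match the rest of this series (the actual implementation in this patch is TypeScript, and the helper name here is illustrative, not part of cluster-ui):

```go
package main

import (
	"fmt"
	"strings"
)

// recIdxName mirrors the fix described above: drop the database/schema
// qualification (index names cannot contain "."), replace spaces with
// underscores, and pick the suffix based on whether the recommendation
// has a STORING clause.
func recIdxName(qualifiedTable string, storing bool) string {
	parts := strings.Split(qualifiedTable, ".")
	name := strings.ReplaceAll(parts[len(parts)-1], " ", "_")
	suffix := "_rec_idx"
	if storing {
		suffix = "_storing_rec_idx"
	}
	return name + suffix
}

func main() {
	// A fully qualified name like "defaultdb.public.t_i" previously produced
	// an invalid index name containing periods.
	fmt.Println(recIdxName("defaultdb.public.t_i", true)) // t_i_storing_rec_idx
	fmt.Println(recIdxName("t_i j", false))               // t_i_j_rec_idx
}
```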
--- .../src/insights/indexActionBtn.spec.ts | 15 ++++++++------- .../cluster-ui/src/insights/indexActionBtn.tsx | 8 ++++++-- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/pkg/ui/workspaces/cluster-ui/src/insights/indexActionBtn.spec.ts b/pkg/ui/workspaces/cluster-ui/src/insights/indexActionBtn.spec.ts index 1abadc425f82..2a31e42af915 100644 --- a/pkg/ui/workspaces/cluster-ui/src/insights/indexActionBtn.spec.ts +++ b/pkg/ui/workspaces/cluster-ui/src/insights/indexActionBtn.spec.ts @@ -20,19 +20,20 @@ describe("Create index name", () => { { name: "one parameter no space", query: "CREATE INDEX ON t(i) STORING (k)", - expected: "CREATE INDEX IF NOT EXISTS t_i_rec_idx ON t(i) STORING (k)", + expected: + "CREATE INDEX IF NOT EXISTS t_i_storing_rec_idx ON t(i) STORING (k)", }, { name: "two parameters", query: "CREATE INDEX ON t (i, j) STORING (k)", expected: - "CREATE INDEX IF NOT EXISTS t_i_j_rec_idx ON t (i, j) STORING (k)", + "CREATE INDEX IF NOT EXISTS t_i_j_storing_rec_idx ON t (i, j) STORING (k)", }, { name: "one parameter, one expression", query: "CREATE INDEX ON t (i, (j + k)) STORING (k)", expected: - "CREATE INDEX IF NOT EXISTS t_i_expr_rec_idx ON t (i, (j + k)) STORING (k)", + "CREATE INDEX IF NOT EXISTS t_i_expr_storing_rec_idx ON t (i, (j + k)) STORING (k)", }, { name: "one parameter, one expression no parenthesis", @@ -43,7 +44,7 @@ describe("Create index name", () => { name: "two expressions", query: "CREATE INDEX ON t ((i+l), (j + k)) STORING (k)", expected: - "CREATE INDEX IF NOT EXISTS t_expr_expr1_rec_idx ON t ((i+l), (j + k)) STORING (k)", + "CREATE INDEX IF NOT EXISTS t_expr_expr1_storing_rec_idx ON t ((i+l), (j + k)) STORING (k)", }, { name: "one expression, one parameter", @@ -54,19 +55,19 @@ describe("Create index name", () => { name: "two expressions, one parameter", query: "CREATE INDEX ON t ((i + l), (j + k), a) STORING (k)", expected: - "CREATE INDEX IF NOT EXISTS t_expr_expr1_a_rec_idx ON t ((i + l), (j + k), a) STORING (k)", + "CREATE INDEX IF NOT EXISTS t_expr_expr1_a_storing_rec_idx ON t ((i + l), (j + k), a) STORING (k)", }, { name: "invalid expression, missing )", query: "CREATE INDEX ON t ((i + l, (j + k), a) STORING (k)", expected: - "CREATE INDEX IF NOT EXISTS t_expr_expr1_expr2_rec_idx ON t ((i + l, (j + k), a) STORING (k)", + "CREATE INDEX IF NOT EXISTS t_expr_expr1_expr2_storing_rec_idx ON t ((i + l, (j + k), a) STORING (k)", }, { name: "invalid expression, extra )", query: "CREATE INDEX ON t ((i + l)), (j + k), a) STORING (k)", expected: - "CREATE INDEX IF NOT EXISTS t_expr_rec_idx ON t ((i + l)), (j + k), a) STORING (k)", + "CREATE INDEX IF NOT EXISTS t_expr_storing_rec_idx ON t ((i + l)), (j + k), a) STORING (k)", }, ]; diff --git a/pkg/ui/workspaces/cluster-ui/src/insights/indexActionBtn.tsx b/pkg/ui/workspaces/cluster-ui/src/insights/indexActionBtn.tsx index ae5779903df7..b5a816f0a6da 100644 --- a/pkg/ui/workspaces/cluster-ui/src/insights/indexActionBtn.tsx +++ b/pkg/ui/workspaces/cluster-ui/src/insights/indexActionBtn.tsx @@ -256,8 +256,12 @@ export function createIdxName(statement: string): string { idxName += "_" + value; } } - - idxName += "_rec_idx"; + const suffix = + statement.indexOf("STORING") >= 0 ? 
"_storing_rec_idx" : "_rec_idx"; + // The table name is fully qualified at this point, but we don't need the full name, + // so just use the last value (also an index name can't have ".") + const idxNameArr = idxName.split("."); + idxName = idxNameArr[idxNameArr.length - 1].replace(/\s/g, "_") + suffix; return statement.replace( "CREATE INDEX ON ", From 466c4e9b7093c01177b27a37c597b4e281209cef Mon Sep 17 00:00:00 2001 From: Alex Santamaura Date: Fri, 14 Jul 2023 16:59:51 -0400 Subject: [PATCH 3/7] telemetryccl: Split `TestTelemetry` into smaller sub-tests This change breaks up `TestTelemetry` and the `multiregion` test into four tests due to the test being quite large. The tests are: `multiregion`, `multiregion_db`, `multiregion_table`, and `multiregion_row`. Informs: #103004 Release note: None --- .../testdata/telemetry/multiregion | 366 +----------------- .../testdata/telemetry/multiregion_db | 88 +++++ .../testdata/telemetry/multiregion_row | 176 +++++++++ .../testdata/telemetry/multiregion_table | 128 ++++++ pkg/sql/sqltestutils/telemetry.go | 3 + 5 files changed, 398 insertions(+), 363 deletions(-) create mode 100644 pkg/ccl/telemetryccl/testdata/telemetry/multiregion_db create mode 100644 pkg/ccl/telemetryccl/testdata/telemetry/multiregion_row create mode 100644 pkg/ccl/telemetryccl/testdata/telemetry/multiregion_table diff --git a/pkg/ccl/telemetryccl/testdata/telemetry/multiregion b/pkg/ccl/telemetryccl/testdata/telemetry/multiregion index 7177b26e44f8..35526188f417 100644 --- a/pkg/ccl/telemetryccl/testdata/telemetry/multiregion +++ b/pkg/ccl/telemetryccl/testdata/telemetry/multiregion @@ -2,376 +2,16 @@ feature-allowlist sql.multiregion.* ---- -feature-usage -CREATE DATABASE survive_zone PRIMARY REGION "us-east-1" SURVIVE ZONE FAILURE ----- -sql.multiregion.create_database -sql.multiregion.create_database.survival_goal.survive_zone_failure - -feature-usage -CREATE DATABASE survive_region PRIMARY REGION "us-east-1" REGIONS "ap-southeast-2", "ca-central-1" SURVIVE REGION FAILURE ----- -sql.multiregion.create_database -sql.multiregion.create_database.survival_goal.survive_region_failure - -feature-usage -CREATE DATABASE d PRIMARY REGION "us-east-1" REGION "ca-central-1" ----- -sql.multiregion.create_database -sql.multiregion.create_database.survival_goal.survive_default - -exec -SET enable_multiregion_placement_policy = true; ----- - -feature-usage -CREATE DATABASE create_placement PRIMARY REGION "us-east-1" PLACEMENT RESTRICTED ----- -sql.multiregion.create_database -sql.multiregion.create_database.placement.restricted -sql.multiregion.create_database.survival_goal.survive_default - -feature-usage -CREATE DATABASE create_placement_default PRIMARY REGION "us-east-1" PLACEMENT DEFAULT ----- -sql.multiregion.create_database -sql.multiregion.create_database.placement.default -sql.multiregion.create_database.survival_goal.survive_default - -exec -CREATE DATABASE to_be_restricted PRIMARY REGION "us-east-1" ----- - -feature-usage -ALTER DATABASE to_be_restricted PLACEMENT RESTRICTED ----- -sql.multiregion.alter_database.placement.restricted - -feature-usage -ALTER DATABASE to_be_restricted PLACEMENT DEFAULT ----- -sql.multiregion.alter_database.placement.default - -feature-usage -ALTER DATABASE d DROP REGION "ca-central-1" ----- -sql.multiregion.drop_region - -feature-usage -ALTER DATABASE d ADD REGION "ap-southeast-2" ----- -sql.multiregion.add_region - -feature-usage -ALTER DATABASE d SET PRIMARY REGION "ap-southeast-2" ----- 
-sql.multiregion.alter_database.set_primary_region.switch_primary_region - -feature-usage -ALTER DATABASE d DROP REGION "us-east-1" ----- -sql.multiregion.drop_region - -feature-usage -ALTER DATABASE d DROP REGION "ap-southeast-2" ----- -sql.multiregion.drop_primary_region - -feature-usage -ALTER DATABASE d SET PRIMARY REGION "ca-central-1" ----- -sql.multiregion.alter_database.set_primary_region.initial_multiregion - -feature-usage -ALTER DATABASE d SURVIVE ZONE FAILURE ----- -sql.multiregion.alter_database.survival_goal.survive_zone_failure - -exec -USE d; -ALTER DATABASE d ADD REGION "ap-southeast-2" ----- - -feature-usage -CREATE TABLE t1 () ----- -sql.multiregion.create_table.locality.unspecified - -feature-usage -CREATE TABLE t2 () LOCALITY REGIONAL BY TABLE ----- -sql.multiregion.create_table.locality.regional_by_table - -feature-usage -CREATE TABLE t3 () LOCALITY REGIONAL BY TABLE IN "ap-southeast-2" ----- -sql.multiregion.create_table.locality.regional_by_table_in - -feature-usage -CREATE TABLE t4 () LOCALITY GLOBAL ----- -sql.multiregion.create_table.locality.global - -feature-usage -CREATE TABLE t5 () LOCALITY REGIONAL BY ROW ----- -sql.multiregion.create_table.locality.regional_by_row - -feature-usage -CREATE TABLE t6 (cr crdb_internal_region) LOCALITY REGIONAL BY ROW AS cr ----- -sql.multiregion.create_table.locality.regional_by_row_as - -# -# REGIONAL BY TABLE -> the others -# - -feature-usage -ALTER TABLE t1 SET LOCALITY REGIONAL BY ROW ----- -sql.multiregion.alter_table.locality.from.regional_by_table.to.regional_by_row - -exec -ALTER TABLE t1 SET LOCALITY REGIONAL BY TABLE ----- - -feature-usage -ALTER TABLE t1 SET LOCALITY GLOBAL ----- -sql.multiregion.alter_table.locality.from.regional_by_table.to.global - -exec -ALTER TABLE t1 SET LOCALITY REGIONAL BY TABLE ----- - -feature-usage -ALTER TABLE t1 SET LOCALITY REGIONAL BY TABLE IN "ap-southeast-2" ----- -sql.multiregion.alter_table.locality.from.regional_by_table.to.regional_by_table_in - -exec -ALTER TABLE t1 SET LOCALITY REGIONAL BY TABLE; -ALTER TABLE t1 ADD COLUMN cr crdb_internal_region NOT NULL ----- - -feature-usage -ALTER TABLE t1 SET LOCALITY REGIONAL BY ROW AS "cr" ----- -sql.multiregion.alter_table.locality.from.regional_by_table.to.regional_by_row_as - -exec -ALTER TABLE t1 SET LOCALITY REGIONAL BY TABLE ----- - -feature-usage -ALTER TABLE t1 SET LOCALITY REGIONAL BY TABLE ----- -sql.multiregion.alter_table.locality.from.regional_by_table.to.regional_by_table - -exec -ALTER TABLE t1 SET LOCALITY REGIONAL BY TABLE ----- - -# -# REGIONAL BY TABLE IN "ap-southeast-2" -> the others -# - -feature-usage -ALTER TABLE t3 SET LOCALITY REGIONAL BY ROW ----- -sql.multiregion.alter_table.locality.from.regional_by_table_in.to.regional_by_row - -exec -ALTER TABLE t3 SET LOCALITY REGIONAL BY TABLE IN "ap-southeast-2" ----- - -feature-usage -ALTER TABLE t3 SET LOCALITY GLOBAL ----- -sql.multiregion.alter_table.locality.from.regional_by_table_in.to.global - -exec -ALTER TABLE t3 SET LOCALITY REGIONAL BY TABLE IN "ap-southeast-2" ----- - -feature-usage -ALTER TABLE t3 SET LOCALITY REGIONAL BY TABLE IN "ap-southeast-2" ----- -sql.multiregion.alter_table.locality.from.regional_by_table_in.to.regional_by_table_in - -exec -ALTER TABLE t3 SET LOCALITY REGIONAL BY TABLE IN "ap-southeast-2"; -ALTER TABLE t3 ADD COLUMN cr crdb_internal_region NOT NULL ----- - -feature-usage -ALTER TABLE t3 SET LOCALITY REGIONAL BY ROW AS "cr" ----- -sql.multiregion.alter_table.locality.from.regional_by_table_in.to.regional_by_row_as - -exec -ALTER 
TABLE t3 SET LOCALITY REGIONAL BY TABLE IN "ap-southeast-2" ----- - -feature-usage -ALTER TABLE t3 SET LOCALITY REGIONAL BY TABLE ----- -sql.multiregion.alter_table.locality.from.regional_by_table_in.to.regional_by_table - -exec -ALTER TABLE t3 SET LOCALITY REGIONAL BY TABLE IN "ap-southeast-2" ----- - -# -# GLOBAL -> the others -# - -feature-usage -ALTER TABLE t4 SET LOCALITY REGIONAL BY ROW ----- -sql.multiregion.alter_table.locality.from.global.to.regional_by_row - -exec -ALTER TABLE t4 SET LOCALITY GLOBAL ----- - -feature-usage -ALTER TABLE t4 SET LOCALITY GLOBAL ----- -sql.multiregion.alter_table.locality.from.global.to.global - -exec -ALTER TABLE t4 SET LOCALITY GLOBAL ----- - -feature-usage -ALTER TABLE t4 SET LOCALITY REGIONAL BY TABLE IN "ap-southeast-2" ----- -sql.multiregion.alter_table.locality.from.global.to.regional_by_table_in - -exec -ALTER TABLE t4 SET LOCALITY GLOBAL; -ALTER TABLE t4 ADD COLUMN cr crdb_internal_region NOT NULL ----- - -feature-usage -ALTER TABLE t4 SET LOCALITY REGIONAL BY ROW AS "cr" ----- -sql.multiregion.alter_table.locality.from.global.to.regional_by_row_as - -exec -ALTER TABLE t4 SET LOCALITY GLOBAL ----- - -feature-usage -ALTER TABLE t4 SET LOCALITY REGIONAL BY TABLE ----- -sql.multiregion.alter_table.locality.from.global.to.regional_by_table - exec -ALTER TABLE t4 SET LOCALITY GLOBAL +CREATE DATABASE d PRIMARY REGION "ca-central-1" REGION "ap-southeast-2" ---- -# -# REGIONAL BY ROW -> the others -# - -feature-usage -ALTER TABLE t5 SET LOCALITY REGIONAL BY ROW ----- -sql.multiregion.alter_table.locality.from.regional_by_row.to.regional_by_row - exec -ALTER TABLE t5 SET LOCALITY REGIONAL BY ROW ----- - -feature-usage -ALTER TABLE t5 SET LOCALITY GLOBAL ----- -sql.multiregion.alter_table.locality.from.regional_by_row.to.global - -exec -ALTER TABLE t5 SET LOCALITY REGIONAL BY ROW ----- - -feature-usage -ALTER TABLE t5 SET LOCALITY REGIONAL BY TABLE IN "ap-southeast-2" ----- -sql.multiregion.alter_table.locality.from.regional_by_row.to.regional_by_table_in - -exec -ALTER TABLE t5 SET LOCALITY REGIONAL BY ROW; ----- - -exec -ALTER TABLE t5 ADD COLUMN cr crdb_internal_region NOT NULL ----- - -feature-usage -ALTER TABLE t5 SET LOCALITY REGIONAL BY ROW AS "cr" ----- -sql.multiregion.alter_table.locality.from.regional_by_row.to.regional_by_row_as - -exec -ALTER TABLE t5 SET LOCALITY REGIONAL BY ROW ----- - -feature-usage -ALTER TABLE t5 SET LOCALITY REGIONAL BY TABLE ----- -sql.multiregion.alter_table.locality.from.regional_by_row.to.regional_by_table - -exec -ALTER TABLE t5 SET LOCALITY REGIONAL BY ROW ----- - -# -# REGIONAL BY TABLE -> the others -# - -feature-usage -ALTER TABLE t6 SET LOCALITY REGIONAL BY ROW ----- -sql.multiregion.alter_table.locality.from.regional_by_row_as.to.regional_by_row - -exec -ALTER TABLE t6 SET LOCALITY REGIONAL BY ROW AS "cr" ----- - -feature-usage -ALTER TABLE t6 SET LOCALITY GLOBAL ----- -sql.multiregion.alter_table.locality.from.regional_by_row_as.to.global - -exec -ALTER TABLE t6 SET LOCALITY REGIONAL BY ROW AS "cr" ----- - -feature-usage -ALTER TABLE t6 SET LOCALITY REGIONAL BY TABLE IN "ap-southeast-2" ----- -sql.multiregion.alter_table.locality.from.regional_by_row_as.to.regional_by_table_in - -exec -ALTER TABLE t6 SET LOCALITY REGIONAL BY ROW AS "cr" ----- - -feature-usage -ALTER TABLE t6 SET LOCALITY REGIONAL BY ROW AS "cr" ----- -sql.multiregion.alter_table.locality.from.regional_by_row_as.to.regional_by_row_as - -exec -ALTER TABLE t6 SET LOCALITY REGIONAL BY ROW AS "cr" ----- - -feature-usage -ALTER TABLE t6 SET 
LOCALITY REGIONAL BY TABLE +CREATE DATABASE survive_region PRIMARY REGION "us-east-1" REGIONS "ap-southeast-2", "ca-central-1" SURVIVE REGION FAILURE ---- -sql.multiregion.alter_table.locality.from.regional_by_row_as.to.regional_by_table exec -ALTER TABLE t6 SET LOCALITY REGIONAL BY ROW AS "cr" +SET enable_multiregion_placement_policy = true; ---- exec diff --git a/pkg/ccl/telemetryccl/testdata/telemetry/multiregion_db b/pkg/ccl/telemetryccl/testdata/telemetry/multiregion_db new file mode 100644 index 000000000000..1c61eb5138ad --- /dev/null +++ b/pkg/ccl/telemetryccl/testdata/telemetry/multiregion_db @@ -0,0 +1,88 @@ +feature-allowlist +sql.multiregion.* +---- + +feature-usage +CREATE DATABASE survive_zone PRIMARY REGION "us-east-1" SURVIVE ZONE FAILURE +---- +sql.multiregion.create_database +sql.multiregion.create_database.survival_goal.survive_zone_failure + +feature-usage +CREATE DATABASE survive_region PRIMARY REGION "us-east-1" REGIONS "ap-southeast-2", "ca-central-1" SURVIVE REGION FAILURE +---- +sql.multiregion.create_database +sql.multiregion.create_database.survival_goal.survive_region_failure + +feature-usage +CREATE DATABASE d PRIMARY REGION "us-east-1" REGION "ca-central-1" +---- +sql.multiregion.create_database +sql.multiregion.create_database.survival_goal.survive_default + +exec +SET enable_multiregion_placement_policy = true; +---- + +feature-usage +CREATE DATABASE create_placement PRIMARY REGION "us-east-1" PLACEMENT RESTRICTED +---- +sql.multiregion.create_database +sql.multiregion.create_database.placement.restricted +sql.multiregion.create_database.survival_goal.survive_default + +feature-usage +CREATE DATABASE create_placement_default PRIMARY REGION "us-east-1" PLACEMENT DEFAULT +---- +sql.multiregion.create_database +sql.multiregion.create_database.placement.default +sql.multiregion.create_database.survival_goal.survive_default + +exec +CREATE DATABASE to_be_restricted PRIMARY REGION "us-east-1" +---- + +feature-usage +ALTER DATABASE to_be_restricted PLACEMENT RESTRICTED +---- +sql.multiregion.alter_database.placement.restricted + +feature-usage +ALTER DATABASE to_be_restricted PLACEMENT DEFAULT +---- +sql.multiregion.alter_database.placement.default + +feature-usage +ALTER DATABASE d DROP REGION "ca-central-1" +---- +sql.multiregion.drop_region + +feature-usage +ALTER DATABASE d ADD REGION "ap-southeast-2" +---- +sql.multiregion.add_region + +feature-usage +ALTER DATABASE d SET PRIMARY REGION "ap-southeast-2" +---- +sql.multiregion.alter_database.set_primary_region.switch_primary_region + +feature-usage +ALTER DATABASE d DROP REGION "us-east-1" +---- +sql.multiregion.drop_region + +feature-usage +ALTER DATABASE d DROP REGION "ap-southeast-2" +---- +sql.multiregion.drop_primary_region + +feature-usage +ALTER DATABASE d SET PRIMARY REGION "ca-central-1" +---- +sql.multiregion.alter_database.set_primary_region.initial_multiregion + +feature-usage +ALTER DATABASE d SURVIVE ZONE FAILURE +---- +sql.multiregion.alter_database.survival_goal.survive_zone_failure diff --git a/pkg/ccl/telemetryccl/testdata/telemetry/multiregion_row b/pkg/ccl/telemetryccl/testdata/telemetry/multiregion_row new file mode 100644 index 000000000000..1e159b4f3a01 --- /dev/null +++ b/pkg/ccl/telemetryccl/testdata/telemetry/multiregion_row @@ -0,0 +1,176 @@ +feature-allowlist +sql.multiregion.* +---- + +exec +CREATE DATABASE d PRIMARY REGION "us-east-1" REGION "ca-central-1" +---- + +exec +SET enable_multiregion_placement_policy = true; +USE d; +ALTER DATABASE d ADD REGION "ap-southeast-2" +---- + 
+feature-usage +CREATE TABLE t4 () LOCALITY GLOBAL +---- +sql.multiregion.create_table.locality.global + +feature-usage +CREATE TABLE t5 () LOCALITY REGIONAL BY ROW +---- +sql.multiregion.create_table.locality.regional_by_row + +feature-usage +CREATE TABLE t6 (cr crdb_internal_region) LOCALITY REGIONAL BY ROW AS cr +---- +sql.multiregion.create_table.locality.regional_by_row_as + +# +# GLOBAL -> the others +# + +feature-usage +ALTER TABLE t4 SET LOCALITY REGIONAL BY ROW +---- +sql.multiregion.alter_table.locality.from.global.to.regional_by_row + +exec +ALTER TABLE t4 SET LOCALITY GLOBAL +---- + +feature-usage +ALTER TABLE t4 SET LOCALITY GLOBAL +---- +sql.multiregion.alter_table.locality.from.global.to.global + +exec +ALTER TABLE t4 SET LOCALITY GLOBAL +---- + +feature-usage +ALTER TABLE t4 SET LOCALITY REGIONAL BY TABLE IN "ap-southeast-2" +---- +sql.multiregion.alter_table.locality.from.global.to.regional_by_table_in + +exec +ALTER TABLE t4 SET LOCALITY GLOBAL; +ALTER TABLE t4 ADD COLUMN cr crdb_internal_region NOT NULL +---- + +feature-usage +ALTER TABLE t4 SET LOCALITY REGIONAL BY ROW AS "cr" +---- +sql.multiregion.alter_table.locality.from.global.to.regional_by_row_as + +exec +ALTER TABLE t4 SET LOCALITY GLOBAL +---- + +feature-usage +ALTER TABLE t4 SET LOCALITY REGIONAL BY TABLE +---- +sql.multiregion.alter_table.locality.from.global.to.regional_by_table + +exec +ALTER TABLE t4 SET LOCALITY GLOBAL +---- + +# +# REGIONAL BY ROW -> the others +# + +feature-usage +ALTER TABLE t5 SET LOCALITY REGIONAL BY ROW +---- +sql.multiregion.alter_table.locality.from.regional_by_row.to.regional_by_row + +exec +ALTER TABLE t5 SET LOCALITY REGIONAL BY ROW +---- + +feature-usage +ALTER TABLE t5 SET LOCALITY GLOBAL +---- +sql.multiregion.alter_table.locality.from.regional_by_row.to.global + +exec +ALTER TABLE t5 SET LOCALITY REGIONAL BY ROW +---- + +feature-usage +ALTER TABLE t5 SET LOCALITY REGIONAL BY TABLE IN "ap-southeast-2" +---- +sql.multiregion.alter_table.locality.from.regional_by_row.to.regional_by_table_in + +exec +ALTER TABLE t5 SET LOCALITY REGIONAL BY ROW; +---- + +exec +ALTER TABLE t5 ADD COLUMN cr crdb_internal_region NOT NULL +---- + +feature-usage +ALTER TABLE t5 SET LOCALITY REGIONAL BY ROW AS "cr" +---- +sql.multiregion.alter_table.locality.from.regional_by_row.to.regional_by_row_as + +exec +ALTER TABLE t5 SET LOCALITY REGIONAL BY ROW +---- + +feature-usage +ALTER TABLE t5 SET LOCALITY REGIONAL BY TABLE +---- +sql.multiregion.alter_table.locality.from.regional_by_row.to.regional_by_table + +exec +ALTER TABLE t5 SET LOCALITY REGIONAL BY ROW +---- + +# +# REGIONAL BY TABLE -> the others +# + +feature-usage +ALTER TABLE t6 SET LOCALITY REGIONAL BY ROW +---- +sql.multiregion.alter_table.locality.from.regional_by_row_as.to.regional_by_row + +exec +ALTER TABLE t6 SET LOCALITY REGIONAL BY ROW AS "cr" +---- + +feature-usage +ALTER TABLE t6 SET LOCALITY GLOBAL +---- +sql.multiregion.alter_table.locality.from.regional_by_row_as.to.global + +exec +ALTER TABLE t6 SET LOCALITY REGIONAL BY ROW AS "cr" +---- + +feature-usage +ALTER TABLE t6 SET LOCALITY REGIONAL BY TABLE IN "ap-southeast-2" +---- +sql.multiregion.alter_table.locality.from.regional_by_row_as.to.regional_by_table_in + +exec +ALTER TABLE t6 SET LOCALITY REGIONAL BY ROW AS "cr" +---- + +feature-usage +ALTER TABLE t6 SET LOCALITY REGIONAL BY ROW AS "cr" +---- +sql.multiregion.alter_table.locality.from.regional_by_row_as.to.regional_by_row_as + +exec +ALTER TABLE t6 SET LOCALITY REGIONAL BY ROW AS "cr" +---- + +feature-usage +ALTER TABLE 
t6 SET LOCALITY REGIONAL BY TABLE +---- +sql.multiregion.alter_table.locality.from.regional_by_row_as.to.regional_by_table diff --git a/pkg/ccl/telemetryccl/testdata/telemetry/multiregion_table b/pkg/ccl/telemetryccl/testdata/telemetry/multiregion_table new file mode 100644 index 000000000000..4a6fa893f133 --- /dev/null +++ b/pkg/ccl/telemetryccl/testdata/telemetry/multiregion_table @@ -0,0 +1,128 @@ +feature-allowlist +sql.multiregion.* +---- + +exec +CREATE DATABASE d PRIMARY REGION "us-east-1" REGION "ca-central-1" +---- + +exec +SET enable_multiregion_placement_policy = true; +USE d; +ALTER DATABASE d ADD REGION "ap-southeast-2" +---- + +feature-usage +CREATE TABLE t1 () +---- +sql.multiregion.create_table.locality.unspecified + +feature-usage +CREATE TABLE t2 () LOCALITY REGIONAL BY TABLE +---- +sql.multiregion.create_table.locality.regional_by_table + +feature-usage +CREATE TABLE t3 () LOCALITY REGIONAL BY TABLE IN "ap-southeast-2" +---- +sql.multiregion.create_table.locality.regional_by_table_in + +# +# REGIONAL BY TABLE -> the others +# + +feature-usage +ALTER TABLE t1 SET LOCALITY REGIONAL BY ROW +---- +sql.multiregion.alter_table.locality.from.regional_by_table.to.regional_by_row + +exec +ALTER TABLE t1 SET LOCALITY REGIONAL BY TABLE +---- + +feature-usage +ALTER TABLE t1 SET LOCALITY GLOBAL +---- +sql.multiregion.alter_table.locality.from.regional_by_table.to.global + +exec +ALTER TABLE t1 SET LOCALITY REGIONAL BY TABLE +---- + +feature-usage +ALTER TABLE t1 SET LOCALITY REGIONAL BY TABLE IN "ap-southeast-2" +---- +sql.multiregion.alter_table.locality.from.regional_by_table.to.regional_by_table_in + +exec +ALTER TABLE t1 SET LOCALITY REGIONAL BY TABLE; +ALTER TABLE t1 ADD COLUMN cr crdb_internal_region NOT NULL +---- + +feature-usage +ALTER TABLE t1 SET LOCALITY REGIONAL BY ROW AS "cr" +---- +sql.multiregion.alter_table.locality.from.regional_by_table.to.regional_by_row_as + +exec +ALTER TABLE t1 SET LOCALITY REGIONAL BY TABLE +---- + +feature-usage +ALTER TABLE t1 SET LOCALITY REGIONAL BY TABLE +---- +sql.multiregion.alter_table.locality.from.regional_by_table.to.regional_by_table + +exec +ALTER TABLE t1 SET LOCALITY REGIONAL BY TABLE +---- + +# +# REGIONAL BY TABLE IN "ap-southeast-2" -> the others +# + +feature-usage +ALTER TABLE t3 SET LOCALITY REGIONAL BY ROW +---- +sql.multiregion.alter_table.locality.from.regional_by_table_in.to.regional_by_row + +exec +ALTER TABLE t3 SET LOCALITY REGIONAL BY TABLE IN "ap-southeast-2" +---- + +feature-usage +ALTER TABLE t3 SET LOCALITY GLOBAL +---- +sql.multiregion.alter_table.locality.from.regional_by_table_in.to.global + +exec +ALTER TABLE t3 SET LOCALITY REGIONAL BY TABLE IN "ap-southeast-2" +---- + +feature-usage +ALTER TABLE t3 SET LOCALITY REGIONAL BY TABLE IN "ap-southeast-2" +---- +sql.multiregion.alter_table.locality.from.regional_by_table_in.to.regional_by_table_in + +exec +ALTER TABLE t3 SET LOCALITY REGIONAL BY TABLE IN "ap-southeast-2"; +ALTER TABLE t3 ADD COLUMN cr crdb_internal_region NOT NULL +---- + +feature-usage +ALTER TABLE t3 SET LOCALITY REGIONAL BY ROW AS "cr" +---- +sql.multiregion.alter_table.locality.from.regional_by_table_in.to.regional_by_row_as + +exec +ALTER TABLE t3 SET LOCALITY REGIONAL BY TABLE IN "ap-southeast-2" +---- + +feature-usage +ALTER TABLE t3 SET LOCALITY REGIONAL BY TABLE +---- +sql.multiregion.alter_table.locality.from.regional_by_table_in.to.regional_by_table + +exec +ALTER TABLE t3 SET LOCALITY REGIONAL BY TABLE IN "ap-southeast-2" +---- diff --git a/pkg/sql/sqltestutils/telemetry.go 
b/pkg/sql/sqltestutils/telemetry.go index d193ef3d0a14..73cbcb66bb2c 100644 --- a/pkg/sql/sqltestutils/telemetry.go +++ b/pkg/sql/sqltestutils/telemetry.go @@ -98,6 +98,9 @@ func TelemetryTest(t *testing.T, serverArgs []base.TestServerArgs, testTenant bo // Index & multiregion are disabled because it requires // multi-region syntax to be enabled for secondary tenants. "testdata/telemetry/multiregion", + "testdata/telemetry/multiregion_db", + "testdata/telemetry/multiregion_table", + "testdata/telemetry/multiregion_row", "testdata/telemetry/index", "testdata/telemetry/planning", "testdata/telemetry/sql-stats": From a792cd30a864cae879f8f91040b7394304ee3572 Mon Sep 17 00:00:00 2001 From: j82w Date: Fri, 14 Jul 2023 15:03:47 -0400 Subject: [PATCH 4/7] ssmemstorage: reduce lock contention on RecordStatement 1. The stats object lock was being held even during the insights event creation and insert, which has its own lock. The insights logic is now moved above the stats lock. 2. Moved latency calculation outside the stats lock. 3. Moved error code calculation outside the stats lock. 4. ss_mem_storage was converted to use a RWMutex. Each object in the map has its own lock. The read lock allows multiple threads to read from the map at the same time, which is the common case. Writes are only done the first time a statement is seen. Fixes: #106789, Fixes: #55285 Release note (performance improvement): Reduced lock contention on `ssmemstorage.RecordStatement`. This is useful for workloads that execute the same statement concurrently on the same SQL instance. --- pkg/sql/sqlstats/sslocal/BUILD.bazel | 8 + pkg/sql/sqlstats/sslocal/sql_stats_test.go | 144 +++++++++++++++++ .../sqlstats/ssmemstorage/ss_mem_iterator.go | 13 +- .../sqlstats/ssmemstorage/ss_mem_storage.go | 147 +++++++++++------- .../sqlstats/ssmemstorage/ss_mem_writer.go | 89 +++++++---- 5 files changed, 312 insertions(+), 89 deletions(-) diff --git a/pkg/sql/sqlstats/sslocal/BUILD.bazel b/pkg/sql/sqlstats/sslocal/BUILD.bazel index 158359cd721e..38ce118a8787 100644 --- a/pkg/sql/sqlstats/sslocal/BUILD.bazel +++ b/pkg/sql/sqlstats/sslocal/BUILD.bazel @@ -44,6 +44,7 @@ go_test( deps = [ ":sslocal", "//pkg/base", + "//pkg/kv/kvpb", "//pkg/roachpb", "//pkg/security/securityassets", "//pkg/security/securitytest", @@ -54,6 +55,8 @@ go_test( "//pkg/settings/cluster", "//pkg/sql", "//pkg/sql/appstatspb", + "//pkg/sql/clusterunique", + "//pkg/sql/execstats", "//pkg/sql/sessiondata", "//pkg/sql/sessiondatapb", "//pkg/sql/sessionphase", @@ -62,15 +65,20 @@ go_test( "//pkg/sql/sqlstats/persistedsqlstats", "//pkg/sql/sqlstats/persistedsqlstats/sqlstatsutil", "//pkg/sql/tests", + "//pkg/storage/enginepb", "//pkg/testutils", "//pkg/testutils/serverutils", "//pkg/testutils/sqlutils", "//pkg/testutils/testcluster", "//pkg/util", + "//pkg/util/hlc", "//pkg/util/leaktest", "//pkg/util/log", + "//pkg/util/metric", "//pkg/util/mon", "//pkg/util/timeutil", + "//pkg/util/uint128", + "//pkg/util/uuid", "@com_github_jackc_pgx_v4//:pgx", "@com_github_lib_pq//:pq", "@com_github_stretchr_testify//require", ], diff --git a/pkg/sql/sqlstats/sslocal/sql_stats_test.go b/pkg/sql/sqlstats/sslocal/sql_stats_test.go index d071eff664d7..ea4d6ea0ecec 100644 --- a/pkg/sql/sqlstats/sslocal/sql_stats_test.go +++ b/pkg/sql/sqlstats/sslocal/sql_stats_test.go @@ -14,6 +14,7 @@ import ( "context" gosql "database/sql" "encoding/json" + "fmt" "math" "net/url" "sort" @@ -22,6 +23,7 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" +
"github.com/cockroachdb/cockroach/pkg/kv/kvpb" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/server/serverpb" @@ -29,6 +31,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/appstatspb" + "github.com/cockroachdb/cockroach/pkg/sql/clusterunique" + "github.com/cockroachdb/cockroach/pkg/sql/execstats" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sessiondatapb" "github.com/cockroachdb/cockroach/pkg/sql/sessionphase" @@ -38,14 +42,19 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sqlstats/persistedsqlstats/sqlstatsutil" "github.com/cockroachdb/cockroach/pkg/sql/sqlstats/sslocal" "github.com/cockroachdb/cockroach/pkg/sql/tests" + "github.com/cockroachdb/cockroach/pkg/storage/enginepb" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/metric" "github.com/cockroachdb/cockroach/pkg/util/mon" "github.com/cockroachdb/cockroach/pkg/util/timeutil" + "github.com/cockroachdb/cockroach/pkg/util/uint128" + "github.com/cockroachdb/cockroach/pkg/util/uuid" "github.com/jackc/pgx/v4" "github.com/lib/pq" "github.com/stretchr/testify/require" @@ -508,6 +517,141 @@ func TestExplicitTxnFingerprintAccounting(t *testing.T) { } } +func BenchmarkRecordStatement(b *testing.B) { + defer leaktest.AfterTest(b)() + defer log.Scope(b).Close(b) + + ctx := context.Background() + + st := cluster.MakeTestingClusterSettings() + monitor := mon.NewUnlimitedMonitor( + context.Background(), "test", mon.MemoryResource, + nil /* curCount */, nil /* maxHist */, math.MaxInt64, st, + ) + + insightsProvider := insights.New(st, insights.NewMetrics()) + sqlStats := sslocal.New( + st, + sqlstats.MaxMemSQLStatsStmtFingerprints, + sqlstats.MaxMemSQLStatsTxnFingerprints, + metric.NewGauge(sql.MetaReportedSQLStatsMemCurBytes), /* curMemoryBytesCount */ + metric.NewHistogram(metric.HistogramOptions{ + Metadata: sql.MetaReportedSQLStatsMemMaxBytes, + Duration: 10 * time.Second, + MaxVal: 19 * 1000, + SigFigs: 3, + Buckets: metric.MemoryUsage64MBBuckets, + }), /* maxMemoryBytesHist */ + insightsProvider.Writer, + monitor, + nil, /* reportingSink */ + nil, /* knobs */ + insightsProvider.LatencyInformation(), + ) + + appStats := sqlStats.GetApplicationStats("" /* appName */, false /* internal */) + statsCollector := sslocal.NewStatsCollector( + st, + appStats, + sessionphase.NewTimes(), + nil, /* knobs */ + ) + + txnId := uuid.FastMakeV4() + generateRecord := func() sqlstats.RecordedStmtStats { + return sqlstats.RecordedStmtStats{ + SessionID: clusterunique.ID{Uint128: uint128.Uint128{Hi: 0x1771ca67e66e6b48, Lo: 0x1}}, + StatementID: clusterunique.ID{Uint128: uint128.Uint128{Hi: 0x1771ca67e67262e8, Lo: 0x1}}, + TransactionID: txnId, + AutoRetryCount: 0, + AutoRetryReason: error(nil), + RowsAffected: 1, 
+ IdleLatencySec: 0, + ParseLatencySec: 6.5208e-05, + PlanLatencySec: 0.000187041, + RunLatencySec: 0.500771041, + ServiceLatencySec: 0.501024374, + OverheadLatencySec: 1.0839999999845418e-06, + BytesRead: 30, + RowsRead: 1, + RowsWritten: 1, + Nodes: []int64{1}, + StatementType: 1, + Plan: (*appstatspb.ExplainTreePlanNode)(nil), + PlanGist: "AgHQAQIAAwIAAAcGBQYh0AEAAQ==", + StatementError: error(nil), + IndexRecommendations: []string(nil), + Query: "UPDATE t SET s = '_' WHERE id = '_'", + StartTime: time.Date(2023, time.July, 14, 16, 58, 2, 837542000, time.UTC), + EndTime: time.Date(2023, time.July, 14, 16, 58, 3, 338566374, time.UTC), + FullScan: false, + ExecStats: &execstats.QueryLevelStats{ + NetworkBytesSent: 10, + MaxMemUsage: 40960, + MaxDiskUsage: 197, + KVBytesRead: 30, + KVPairsRead: 1, + KVRowsRead: 1, + KVBatchRequestsIssued: 1, + KVTime: 498771793, + MvccSteps: 1, + MvccStepsInternal: 2, + MvccSeeks: 4, + MvccSeeksInternal: 4, + MvccBlockBytes: 39, + MvccBlockBytesInCache: 25, + MvccKeyBytes: 23, + MvccValueBytes: 250, + MvccPointCount: 6, + MvccPointsCoveredByRangeTombstones: 99, + MvccRangeKeyCount: 9, + MvccRangeKeyContainedPoints: 88, + MvccRangeKeySkippedPoints: 66, + NetworkMessages: 55, + ContentionTime: 498546750, + ContentionEvents: []kvpb.ContentionEvent{{Key: []byte{}, TxnMeta: enginepb.TxnMeta{ID: uuid.FastMakeV4(), Key: []uint8{0xf0, 0x89, 0x12, 0x74, 0x65, 0x73, 0x74, 0x0, 0x1, 0x88}, IsoLevel: 0, Epoch: 0, WriteTimestamp: hlc.Timestamp{WallTime: 1689354396802600000, Logical: 0, Synthetic: false}, MinTimestamp: hlc.Timestamp{WallTime: 1689354396802600000, Logical: 0, Synthetic: false}, Priority: 118164, Sequence: 1, CoordinatorNodeID: 1}, Duration: 498546750}}, + RUEstimate: 9999, + CPUTime: 12345, + SqlInstanceIds: map[base.SQLInstanceID]struct{}{1: {}}, + Regions: []string{"test"}}, + Indexes: []string{"104@1"}, + Database: "defaultdb", + } + } + + parallel := []int{10} + for _, p := range parallel { + name := fmt.Sprintf("RecordStatement: Parallelism %d", p) + b.Run(name, func(b *testing.B) { + b.SetParallelism(p) + recordValue := generateRecord() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + _, err := statsCollector.RecordStatement( + ctx, + appstatspb.StatementStatisticsKey{ + Query: "select * from T where t.l = 1000", + ImplicitTxn: true, + Vec: true, + FullScan: true, + DistSQL: true, + Database: "testDb", + App: "myTestApp", + PlanHash: 9001, + QuerySummary: "select * from T", + }, + recordValue, + ) + // Adds overhead to benchmark and shows up in profile + if err != nil { + require.NoError(b, err) + } + } + }) + }) + } +} + func TestAssociatingStmtStatsWithTxnFingerprint(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) diff --git a/pkg/sql/sqlstats/ssmemstorage/ss_mem_iterator.go b/pkg/sql/sqlstats/ssmemstorage/ss_mem_iterator.go index 21b5650f7c31..0da40cb84fbd 100644 --- a/pkg/sql/sqlstats/ssmemstorage/ss_mem_iterator.go +++ b/pkg/sql/sqlstats/ssmemstorage/ss_mem_iterator.go @@ -35,11 +35,14 @@ func NewStmtStatsIterator( container *Container, options sqlstats.IteratorOptions, ) StmtStatsIterator { var stmtKeys stmtList - container.mu.Lock() - for k := range container.mu.stmts { - stmtKeys = append(stmtKeys, k) - } - container.mu.Unlock() + func() { + container.mu.RLock() + defer container.mu.RUnlock() + for k := range container.mu.stmts { + stmtKeys = append(stmtKeys, k) + } + }() + if options.SortedKey { sort.Sort(stmtKeys) } diff --git a/pkg/sql/sqlstats/ssmemstorage/ss_mem_storage.go 
b/pkg/sql/sqlstats/ssmemstorage/ss_mem_storage.go index 8d8f57b87b0a..7c29ba7d2076 100644 --- a/pkg/sql/sqlstats/ssmemstorage/ss_mem_storage.go +++ b/pkg/sql/sqlstats/ssmemstorage/ss_mem_storage.go @@ -95,9 +95,7 @@ type Container struct { } mu struct { - // TODO(arul): This can be refactored to have a RWLock instead, and have all - // usages acquire a read lock whenever appropriate. See #55285. - syncutil.Mutex + syncutil.RWMutex // acc is the memory account that tracks memory allocations related to stmts // and txns within this Container struct. @@ -107,6 +105,12 @@ type Container struct { stmts map[stmtKey]*stmtStats txns map[appstatspb.TransactionFingerprintID]*txnStats + } + + // Use a separate lock to avoid lock contention. Don't block the statement + // stats just to update the sampled plan time. + muPlanCache struct { + syncutil.RWMutex // sampledPlanMetadataCache records when was the last time the plan was // sampled. This data structure uses a subset of stmtKey as the key into @@ -156,7 +160,7 @@ func New( s.mu.stmts = make(map[stmtKey]*stmtStats) s.mu.txns = make(map[appstatspb.TransactionFingerprintID]*txnStats) - s.mu.sampledPlanMetadataCache = make(map[sampledPlanKey]time.Time) + s.muPlanCache.sampledPlanMetadataCache = make(map[sampledPlanKey]time.Time) s.atomic.uniqueStmtFingerprintCount = uniqueStmtFingerprintCount s.atomic.uniqueTxnFingerprintCount = uniqueTxnFingerprintCount @@ -546,6 +550,18 @@ func (s *Container) getStatsForStmt( func (s *Container) getStatsForStmtWithKey( key stmtKey, stmtFingerprintID appstatspb.StmtFingerprintID, createIfNonexistent bool, ) (stats *stmtStats, created, throttled bool) { + // Use the read lock to get the key to avoid contention. + ok := func() (ok bool) { + s.mu.RLock() + defer s.mu.RUnlock() + stats, ok = s.mu.stmts[key] + return ok + }() + if ok || !createIfNonexistent { + return stats, false /* created */, false /* throttled */ + } + + // Key does not exist in map. Take a full lock to add the key. s.mu.Lock() defer s.mu.Unlock() return s.getStatsForStmtWithKeyLocked(key, stmtFingerprintID, createIfNonexistent) @@ -557,30 +573,32 @@ func (s *Container) getStatsForStmtWithKeyLocked( // Retrieve the per-statement statistic object, and create it if it // doesn't exist yet. stats, ok := s.mu.stmts[key] - if !ok && createIfNonexistent { - // If the uniqueStmtFingerprintCount is nil, then we don't check for - // fingerprint limit. - if s.atomic.uniqueStmtFingerprintCount != nil { - // We check if we have reached the limit of unique fingerprints we can - // store. - limit := s.uniqueStmtFingerprintLimit.Get(&s.st.SV) - incrementedFingerprintCount := - atomic.AddInt64(s.atomic.uniqueStmtFingerprintCount, int64(1) /* delts */) - - // Abort if we have exceeded limit of unique statement fingerprints. - if incrementedFingerprintCount > limit { - atomic.AddInt64(s.atomic.uniqueStmtFingerprintCount, -int64(1) /* delts */) - return stats, false /* created */, true /* throttled */ - } + if ok || !createIfNonexistent { + return stats, false /* created */, false /* throttled */ + } + + // If the uniqueStmtFingerprintCount is nil, then we don't check for + // fingerprint limit. + if s.atomic.uniqueStmtFingerprintCount != nil { + // We check if we have reached the limit of unique fingerprints we can + // store. + limit := s.uniqueStmtFingerprintLimit.Get(&s.st.SV) + incrementedFingerprintCount := + atomic.AddInt64(s.atomic.uniqueStmtFingerprintCount, int64(1) /* delts */) + + // Abort if we have exceeded limit of unique statement fingerprints. 
+ if incrementedFingerprintCount > limit { + atomic.AddInt64(s.atomic.uniqueStmtFingerprintCount, -int64(1) /* delts */) + return stats, false /* created */, true /* throttled */ } - stats = &stmtStats{} - stats.ID = stmtFingerprintID - s.mu.stmts[key] = stats - s.mu.sampledPlanMetadataCache[key.sampledPlanKey] = s.getTimeNow() - - return stats, true /* created */, false /* throttled */ } - return stats, false /* created */, false /* throttled */ + stats = &stmtStats{} + stats.ID = stmtFingerprintID + s.mu.stmts[key] = stats + + s.setLogicalPlanLastSampled(key.sampledPlanKey, s.getTimeNow()) + + return stats, true /* created */, false /* throttled */ } func (s *Container) getStatsForTxnWithKey( @@ -588,9 +606,20 @@ func (s *Container) getStatsForTxnWithKey( stmtFingerprintIDs []appstatspb.StmtFingerprintID, createIfNonexistent bool, ) (stats *txnStats, created, throttled bool) { + // Use the read lock to get the key to avoid contention + ok := func() (ok bool) { + s.mu.RLock() + defer s.mu.RUnlock() + stats, ok = s.mu.txns[key] + return ok + }() + if ok || !createIfNonexistent { + return stats, false /* created */, false /* throttled */ + } + + // Key does not exist in map. Take a full lock to add the key. s.mu.Lock() defer s.mu.Unlock() - return s.getStatsForTxnWithKeyLocked(key, stmtFingerprintIDs, createIfNonexistent) } @@ -602,33 +631,34 @@ func (s *Container) getStatsForTxnWithKeyLocked( // Retrieve the per-transaction statistic object, and create it if it doesn't // exist yet. stats, ok := s.mu.txns[key] - if !ok && createIfNonexistent { - // If the uniqueTxnFingerprintCount is nil, then we don't check for - // fingerprint limit. - if s.atomic.uniqueTxnFingerprintCount != nil { - limit := s.uniqueTxnFingerprintLimit.Get(&s.st.SV) - incrementedFingerprintCount := - atomic.AddInt64(s.atomic.uniqueTxnFingerprintCount, int64(1) /* delts */) - - // If we have exceeded limit of fingerprint count, decrement the counter - // and abort. - if incrementedFingerprintCount > limit { - atomic.AddInt64(s.atomic.uniqueTxnFingerprintCount, -int64(1) /* delts */) - return nil /* stats */, false /* created */, true /* throttled */ - } + if ok || !createIfNonexistent { + return stats, false /* created */, false /* throttled */ + } + + // If the uniqueTxnFingerprintCount is nil, then we don't check for + // fingerprint limit. + if s.atomic.uniqueTxnFingerprintCount != nil { + limit := s.uniqueTxnFingerprintLimit.Get(&s.st.SV) + incrementedFingerprintCount := + atomic.AddInt64(s.atomic.uniqueTxnFingerprintCount, int64(1) /* delts */) + + // If we have exceeded limit of fingerprint count, decrement the counter + // and abort. + if incrementedFingerprintCount > limit { + atomic.AddInt64(s.atomic.uniqueTxnFingerprintCount, -int64(1) /* delts */) + return nil /* stats */, false /* created */, true /* throttled */ } - stats = &txnStats{} - stats.statementFingerprintIDs = stmtFingerprintIDs - s.mu.txns[key] = stats - return stats, true /* created */, false /* throttled */ } - return stats, false /* created */, false /* throttled */ + stats = &txnStats{} + stats.statementFingerprintIDs = stmtFingerprintIDs + s.mu.txns[key] = stats + return stats, true /* created */, false /* throttled */ } // SaveToLog saves the existing statement stats into the info log. 
func (s *Container) SaveToLog(ctx context.Context, appName string) { - s.mu.Lock() - defer s.mu.Unlock() + s.mu.RLock() + defer s.mu.RUnlock() if len(s.mu.stmts) == 0 { return } @@ -658,7 +688,10 @@ func (s *Container) Clear(ctx context.Context) { // large for the likely future workload. s.mu.stmts = make(map[stmtKey]*stmtStats, len(s.mu.stmts)/2) s.mu.txns = make(map[appstatspb.TransactionFingerprintID]*txnStats, len(s.mu.txns)/2) - s.mu.sampledPlanMetadataCache = make(map[sampledPlanKey]time.Time, len(s.mu.sampledPlanMetadataCache)/2) + + s.muPlanCache.Lock() + defer s.muPlanCache.Unlock() + s.muPlanCache.sampledPlanMetadataCache = make(map[sampledPlanKey]time.Time, len(s.muPlanCache.sampledPlanMetadataCache)/2) } // Free frees the accounted resources from the Container. The Container is @@ -768,8 +801,8 @@ func (s *Container) MergeApplicationTransactionStats( // a lock on a will cause a deadlock. func (s *Container) Add(ctx context.Context, other *Container) (err error) { statMap := func() map[stmtKey]*stmtStats { - other.mu.Lock() - defer other.mu.Unlock() + other.mu.RLock() + defer other.mu.RUnlock() statMap := make(map[stmtKey]*stmtStats) for k, v := range other.mu.stmts { @@ -946,16 +979,16 @@ func (s *transactionCounts) recordTransactionCounts( func (s *Container) getLogicalPlanLastSampled( key sampledPlanKey, ) (lastSampled time.Time, found bool) { - s.mu.Lock() - defer s.mu.Unlock() - lastSampled, found = s.mu.sampledPlanMetadataCache[key] + s.muPlanCache.RLock() + defer s.muPlanCache.RUnlock() + lastSampled, found = s.muPlanCache.sampledPlanMetadataCache[key] return lastSampled, found } func (s *Container) setLogicalPlanLastSampled(key sampledPlanKey, time time.Time) { - s.mu.Lock() - defer s.mu.Unlock() - s.mu.sampledPlanMetadataCache[key] = time + s.muPlanCache.Lock() + defer s.muPlanCache.Unlock() + s.muPlanCache.sampledPlanMetadataCache[key] = time } // shouldSaveLogicalPlanDescription returns whether we should save the sample diff --git a/pkg/sql/sqlstats/ssmemstorage/ss_mem_writer.go b/pkg/sql/sqlstats/ssmemstorage/ss_mem_writer.go index 9751379a8466..361c5a74bce2 100644 --- a/pkg/sql/sqlstats/ssmemstorage/ss_mem_writer.go +++ b/pkg/sql/sqlstats/ssmemstorage/ss_mem_writer.go @@ -103,20 +103,57 @@ func (s *Container) RecordStatement( return stmtFingerprintID, nil } + var errorCode string + if value.StatementError != nil { + errorCode = pgerror.GetPGCode(value.StatementError).String() + } + + s.observeInsightStatement(value, stmtFingerprintID, errorCode) + + var lastErrorCode string + if key.Failed { + lastErrorCode = pgerror.GetPGCode(value.StatementError).String() + } + + // Percentile latencies are only being sampled if the latency was above the + // AnomalyDetectionLatencyThreshold. + // The Insights detector already does a flush when detecting for anomaly latency, + // so there is no need to force a flush when retrieving the data during this step. + latencies := s.latencyInformation.GetPercentileValues(stmtFingerprintID, false) + latencyInfo := appstatspb.LatencyInfo{ + Min: value.ServiceLatencySec, + Max: value.ServiceLatencySec, + P50: latencies.P50, + P90: latencies.P90, + P99: latencies.P99, + } + + // Get the time outside the lock to reduce time in the lock + timeNow := s.getTimeNow() + + // setLogicalPlanLastSampled has its own lock so update it before taking + // the stats lock. + if value.Plan != nil { + s.setLogicalPlanLastSampled(statementKey.sampledPlanKey, timeNow) + } + + planGist := []string{value.PlanGist} + // Collect the per-statement statistics.
+ // Do as much computation before the lock to reduce the amount of time the
+ // lock is held, which reduces lock contention.
 stats.mu.Lock()
 defer stats.mu.Unlock()

 stats.mu.data.Count++
 if key.Failed {
 stats.mu.data.SensitiveInfo.LastErr = value.StatementError.Error()
- stats.mu.data.LastErrorCode = pgerror.GetPGCode(value.StatementError).String()
+ stats.mu.data.LastErrorCode = lastErrorCode
 }
 // Only update MostRecentPlanDescription if we sampled a new PlanDescription.
 if value.Plan != nil {
 stats.mu.data.SensitiveInfo.MostRecentPlanDescription = *value.Plan
- stats.mu.data.SensitiveInfo.MostRecentPlanTimestamp = s.getTimeNow()
- s.setLogicalPlanLastSampled(statementKey.sampledPlanKey, stats.mu.data.SensitiveInfo.MostRecentPlanTimestamp)
+ stats.mu.data.SensitiveInfo.MostRecentPlanTimestamp = timeNow
 }
 if value.AutoRetryCount == 0 {
 stats.mu.data.FirstAttemptCount++
@@ -135,27 +172,14 @@ func (s *Container) RecordStatement(
 stats.mu.data.BytesRead.Record(stats.mu.data.Count, float64(value.BytesRead))
 stats.mu.data.RowsRead.Record(stats.mu.data.Count, float64(value.RowsRead))
 stats.mu.data.RowsWritten.Record(stats.mu.data.Count, float64(value.RowsWritten))
- stats.mu.data.LastExecTimestamp = s.getTimeNow()
+ stats.mu.data.LastExecTimestamp = timeNow
 stats.mu.data.Nodes = util.CombineUnique(stats.mu.data.Nodes, value.Nodes)
 if value.ExecStats != nil {
 stats.mu.data.Regions = util.CombineUnique(stats.mu.data.Regions, value.ExecStats.Regions)
 }
- stats.mu.data.PlanGists = util.CombineUnique(stats.mu.data.PlanGists, []string{value.PlanGist})
- stats.mu.data.IndexRecommendations = value.IndexRecommendations
+ stats.mu.data.PlanGists = util.CombineUnique(stats.mu.data.PlanGists, planGist)
 stats.mu.data.Indexes = util.CombineUnique(stats.mu.data.Indexes, value.Indexes)
-
- // Percentile latencies are only being sampled if the latency was above the
- // AnomalyDetectionLatencyThreshold.
- // The Insights detector already does a flush when detecting for anomaly latency,
- // so there is no need to force a flush when retrieving the data during this step.
- latencies := s.latencyInformation.GetPercentileValues(stmtFingerprintID, false)
- latencyInfo := appstatspb.LatencyInfo{
- Min: value.ServiceLatencySec,
- Max: value.ServiceLatencySec,
- P50: latencies.P50,
- P90: latencies.P90,
- P99: latencies.P99,
- }
+ stats.mu.data.IndexRecommendations = value.IndexRecommendations
 stats.mu.data.LatencyInfo.Add(latencyInfo)

 // Note that some fields derived from tracing statements (such as
@@ -192,6 +216,14 @@ func (s *Container) RecordStatement(
 }
 }

+ return stats.ID, nil
+}
+
+func (s *Container) observeInsightStatement(
+ value sqlstats.RecordedStmtStats,
+ stmtFingerprintID appstatspb.StmtFingerprintID,
+ errorCode string,
+) {
 var autoRetryReason string
 if value.AutoRetryReason != nil {
 autoRetryReason = value.AutoRetryReason.Error()
@@ -204,11 +236,6 @@ func (s *Container) RecordStatement(
 cpuSQLNanos = value.ExecStats.CPUTime.Nanoseconds()
 }

- var errorCode string
- if value.StatementError != nil {
- errorCode = pgerror.GetPGCode(value.StatementError).String()
- }
-
 s.insights.ObserveStatement(value.SessionID, &insights.Statement{
 ID: value.StatementID,
 FingerprintID: stmtFingerprintID,
@@ -230,8 +257,6 @@ func (s *Container) RecordStatement(
 CPUSQLNanos: cpuSQLNanos,
 ErrorCode: errorCode,
 })
-
- return stats.ID, nil
 }

 // RecordStatementExecStats implements sqlstats.Writer interface.
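The shape of this refactor: everything that does not need `stats.mu` (the error code, the percentile latencies, the timestamp, the plan-gist slice) is computed before `stats.mu.Lock()`, and the plan-sample cache now sits behind its own `muPlanCache` mutex so it can be updated without the stats lock. The `RecordTransaction` hunk below makes the same move. A hedged before/after sketch of the idea, with illustrative names rather than the real `stmtStats` fields:

    package main

    import (
    	"fmt"
    	"sync"
    	"time"
    )

    // record is an illustrative stand-in for stmtStats: a mutex-guarded
    // bag of fields updated on every execution.
    type record struct {
    	mu   sync.Mutex
    	last time.Time
    	code string
    	tags []string
    }

    // classify stands in for pre-lock work such as computing an error code
    // or fetching percentile latencies.
    func classify(err error) string {
    	if err != nil {
    		return "error"
    	}
    	return "ok"
    }

    // slowUpdate computes derived values while holding the lock, so every
    // concurrent caller serializes behind the computation and the writes.
    func (r *record) slowUpdate(err error, tag string) {
    	r.mu.Lock()
    	defer r.mu.Unlock()
    	r.last = time.Now()    // computed under the lock
    	r.code = classify(err) // computed under the lock
    	r.tags = append(r.tags, tag)
    }

    // fastUpdate computes everything up front and holds the lock only for
    // the assignments, which is the shape RecordStatement moves toward.
    func (r *record) fastUpdate(err error, tag string) {
    	now := time.Now()     // computed outside the lock
    	code := classify(err) // computed outside the lock

    	r.mu.Lock()
    	defer r.mu.Unlock()
    	r.last = now
    	r.code = code
    	r.tags = append(r.tags, tag)
    }

    func main() {
    	var r record
    	r.slowUpdate(nil, "before")
    	r.fastUpdate(nil, "after")
    	fmt.Println(r.code, r.tags)
    }

The trade-off is that values such as the timestamp are captured slightly before the write becomes visible, which is acceptable for statistics but worth keeping in mind for anything that must be exact at write time.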
@@ -293,7 +318,12 @@ func (s *Container) RecordTransaction(
 return ErrFingerprintLimitReached
 }

+ // Insights logic is thread-safe and does not require the stats lock.
+ s.observeInsightTransaction(value, key)
+
 // Collect the per-transaction statistics.
+ // Do as much computation before the lock to reduce the amount of time the
+ // lock is held, which reduces lock contention.
 stats.mu.Lock()
 defer stats.mu.Unlock()

@@ -356,6 +386,12 @@ func (s *Container) RecordTransaction(
 stats.mu.data.ExecStats.MVCCIteratorStats.RangeKeySkippedPoints.Record(stats.mu.data.ExecStats.Count, float64(value.ExecStats.MvccRangeKeySkippedPoints))
 }

+ return nil
+}
+
+func (s *Container) observeInsightTransaction(
+ value sqlstats.RecordedTxnStats, key appstatspb.TransactionFingerprintID,
+) {
 var retryReason string
 if value.AutoRetryReason != nil {
 retryReason = value.AutoRetryReason.Error()
@@ -382,7 +418,6 @@ func (s *Container) RecordTransaction(
 AutoRetryReason: retryReason,
 CPUSQLNanos: cpuSQLNanos,
 })
- return nil
 }

 func (s *Container) recordTransactionHighLevelStats(
From fb97251da5543ea07144c64a0e066f7c6e4de2be Mon Sep 17 00:00:00 2001
From: maryliag
Date: Wed, 19 Jul 2023 14:09:46 -0400
Subject: [PATCH 5/7] tests: fix stories files

Stories files used for testing were missing some fixtures.
This commit adds the missing parameters.

Epic: none

Release note: None
---
 .../transactionDetails/transactionDetails.fixture.ts | 2 ++
 .../transactionDetails/transactionDetails.stories.tsx | 9 +++++++++
 .../src/transactionsPage/transactions.fixture.ts | 1 +
 .../src/transactionsPage/transactionsPage.stories.tsx | 11 +++++++++++
 4 files changed, 23 insertions(+)

diff --git a/pkg/ui/workspaces/cluster-ui/src/transactionDetails/transactionDetails.fixture.ts b/pkg/ui/workspaces/cluster-ui/src/transactionDetails/transactionDetails.fixture.ts
index 5228e98662f0..040dd30fd9d0 100644
--- a/pkg/ui/workspaces/cluster-ui/src/transactionDetails/transactionDetails.fixture.ts
+++ b/pkg/ui/workspaces/cluster-ui/src/transactionDetails/transactionDetails.fixture.ts
@@ -541,3 +541,5 @@ export const timeScale: TimeScale = {
 fixedWindowEnd: moment.utc("2021.12.31"),
 key: "Custom",
 };
+
+export const requestTime = moment.utc("2023.01.5");
diff --git a/pkg/ui/workspaces/cluster-ui/src/transactionDetails/transactionDetails.stories.tsx b/pkg/ui/workspaces/cluster-ui/src/transactionDetails/transactionDetails.stories.tsx
index f1c279357119..192cebc591fd 100644
--- a/pkg/ui/workspaces/cluster-ui/src/transactionDetails/transactionDetails.stories.tsx
+++ b/pkg/ui/workspaces/cluster-ui/src/transactionDetails/transactionDetails.stories.tsx
@@ -14,6 +14,7 @@ import { MemoryRouter } from "react-router-dom";
 import { noop } from "lodash";
 import {
 nodeRegions,
+ requestTime,
 routeProps,
 timeScale,
 transactionDetailsData,
@@ -46,6 +47,8 @@ storiesOf("Transactions Details", module)
 refreshTransactionInsights={noop}
 limit={100}
 reqSortSetting={StatsSortOptions.SERVICE_LAT}
+ requestTime={requestTime}
+ onRequestTimeChange={noop}
 txnStatsResp={{
 lastUpdated: moment(),
 error: null,
@@ -71,6 +74,8 @@ storiesOf("Transactions Details", module)
 refreshTransactionInsights={noop}
 limit={100}
 reqSortSetting={StatsSortOptions.SERVICE_LAT}
+ requestTime={requestTime}
+ onRequestTimeChange={noop}
 txnStatsResp={{
 lastUpdated: moment(),
 error: null,
@@ -96,6 +101,8 @@ storiesOf("Transactions Details", module)
 refreshTransactionInsights={noop}
 limit={100}
 reqSortSetting={StatsSortOptions.SERVICE_LAT}
+ requestTime={moment()}
+ onRequestTimeChange={noop}
 txnStatsResp={{
lastUpdated: moment(),
 error: null,
@@ -122,6 +129,8 @@ storiesOf("Transactions Details", module)
 refreshTransactionInsights={noop}
 limit={100}
 reqSortSetting={StatsSortOptions.SERVICE_LAT}
+ requestTime={requestTime}
+ onRequestTimeChange={noop}
 txnStatsResp={{
 lastUpdated: moment(),
 error: null,
diff --git a/pkg/ui/workspaces/cluster-ui/src/transactionsPage/transactions.fixture.ts b/pkg/ui/workspaces/cluster-ui/src/transactionsPage/transactions.fixture.ts
index 492e81ab6a41..5b426085fdad 100644
--- a/pkg/ui/workspaces/cluster-ui/src/transactionsPage/transactions.fixture.ts
+++ b/pkg/ui/workspaces/cluster-ui/src/transactionsPage/transactions.fixture.ts
@@ -53,6 +53,7 @@ export const timeScale: TimeScale = {
 export const timestamp = new protos.google.protobuf.Timestamp({
 seconds: new Long(Date.parse("Sep 15 2021 01:00:00 GMT") * 1e-3),
 });
+export const requestTime = moment.utc("2023.01.5");

 export const sortSetting: SortSetting = {
 ascending: false,
diff --git a/pkg/ui/workspaces/cluster-ui/src/transactionsPage/transactionsPage.stories.tsx b/pkg/ui/workspaces/cluster-ui/src/transactionsPage/transactionsPage.stories.tsx
index 4eddfc992b58..0faea9f8242d 100644
--- a/pkg/ui/workspaces/cluster-ui/src/transactionsPage/transactionsPage.stories.tsx
+++ b/pkg/ui/workspaces/cluster-ui/src/transactionsPage/transactionsPage.stories.tsx
@@ -18,6 +18,7 @@ import {
 filters,
 lastUpdated,
 nodeRegions,
+ requestTime,
 routeProps,
 sortSetting,
 timeScale,
@@ -69,6 +70,8 @@ storiesOf("Transactions Page", module)
 resetSQLStats={noop}
 search={""}
 sortSetting={sortSetting}
+ requestTime={requestTime}
+ onRequestTimeChange={noop}
 {...defaultLimitAndSortProps}
 />
 );
@@ -99,6 +102,8 @@ storiesOf("Transactions Page", module)
 resetSQLStats={noop}
 search={""}
 sortSetting={sortSetting}
+ requestTime={requestTime}
+ onRequestTimeChange={noop}
 {...defaultLimitAndSortProps}
 />
 );
@@ -136,6 +141,8 @@ storiesOf("Transactions Page", module)
 resetSQLStats={noop}
 search={""}
 sortSetting={sortSetting}
+ requestTime={requestTime}
+ onRequestTimeChange={noop}
 {...defaultLimitAndSortProps}
 />
 );
@@ -166,6 +173,8 @@ storiesOf("Transactions Page", module)
 resetSQLStats={noop}
 search={""}
 sortSetting={sortSetting}
+ requestTime={requestTime}
+ onRequestTimeChange={noop}
 {...defaultLimitAndSortProps}
 />
 );
@@ -200,6 +209,8 @@ storiesOf("Transactions Page", module)
 resetSQLStats={noop}
 search={""}
 sortSetting={sortSetting}
+ requestTime={requestTime}
+ onRequestTimeChange={noop}
 {...defaultLimitAndSortProps}
 />
 );
From a05d100dfae0e0b18d4fe93770adf84a0d1b099e Mon Sep 17 00:00:00 2001
From: Drew Kimball
Date: Wed, 19 Jul 2023 12:45:19 -0600
Subject: [PATCH 6/7] schemachanger: skip flaky test

Skips `TestConcurrentDeclarativeSchemaChanges`.
Informs #106732

Release note: None
---
 pkg/sql/schemachanger/BUILD.bazel | 1 +
 pkg/sql/schemachanger/schemachanger_test.go | 2 ++
 2 files changed, 3 insertions(+)

diff --git a/pkg/sql/schemachanger/BUILD.bazel b/pkg/sql/schemachanger/BUILD.bazel
index c54cb9c35277..c265403f0464 100644
--- a/pkg/sql/schemachanger/BUILD.bazel
+++ b/pkg/sql/schemachanger/BUILD.bazel
@@ -59,6 +59,7 @@ go_test(
 "//pkg/sql/tests",
 "//pkg/testutils",
 "//pkg/testutils/serverutils",
+ "//pkg/testutils/skip",
 "//pkg/testutils/sqlutils",
 "//pkg/testutils/testcluster",
 "//pkg/util",
diff --git a/pkg/sql/schemachanger/schemachanger_test.go b/pkg/sql/schemachanger/schemachanger_test.go
index e1f99c5dc787..ea2d5617ee18 100644
--- a/pkg/sql/schemachanger/schemachanger_test.go
+++ b/pkg/sql/schemachanger/schemachanger_test.go
@@ -41,6 +41,7 @@ import (
 "github.com/cockroachdb/cockroach/pkg/sql/tests"
 "github.com/cockroachdb/cockroach/pkg/testutils"
 "github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
+ "github.com/cockroachdb/cockroach/pkg/testutils/skip"
 "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
 "github.com/cockroachdb/cockroach/pkg/util"
 "github.com/cockroachdb/cockroach/pkg/util/ctxgroup"
@@ -60,6 +61,7 @@ import (
 // schema changes operating on the same descriptors are performed serially.
 func TestConcurrentDeclarativeSchemaChanges(t *testing.T) {
 defer leaktest.AfterTest(t)()
+ skip.WithIssue(t, 106732, "flaky test")
 defer log.Scope(t).Close(t)

 ctx, cancel := context.WithCancel(context.Background())
From f52fe97ad8b98112f54b0c49165625fc16db1b94 Mon Sep 17 00:00:00 2001
From: Ricky Stewart
Date: Wed, 19 Jul 2023 15:25:39 -0500
Subject: [PATCH 7/7] bazel: make `fastbuild` the default `-c` again; never strip for `dev` builds

This is a partial revert of `60e8bcec61269c6648dc40c8094fbf303b8a02aa`. In that commit I made un-stripped binaries the default by making `dbg` the default `--compilation_mode`. This had unexpected consequences: `dbg` also disables inlining and optimization, which broke some tests. It is also surprising for a Go binary built by Bazel to be un-optimized by default, since that is the opposite of `go build`'s default behavior.

Instead, we make `fastbuild` the default again, and we solve the problem of unstripped binaries by setting `--strip never` for `dev` builds. (`dev doctor` makes you confirm that your build is a `dev` build during setup, so no one can miss this.)

Now we have the following behavior for each `compilation_mode`:

* `dbg`: disables optimizations and inlining.
* `fastbuild` and `opt`: produce the same binary, except that `fastbuild` defaults to stripping in non-`dev` configurations.

If you still want an un-optimized binary for some reason, build with `-c dbg`.
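Concretely, the intended behavior can be sketched with the following invocations. The flags are standard Bazel; the `dev` config is the one defined in `.bazelrc` below, and the target name is only an example:

    # Default (-c fastbuild): optimized; stripped outside the dev config.
    bazel build //pkg/cmd/cockroach-short

    # dev config: same optimizations, but --strip=never keeps symbols.
    bazel build --config=dev //pkg/cmd/cockroach-short

    # Explicitly un-optimized, un-inlined binary, e.g. for debugging.
    bazel build -c dbg //pkg/cmd/cockroach-short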
Epic: CRDB-17171 Release note: None --- .bazelrc | 2 +- build/teamcity/cockroach/ci/builds/build_impl.sh | 2 +- .../cockroach/nightlies/pebble_nightly_common.sh | 12 ++++++------ .../nightlies/pebble_nightly_race_common.sh | 8 ++++---- .../cockroach/nightlies/roachtest_compile_bits.sh | 8 ++++---- 5 files changed, 16 insertions(+), 16 deletions(-) diff --git a/.bazelrc b/.bazelrc index 67594bbdcd6a..4cfeacc25219 100644 --- a/.bazelrc +++ b/.bazelrc @@ -36,7 +36,7 @@ build --define gotags=bazel,gss build --experimental_proto_descriptor_sets_include_source_info build --incompatible_strict_action_env --incompatible_enable_cc_toolchain_resolution build --symlink_prefix=_bazel/ -build -c dbg +build:dev --strip=never common --experimental_allow_tags_propagation test --config=test --experimental_ui_max_stdouterr_bytes=10485760 build --ui_event_filters=-DEBUG diff --git a/build/teamcity/cockroach/ci/builds/build_impl.sh b/build/teamcity/cockroach/ci/builds/build_impl.sh index a37c1410bd9a..7c5b32fe3576 100755 --- a/build/teamcity/cockroach/ci/builds/build_impl.sh +++ b/build/teamcity/cockroach/ci/builds/build_impl.sh @@ -38,7 +38,7 @@ fi bazel build //pkg/cmd/bazci --config=ci BAZEL_BIN=$(bazel info bazel-bin --config=ci) -"$BAZEL_BIN/pkg/cmd/bazci/bazci_/bazci" -- build \ +"$BAZEL_BIN/pkg/cmd/bazci/bazci_/bazci" -- build -c opt \ --config "$CONFIG" --config ci $EXTRA_ARGS \ //pkg/cmd/cockroach-short //pkg/cmd/cockroach \ //pkg/cmd/cockroach-sql \ diff --git a/build/teamcity/cockroach/nightlies/pebble_nightly_common.sh b/build/teamcity/cockroach/nightlies/pebble_nightly_common.sh index b4c6bdb53939..80464b7f651f 100755 --- a/build/teamcity/cockroach/nightlies/pebble_nightly_common.sh +++ b/build/teamcity/cockroach/nightlies/pebble_nightly_common.sh @@ -27,8 +27,8 @@ mkdir -p "$PWD/bin" chmod o+rwx "$PWD/bin" # Build the roachtest binary. -bazel build //pkg/cmd/roachtest --config ci -BAZEL_BIN=$(bazel info bazel-bin --config ci) +bazel build //pkg/cmd/roachtest --config ci -c opt +BAZEL_BIN=$(bazel info bazel-bin --config ci -c opt) cp $BAZEL_BIN/pkg/cmd/roachtest/roachtest_/roachtest bin chmod a+w bin/roachtest @@ -39,8 +39,8 @@ chmod a+w bin/roachtest bazel run @go_sdk//:bin/go get github.com/cockroachdb/pebble@latest NEW_DEPS_BZL_CONTENT=$(bazel run //pkg/cmd/mirror/go:mirror) echo "$NEW_DEPS_BZL_CONTENT" > DEPS.bzl -bazel build @com_github_cockroachdb_pebble//cmd/pebble --config ci -BAZEL_BIN=$(bazel info bazel-bin --config ci) +bazel build @com_github_cockroachdb_pebble//cmd/pebble --config ci -c opt +BAZEL_BIN=$(bazel info bazel-bin --config ci -c opt) cp $BAZEL_BIN/external/com_github_cockroachdb_pebble/cmd/pebble/pebble_/pebble ./pebble.linux chmod a+w ./pebble.linux @@ -67,8 +67,8 @@ function prepare_datadir() { # Build the mkbench tool from within the Pebble repo. This is used to parse # the benchmark data. function build_mkbench() { - bazel build @com_github_cockroachdb_pebble//internal/mkbench --config ci - BAZEL_BIN=$(bazel info bazel-bin --config ci) + bazel build @com_github_cockroachdb_pebble//internal/mkbench --config ci -c opt + BAZEL_BIN=$(bazel info bazel-bin --config ci -c opt) cp $BAZEL_BIN/external/com_github_cockroachdb_pebble/internal/mkbench/mkbench_/mkbench . 
chmod a+w mkbench } diff --git a/build/teamcity/cockroach/nightlies/pebble_nightly_race_common.sh b/build/teamcity/cockroach/nightlies/pebble_nightly_race_common.sh index 43f2e0950345..631ce2e1b241 100755 --- a/build/teamcity/cockroach/nightlies/pebble_nightly_race_common.sh +++ b/build/teamcity/cockroach/nightlies/pebble_nightly_race_common.sh @@ -27,8 +27,8 @@ mkdir -p "$PWD/bin" chmod o+rwx "$PWD/bin" # Build the roachtest binary. -bazel build //pkg/cmd/roachtest --config ci -BAZEL_BIN=$(bazel info bazel-bin --config ci) +bazel build //pkg/cmd/roachtest --config ci -c opt +BAZEL_BIN=$(bazel info bazel-bin --config ci -c opt) cp $BAZEL_BIN/pkg/cmd/roachtest/roachtest_/roachtest bin chmod a+w bin/roachtest @@ -39,8 +39,8 @@ chmod a+w bin/roachtest bazel run @go_sdk//:bin/go get github.com/cockroachdb/pebble@latest NEW_DEPS_BZL_CONTENT=$(bazel run //pkg/cmd/mirror/go:mirror) echo "$NEW_DEPS_BZL_CONTENT" > DEPS.bzl -bazel build @com_github_cockroachdb_pebble//cmd/pebble --config race --config ci -BAZEL_BIN=$(bazel info bazel-bin --config race --config ci) +bazel build @com_github_cockroachdb_pebble//cmd/pebble --config race --config ci -c opt +BAZEL_BIN=$(bazel info bazel-bin --config race --config ci -c opt) cp $BAZEL_BIN/external/com_github_cockroachdb_pebble/cmd/pebble/pebble_/pebble ./pebble.linux chmod a+w ./pebble.linux diff --git a/build/teamcity/cockroach/nightlies/roachtest_compile_bits.sh b/build/teamcity/cockroach/nightlies/roachtest_compile_bits.sh index 1d3405b6d56c..60e33c81e8f2 100644 --- a/build/teamcity/cockroach/nightlies/roachtest_compile_bits.sh +++ b/build/teamcity/cockroach/nightlies/roachtest_compile_bits.sh @@ -35,21 +35,21 @@ for platform in "${cross_builds[@]}"; do echo "Building $platform, os=$os, arch=$arch..." # Build cockroach, workload and geos libs. - bazel build --config $platform --config ci --config force_build_cdeps \ + bazel build --config $platform --config ci -c opt --config force_build_cdeps \ //pkg/cmd/cockroach //pkg/cmd/workload \ //c-deps:libgeos - BAZEL_BIN=$(bazel info bazel-bin --config $platform --config ci) + BAZEL_BIN=$(bazel info bazel-bin --config $platform --config ci -c opt) # N.B. roachtest is built once, for the host architecture. if [[ $os == "linux" && $arch == $host_arch ]]; then - bazel build --config $platform --config ci //pkg/cmd/roachtest + bazel build --config $platform --config ci -c opt //pkg/cmd/roachtest cp $BAZEL_BIN/pkg/cmd/roachtest/roachtest_/roachtest bin/roachtest # Make it writable to simplify cleanup and copying (e.g., scp retry). chmod a+w bin/roachtest fi # Build cockroach-short with assertions enabled. - bazel build --config $platform --config ci //pkg/cmd/cockroach-short --crdb_test + bazel build --config $platform --config ci -c opt //pkg/cmd/cockroach-short --crdb_test # Copy the binaries. cp $BAZEL_BIN/pkg/cmd/cockroach/cockroach_/cockroach bin/cockroach.$os-$arch cp $BAZEL_BIN/pkg/cmd/workload/workload_/workload bin/workload.$os-$arch