current_schema() → string | Returns the current schema.
diff --git a/pkg/sql/sem/builtins/builtins.go b/pkg/sql/sem/builtins/builtins.go
index 97103b02ef85..ffd069388d01 100644
--- a/pkg/sql/sem/builtins/builtins.go
+++ b/pkg/sql/sem/builtins/builtins.go
@@ -3735,7 +3735,7 @@ value if you rely on the HLC for accuracy.`,
// Fuzzy String Matching
"soundex": makeBuiltin(
- tree.FunctionProperties{Category: builtinconstants.CategoryString},
+ tree.FunctionProperties{Category: builtinconstants.CategoryFuzzyStringMatching},
tree.Overload{
Types: tree.ArgTypes{{"source", types.String}},
ReturnType: tree.FixedReturnType(types.String),
@@ -3765,7 +3765,8 @@ value if you rely on the HLC for accuracy.`,
Volatility: volatility.Immutable,
},
),
- "levenshtein": makeBuiltin(defProps(),
+ "levenshtein": makeBuiltin(
+ tree.FunctionProperties{Category: builtinconstants.CategoryFuzzyStringMatching},
tree.Overload{
Types: tree.ArgTypes{{"source", types.String}, {"target", types.String}},
ReturnType: tree.FixedReturnType(types.Int),
@@ -4032,7 +4033,7 @@ value if you rely on the HLC for accuracy.`,
}),
"crdb_internal.read_file": makeBuiltin(
- jsonProps(),
+ tree.FunctionProperties{Category: builtinconstants.CategorySystemInfo},
tree.Overload{
Types: tree.ArgTypes{
{"uri", types.String},
@@ -4048,7 +4049,7 @@ value if you rely on the HLC for accuracy.`,
}),
"crdb_internal.write_file": makeBuiltin(
- jsonProps(),
+ tree.FunctionProperties{Category: builtinconstants.CategorySystemInfo},
tree.Overload{
Types: tree.ArgTypes{
{"data", types.Bytes},
diff --git a/pkg/sql/sem/builtins/generator_builtins.go b/pkg/sql/sem/builtins/generator_builtins.go
index 6ad4d26a1d50..702bd23c49f3 100644
--- a/pkg/sql/sem/builtins/generator_builtins.go
+++ b/pkg/sql/sem/builtins/generator_builtins.go
@@ -65,10 +65,10 @@ func genProps() tree.FunctionProperties {
}
}
-func genPropsWithLabels(returnLabels []string) tree.FunctionProperties {
+func jsonGenPropsWithLabels(returnLabels []string) tree.FunctionProperties {
return tree.FunctionProperties{
Class: tree.GeneratorClass,
- Category: builtinconstants.CategoryGenerator,
+ Category: builtinconstants.CategoryJSON,
ReturnLabels: returnLabels,
}
}
@@ -366,16 +366,16 @@ var generators = map[string]builtinDefinition{
),
),
- "json_array_elements": makeBuiltin(genPropsWithLabels(jsonArrayGeneratorLabels), jsonArrayElementsImpl),
- "jsonb_array_elements": makeBuiltin(genPropsWithLabels(jsonArrayGeneratorLabels), jsonArrayElementsImpl),
- "json_array_elements_text": makeBuiltin(genPropsWithLabels(jsonArrayGeneratorLabels), jsonArrayElementsTextImpl),
- "jsonb_array_elements_text": makeBuiltin(genPropsWithLabels(jsonArrayGeneratorLabels), jsonArrayElementsTextImpl),
+ "json_array_elements": makeBuiltin(jsonGenPropsWithLabels(jsonArrayGeneratorLabels), jsonArrayElementsImpl),
+ "jsonb_array_elements": makeBuiltin(jsonGenPropsWithLabels(jsonArrayGeneratorLabels), jsonArrayElementsImpl),
+ "json_array_elements_text": makeBuiltin(jsonGenPropsWithLabels(jsonArrayGeneratorLabels), jsonArrayElementsTextImpl),
+ "jsonb_array_elements_text": makeBuiltin(jsonGenPropsWithLabels(jsonArrayGeneratorLabels), jsonArrayElementsTextImpl),
"json_object_keys": makeBuiltin(genProps(), jsonObjectKeysImpl),
"jsonb_object_keys": makeBuiltin(genProps(), jsonObjectKeysImpl),
- "json_each": makeBuiltin(genPropsWithLabels(jsonEachGeneratorLabels), jsonEachImpl),
- "jsonb_each": makeBuiltin(genPropsWithLabels(jsonEachGeneratorLabels), jsonEachImpl),
- "json_each_text": makeBuiltin(genPropsWithLabels(jsonEachGeneratorLabels), jsonEachTextImpl),
- "jsonb_each_text": makeBuiltin(genPropsWithLabels(jsonEachGeneratorLabels), jsonEachTextImpl),
+ "json_each": makeBuiltin(jsonGenPropsWithLabels(jsonEachGeneratorLabels), jsonEachImpl),
+ "jsonb_each": makeBuiltin(jsonGenPropsWithLabels(jsonEachGeneratorLabels), jsonEachImpl),
+ "json_each_text": makeBuiltin(jsonGenPropsWithLabels(jsonEachGeneratorLabels), jsonEachTextImpl),
+ "jsonb_each_text": makeBuiltin(jsonGenPropsWithLabels(jsonEachGeneratorLabels), jsonEachTextImpl),
"json_populate_record": makeBuiltin(jsonPopulateProps, makeJSONPopulateImpl(makeJSONPopulateRecordGenerator,
"Expands the object in from_json to a row whose columns match the record type defined by base.",
)),
@@ -1506,7 +1506,7 @@ func (g *jsonEachGenerator) Values() (tree.Datums, error) {
var jsonPopulateProps = tree.FunctionProperties{
Class: tree.GeneratorClass,
- Category: builtinconstants.CategoryGenerator,
+ Category: builtinconstants.CategoryJSON,
}
func makeJSONPopulateImpl(gen eval.GeneratorWithExprsOverload, info string) tree.Overload {
From acbcd738079f9c1ab4e27d7a3d9a843dd877e4f7 Mon Sep 17 00:00:00 2001
From: Xiang Gu
Date: Tue, 27 Sep 2022 15:25:55 -0400
Subject: [PATCH 02/16] funcdesc: changed function receiver to be pointer
receiver
This will be needed when a later commit adds a field to the function
builder that we wish to be able to set.
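For context, a minimal runnable sketch of why the pointer receiver is
needed (the `builder` type and `flag` field below are stand-ins, not
names from this patch):

    package main

    import "fmt"

    type builder struct{ flag bool } // stand-in for functionDescriptorBuilder

    // With a value receiver, the assignment mutates a copy and is lost.
    func (b builder) setByValue() { b.flag = true }

    // With a pointer receiver, the assignment persists on the receiver.
    func (b *builder) setByPointer() { b.flag = true }

    func main() {
        var b builder
        b.setByValue()
        fmt.Println(b.flag) // false: the value receiver could not set it
        b.setByPointer()
        fmt.Println(b.flag) // true
    }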
---
pkg/sql/catalog/funcdesc/func_desc_builder.go | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/pkg/sql/catalog/funcdesc/func_desc_builder.go b/pkg/sql/catalog/funcdesc/func_desc_builder.go
index 3aa8373ef3a8..bf0d0140d64c 100644
--- a/pkg/sql/catalog/funcdesc/func_desc_builder.go
+++ b/pkg/sql/catalog/funcdesc/func_desc_builder.go
@@ -74,13 +74,13 @@ type functionDescriptorBuilder struct {
}
// DescriptorType implements the catalog.DescriptorBuilder interface.
-func (fdb functionDescriptorBuilder) DescriptorType() catalog.DescriptorType {
+func (fdb *functionDescriptorBuilder) DescriptorType() catalog.DescriptorType {
return catalog.Function
}
// RunPostDeserializationChanges implements the catalog.DescriptorBuilder
// interface.
-func (fdb functionDescriptorBuilder) RunPostDeserializationChanges() (err error) {
+func (fdb *functionDescriptorBuilder) RunPostDeserializationChanges() (err error) {
defer func() {
err = errors.Wrapf(err, "function %q (%d)", fdb.original.Name, fdb.original.ID)
}()
@@ -101,29 +101,29 @@ func (fdb functionDescriptorBuilder) RunPostDeserializationChanges() (err error)
}
// RunRestoreChanges implements the catalog.DescriptorBuilder interface.
-func (fdb functionDescriptorBuilder) RunRestoreChanges(
+func (fdb *functionDescriptorBuilder) RunRestoreChanges(
descLookupFn func(id descpb.ID) catalog.Descriptor,
) error {
return nil
}
// BuildImmutable implements the catalog.DescriptorBuilder interface.
-func (fdb functionDescriptorBuilder) BuildImmutable() catalog.Descriptor {
+func (fdb *functionDescriptorBuilder) BuildImmutable() catalog.Descriptor {
return fdb.BuildImmutableFunction()
}
// BuildExistingMutable implements the catalog.DescriptorBuilder interface.
-func (fdb functionDescriptorBuilder) BuildExistingMutable() catalog.MutableDescriptor {
+func (fdb *functionDescriptorBuilder) BuildExistingMutable() catalog.MutableDescriptor {
return fdb.BuildExistingMutableFunction()
}
// BuildCreatedMutable implements the catalog.DescriptorBuilder interface.
-func (fdb functionDescriptorBuilder) BuildCreatedMutable() catalog.MutableDescriptor {
+func (fdb *functionDescriptorBuilder) BuildCreatedMutable() catalog.MutableDescriptor {
return fdb.BuildCreatedMutableFunction()
}
// BuildImmutableFunction implements the FunctionDescriptorBuilder interface.
-func (fdb functionDescriptorBuilder) BuildImmutableFunction() catalog.FunctionDescriptor {
+func (fdb *functionDescriptorBuilder) BuildImmutableFunction() catalog.FunctionDescriptor {
desc := fdb.maybeModified
if desc == nil {
desc = fdb.original
@@ -136,7 +136,7 @@ func (fdb functionDescriptorBuilder) BuildImmutableFunction() catalog.FunctionDe
}
// BuildExistingMutableFunction implements the FunctionDescriptorBuilder interface.
-func (fdb functionDescriptorBuilder) BuildExistingMutableFunction() *Mutable {
+func (fdb *functionDescriptorBuilder) BuildExistingMutableFunction() *Mutable {
if fdb.maybeModified == nil {
fdb.maybeModified = protoutil.Clone(fdb.original).(*descpb.FunctionDescriptor)
}
@@ -151,7 +151,7 @@ func (fdb functionDescriptorBuilder) BuildExistingMutableFunction() *Mutable {
}
// BuildCreatedMutableFunction implements the FunctionDescriptorBuilder interface.
-func (fdb functionDescriptorBuilder) BuildCreatedMutableFunction() *Mutable {
+func (fdb *functionDescriptorBuilder) BuildCreatedMutableFunction() *Mutable {
desc := fdb.maybeModified
if desc == nil {
desc = fdb.original
From b6f8a2dc7fc5ea4df57eec3fbf5f346ec3f8c7ab Mon Sep 17 00:00:00 2001
From: Oleg Afanasyev
Date: Fri, 30 Sep 2022 12:10:01 +0100
Subject: [PATCH 03/16] builtins: fix doc formatting
Some builtins had their docs incorrectly quoted, resulting in wrong
formatting being applied to the generated web page docs.
Release note: None
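The root cause was Go raw string literals that began with a `"` and
contained literal `\n\n` escapes (see the geo_builtins.go hunk below).
A runnable sketch of the difference, with the message text abbreviated:

    package main

    import "fmt"

    func main() {
        // Raw literal (backquotes): the leading quote and the \n\n escape are
        // kept verbatim, which is how they leaked into the generated docs.
        broken := `"\n\nWhen operating on a spheroid ...`
        // Interpreted literal: \n\n becomes two real newlines (a blank line).
        fixed := "\n\nWhen operating on a spheroid ..."
        fmt.Printf("%q\n%q\n", broken, fixed)
    }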
---
docs/generated/sql/functions.md | 55 +++++++++++---------
pkg/sql/sem/builtins/generator_builtins.go | 4 +-
pkg/sql/sem/builtins/geo_builtins.go | 34 ++++++------
pkg/sql/sem/builtins/replication_builtins.go | 10 ++--
4 files changed, 52 insertions(+), 51 deletions(-)
diff --git a/docs/generated/sql/functions.md b/docs/generated/sql/functions.md
index 5cbedb9784a6..9af4059ae638 100644
--- a/docs/generated/sql/functions.md
+++ b/docs/generated/sql/functions.md
@@ -1409,12 +1409,14 @@ the locality flag on node startup. Returns an error if no region is set.
_st_dfullywithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool | Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, exclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than distance units.
This function variant does not utilize any spatial index.
| Immutable |
-_st_dwithin(geography_a: geography, geography_b: geography, distance: float) → bool | Returns true if any of geography_a is within distance meters of geography_b, inclusive. Uses a spheroid to perform the operation."\n\nWhen operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.
+_st_dwithin(geography_a: geography, geography_b: geography, distance: float) → bool | Returns true if any of geography_a is within distance meters of geography_b, inclusive. Uses a spheroid to perform the operation.
+When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.
The calculations performed are have a precision of 1cm.
This function utilizes the GeographicLib library for spheroid calculations.
This function variant does not utilize any spatial index.
| Immutable |
-_st_dwithin(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool | Returns true if any of geography_a is within distance meters of geography_b, inclusive."\n\nWhen operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.
+_st_dwithin(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool | Returns true if any of geography_a is within distance meters of geography_b, inclusive.
+When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.
The calculations performed are have a precision of 1cm.
This function utilizes the S2 library for spherical calculations.
This function utilizes the GeographicLib library for spheroid calculations.
@@ -1423,12 +1425,14 @@ the locality flag on node startup. Returns an error if no region is set.
_st_dwithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool | Returns true if any of geometry_a is within distance units of geometry_b, inclusive.
This function variant does not utilize any spatial index.
| Immutable |
-_st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float) → bool | Returns true if any of geography_a is within distance meters of geography_b, exclusive. Uses a spheroid to perform the operation."\n\nWhen operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.
+_st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float) → bool | Returns true if any of geography_a is within distance meters of geography_b, exclusive. Uses a spheroid to perform the operation.
+When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.
The calculations performed are have a precision of 1cm.
This function utilizes the GeographicLib library for spheroid calculations.
This function variant does not utilize any spatial index.
| Immutable |
-_st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool | Returns true if any of geography_a is within distance meters of geography_b, exclusive."\n\nWhen operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.
+_st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool | Returns true if any of geography_a is within distance meters of geography_b, exclusive.
+When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.
The calculations performed are have a precision of 1cm.
This function utilizes the S2 library for spherical calculations.
This function utilizes the GeographicLib library for spheroid calculations.
@@ -1876,10 +1880,12 @@ from the given Geometry.
st_disjoint(geometry_a: geometry, geometry_b: geometry) → bool | Returns true if geometry_a does not overlap, touch or is within geometry_b.
This function utilizes the GEOS module.
| Immutable |
-st_distance(geography_a: geography, geography_b: geography) → float | Returns the distance in meters between geography_a and geography_b. Uses a spheroid to perform the operation."\n\nWhen operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.
+st_distance(geography_a: geography, geography_b: geography) → float | Returns the distance in meters between geography_a and geography_b. Uses a spheroid to perform the operation.
+When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.
This function utilizes the GeographicLib library for spheroid calculations.
| Immutable |
-st_distance(geography_a: geography, geography_b: geography, use_spheroid: bool) → float | Returns the distance in meters between geography_a and geography_b."\n\nWhen operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.
+st_distance(geography_a: geography, geography_b: geography, use_spheroid: bool) → float | Returns the distance in meters between geography_a and geography_b.
+When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.
This function utilizes the S2 library for spherical calculations.
This function utilizes the GeographicLib library for spheroid calculations.
| Immutable |
@@ -1891,16 +1897,19 @@ from the given Geometry.
st_distancesphere(geometry_a: geometry, geometry_b: geometry) → float | Returns the distance in meters between geometry_a and geometry_b assuming the coordinates represent lng/lat points on a sphere.
This function utilizes the S2 library for spherical calculations.
| Immutable |
-st_distancespheroid(geometry_a: geometry, geometry_b: geometry) → float | Returns the distance in meters between geometry_a and geometry_b assuming the coordinates represent lng/lat points on a spheroid."\n\nWhen operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.
+st_distancespheroid(geometry_a: geometry, geometry_b: geometry) → float | Returns the distance in meters between geometry_a and geometry_b assuming the coordinates represent lng/lat points on a spheroid.
+When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.
This function utilizes the S2 library for spherical calculations.
This function utilizes the GeographicLib library for spheroid calculations.
| Immutable |
-st_dwithin(geography_a: geography, geography_b: geography, distance: float) → bool | Returns true if any of geography_a is within distance meters of geography_b, inclusive. Uses a spheroid to perform the operation."\n\nWhen operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.
+st_dwithin(geography_a: geography, geography_b: geography, distance: float) → bool | Returns true if any of geography_a is within distance meters of geography_b, inclusive. Uses a spheroid to perform the operation.
+When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.
The calculations performed are have a precision of 1cm.
This function utilizes the GeographicLib library for spheroid calculations.
This function variant will attempt to utilize any available spatial index.
| Immutable |
-st_dwithin(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool | Returns true if any of geography_a is within distance meters of geography_b, inclusive."\n\nWhen operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.
+st_dwithin(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool | Returns true if any of geography_a is within distance meters of geography_b, inclusive.
+When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.
The calculations performed are have a precision of 1cm.
This function utilizes the S2 library for spherical calculations.
This function utilizes the GeographicLib library for spheroid calculations.
@@ -1913,12 +1922,14 @@ from the given Geometry.
This function variant will attempt to utilize any available spatial index.
This variant will cast all geometry_str arguments into Geometry types.
| Immutable |
-st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float) → bool | Returns true if any of geography_a is within distance meters of geography_b, exclusive. Uses a spheroid to perform the operation."\n\nWhen operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.
+st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float) → bool | Returns true if any of geography_a is within distance meters of geography_b, exclusive. Uses a spheroid to perform the operation.
+When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.
The calculations performed are have a precision of 1cm.
This function utilizes the GeographicLib library for spheroid calculations.
This function variant will attempt to utilize any available spatial index.
| Immutable |
-st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool | Returns true if any of geography_a is within distance meters of geography_b, exclusive."\n\nWhen operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.
+st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool | Returns true if any of geography_a is within distance meters of geography_b, exclusive.
+When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.
The calculations performed are have a precision of 1cm.
This function utilizes the S2 library for spherical calculations.
This function utilizes the GeographicLib library for spheroid calculations.
@@ -2450,19 +2461,13 @@ Negative azimuth values and values greater than 2π (360 degrees) are supported.
| Immutable |
st_s2covering(geography: geography) → geography | Returns a geography which represents the S2 covering used by the index using the default index configuration.
| Immutable |
-st_s2covering(geography: geography, settings: string) → geography | Returns a geography which represents the S2 covering used by the index using the index configuration specified
-by the settings parameter.
-The settings parameter uses the same format as the parameters inside the WITH in CREATE INDEX … WITH (…),
-e.g. CREATE INDEX t_idx ON t USING GIST(geom) WITH (s2_max_level=15, s2_level_mod=3) can be tried using
-SELECT ST_S2Covering(geography, ‘s2_max_level=15,s2_level_mod=3’).
+st_s2covering(geography: geography, settings: string) → geography | Returns a geography which represents the S2 covering used by the index using the index configuration specified by the settings parameter.
+The settings parameter uses the same format as the parameters inside the WITH in CREATE INDEX ... WITH (...) , e.g. CREATE INDEX t_idx ON t USING GIST(geom) WITH (s2_max_level=15, s2_level_mod=3) can be tried using SELECT ST_S2Covering(geography, 's2_max_level=15,s2_level_mod=3') .
| Immutable |
st_s2covering(geometry: geometry) → geometry | Returns a geometry which represents the S2 covering used by the index using the default index configuration.
| Immutable |
-st_s2covering(geometry: geometry, settings: string) → geometry | Returns a geometry which represents the S2 covering used by the index using the index configuration specified
-by the settings parameter.
-The settings parameter uses the same format as the parameters inside the WITH in CREATE INDEX … WITH (…),
-e.g. CREATE INDEX t_idx ON t USING GIST(geom) WITH (s2_max_level=15, s2_level_mod=3) can be tried using
-SELECT ST_S2Covering(geometry, ‘s2_max_level=15,s2_level_mod=3’).
+st_s2covering(geometry: geometry, settings: string) → geometry | Returns a geometry which represents the S2 covering used by the index using the index configuration specified by the settings parameter.
+The settings parameter uses the same format as the parameters inside the WITH in CREATE INDEX ... WITH (...) , e.g. CREATE INDEX t_idx ON t USING GIST(geom) WITH (s2_max_level=15, s2_level_mod=3) can be tried using SELECT ST_S2Covering(geometry, 's2_max_level=15,s2_level_mod=3')
| Immutable |
st_scale(g: geometry, factor: geometry) → geometry | Returns a modified Geometry scaled by taking in a Geometry as the factor.
| Immutable |
@@ -2628,9 +2633,9 @@ The swap_ordinate_string parameter is a 2-character string naming the ordinates
crdb_internal.complete_replication_stream(stream_id: int, successful_ingestion: bool) → int | This function can be used on the producer side to complete and clean up a replication stream.‘successful_ingestion’ indicates whether the stream ingestion finished successfully.
| Volatile |
-crdb_internal.complete_stream_ingestion_job(job_id: int, cutover_ts: timestamptz) → int | This function can be used to signal a running stream ingestion job to complete. The job will eventually stop ingesting, revert to the specified timestamp and leave the cluster in a consistent state. The specified timestamp can only be specified up to the microsecond. This function does not wait for the job to reach a terminal state, but instead returns the job id as soon as it has signaled the job to complete. This builtin can be used in conjunction with SHOW JOBS WHEN COMPLETE to ensure that the job has left the cluster in a consistent state.
+crdb_internal.complete_stream_ingestion_job(job_id: int, cutover_ts: timestamptz) → int | This function can be used to signal a running stream ingestion job to complete. The job will eventually stop ingesting, revert to the specified timestamp and leave the cluster in a consistent state. The specified timestamp can only be specified up to the microsecond. This function does not wait for the job to reach a terminal state, but instead returns the job id as soon as it has signaled the job to complete. This builtin can be used in conjunction with SHOW JOBS WHEN COMPLETE to ensure that the job has left the cluster in a consistent state.
| Volatile |
-crdb_internal.replication_stream_progress(stream_id: int, frontier_ts: string) → bytes | This function can be used on the consumer side to heartbeat its replication progress to a replication stream in the source cluster. The returns a StreamReplicationStatus message that indicates stream status (ACTIVE, PAUSED, INACTIVE, or STATUS_UNKNOWN_RETRY).
+crdb_internal.replication_stream_progress(stream_id: int, frontier_ts: string) → bytes | This function can be used on the consumer side to heartbeat its replication progress to a replication stream in the source cluster. The returns a StreamReplicationStatus message that indicates stream status (ACTIVE , PAUSED , INACTIVE , or STATUS_UNKNOWN_RETRY ).
| Volatile |
crdb_internal.replication_stream_spec(stream_id: int) → bytes | This function can be used on the consumer side to get a replication stream specification for the specified stream. The consumer will later call ‘stream_partition’ to a partition with the spec to start streaming.
| Volatile |
@@ -3019,8 +3024,8 @@ may increase either contention or retry errors, or both.
crdb_internal.assignment_cast(val: anyelement, type: anyelement) → anyelement | This function is used internally to perform assignment casts during mutations.
| Stable |
crdb_internal.check_consistency(stats_only: bool, start_key: bytes, end_key: bytes) → tuple{int AS range_id, bytes AS start_key, string AS start_key_pretty, string AS status, string AS detail, interval AS duration} | Runs a consistency check on ranges touching the specified key range. an empty start or end key is treated as the minimum and maximum possible, respectively. stats_only should only be set to false when targeting a small number of ranges to avoid overloading the cluster. Each returned row contains the range ID, the status (a roachpb.CheckConsistencyResponse_Status), and verbose detail.
-Example usage:
-SELECT * FROM crdb_internal.check_consistency(true, ‘\x02’, ‘\x04’)
+Example usage:
+SELECT * FROM crdb_internal.check_consistency(true, b'\x02', b'\x04')
| Volatile |
crdb_internal.check_password_hash_format(password: bytes) → string | This function checks whether a string is a precomputed password hash. Returns the hash algorithm.
| Immutable |
diff --git a/pkg/sql/sem/builtins/generator_builtins.go b/pkg/sql/sem/builtins/generator_builtins.go
index b4a89d644752..2f5b436094cc 100644
--- a/pkg/sql/sem/builtins/generator_builtins.go
+++ b/pkg/sql/sem/builtins/generator_builtins.go
@@ -412,8 +412,8 @@ var generators = map[string]builtinDefinition{
"small number of ranges to avoid overloading the cluster. Each returned row "+
"contains the range ID, the status (a roachpb.CheckConsistencyResponse_Status), "+
"and verbose detail.\n\n"+
- "Example usage:\n"+
- "SELECT * FROM crdb_internal.check_consistency(true, '\\x02', '\\x04')",
+ "Example usage:\n\n"+
+ "`SELECT * FROM crdb_internal.check_consistency(true, b'\\x02', b'\\x04')`",
volatility.Volatile,
),
),
diff --git a/pkg/sql/sem/builtins/geo_builtins.go b/pkg/sql/sem/builtins/geo_builtins.go
index f852ef8dca9a..96bdfd6c136a 100644
--- a/pkg/sql/sem/builtins/geo_builtins.go
+++ b/pkg/sql/sem/builtins/geo_builtins.go
@@ -59,9 +59,9 @@ const (
)
const usesSpheroidMessage = " Uses a spheroid to perform the operation."
-const spheroidDistanceMessage = `"\n\nWhen operating on a spheroid, this function will use the sphere to calculate ` +
- `the closest two points. The spheroid distance between these two points is calculated using GeographicLib. ` +
- `This follows observed PostGIS behavior.`
+const spheroidDistanceMessage = "\n\nWhen operating on a spheroid, this function will use the sphere to calculate " +
+ "the closest two points. The spheroid distance between these two points is calculated using GeographicLib. " +
+ "This follows observed PostGIS behavior."
const (
defaultWKTDecimalDigits = 15
@@ -598,14 +598,12 @@ var geoBuiltins = map[string]builtinDefinition{
return tree.NewDGeometry(ret), nil
},
Info: infoBuilder{
- info: `
-Returns a geometry which represents the S2 covering used by the index using the index configuration specified
-by the settings parameter.
-
-The settings parameter uses the same format as the parameters inside the WITH in CREATE INDEX ... WITH (...),
-e.g. CREATE INDEX t_idx ON t USING GIST(geom) WITH (s2_max_level=15, s2_level_mod=3) can be tried using
-SELECT ST_S2Covering(geometry, 's2_max_level=15,s2_level_mod=3').
-`,
+ info: "Returns a geometry which represents the S2 covering used by the index using the " +
+ "index configuration specified by the settings parameter.\n\n" +
+ "The settings parameter uses the same format as the parameters inside " +
+ "the `WITH` in `CREATE INDEX ... WITH (...)`, " +
+ "e.g. `CREATE INDEX t_idx ON t USING GIST(geom) WITH (s2_max_level=15, s2_level_mod=3)` " +
+ "can be tried using `SELECT ST_S2Covering(geometry, 's2_max_level=15,s2_level_mod=3')`",
}.String(),
Volatility: volatility.Immutable,
},
@@ -643,14 +641,12 @@ SELECT ST_S2Covering(geometry, 's2_max_level=15,s2_level_mod=3').
return tree.NewDGeography(ret), nil
},
Info: infoBuilder{
- info: `
-Returns a geography which represents the S2 covering used by the index using the index configuration specified
-by the settings parameter.
-
-The settings parameter uses the same format as the parameters inside the WITH in CREATE INDEX ... WITH (...),
-e.g. CREATE INDEX t_idx ON t USING GIST(geom) WITH (s2_max_level=15, s2_level_mod=3) can be tried using
-SELECT ST_S2Covering(geography, 's2_max_level=15,s2_level_mod=3').
-`,
+ info: "Returns a geography which represents the S2 covering used by the " +
+ "index using the index configuration specified by the settings parameter.\n\n" +
+ "The settings parameter uses the same format as the parameters inside the " +
+ "`WITH` in `CREATE INDEX ... WITH (...)`, e.g. " +
+ "`CREATE INDEX t_idx ON t USING GIST(geom) WITH (s2_max_level=15, s2_level_mod=3)` " +
+ "can be tried using `SELECT ST_S2Covering(geography, 's2_max_level=15,s2_level_mod=3')`.",
}.String(),
Volatility: volatility.Immutable,
},
diff --git a/pkg/sql/sem/builtins/replication_builtins.go b/pkg/sql/sem/builtins/replication_builtins.go
index 363e353e316e..4cc8d0719a65 100644
--- a/pkg/sql/sem/builtins/replication_builtins.go
+++ b/pkg/sql/sem/builtins/replication_builtins.go
@@ -65,12 +65,12 @@ var replicationBuiltins = map[string]builtinDefinition{
},
Info: "This function can be used to signal a running stream ingestion job to complete. " +
"The job will eventually stop ingesting, revert to the specified timestamp and leave the " +
- "cluster in a consistent state. The specified timestamp can only be specified up to the" +
- " microsecond. " +
+ "cluster in a consistent state. The specified timestamp can only be specified up to the " +
+ "microsecond. " +
"This function does not wait for the job to reach a terminal state, " +
"but instead returns the job id as soon as it has signaled the job to complete. " +
- "This builtin can be used in conjunction with SHOW JOBS WHEN COMPLETE to ensure that the" +
- " job has left the cluster in a consistent state.",
+ "This builtin can be used in conjunction with `SHOW JOBS WHEN COMPLETE` to ensure that the " +
+ "job has left the cluster in a consistent state.",
Volatility: volatility.Volatile,
},
),
@@ -221,7 +221,7 @@ var replicationBuiltins = map[string]builtinDefinition{
},
Info: "This function can be used on the consumer side to heartbeat its replication progress to " +
"a replication stream in the source cluster. The returns a StreamReplicationStatus message " +
- "that indicates stream status (ACTIVE, PAUSED, INACTIVE, or STATUS_UNKNOWN_RETRY).",
+ "that indicates stream status (`ACTIVE`, `PAUSED`, `INACTIVE`, or `STATUS_UNKNOWN_RETRY`).",
Volatility: volatility.Volatile,
},
),
From 251fcd7f5d952c83f62fff3e8ac63e042084e451 Mon Sep 17 00:00:00 2001
From: Xin Hao Zhang
Date: Tue, 27 Sep 2022 16:08:19 -0400
Subject: [PATCH 04/16] cluster-ui: add helper to determine empty sql results
This commit adds a helper function, `sqlResultsAreEmpty`, to the
sql api that determines whether the request response contains any
execution results.
Release note: None
---
.../cluster-ui/src/api/clusterLocksApi.ts | 6 +-
.../cluster-ui/src/api/insightsApi.ts | 15 ++--
.../cluster-ui/src/api/schedulesApi.ts | 8 +-
.../cluster-ui/src/api/schemaInsightsApi.ts | 3 +-
.../cluster-ui/src/api/sqlApi.spec.ts | 86 +++++++++++++++++++
.../workspaces/cluster-ui/src/api/sqlApi.ts | 11 +++
6 files changed, 115 insertions(+), 14 deletions(-)
create mode 100644 pkg/ui/workspaces/cluster-ui/src/api/sqlApi.spec.ts
diff --git a/pkg/ui/workspaces/cluster-ui/src/api/clusterLocksApi.ts b/pkg/ui/workspaces/cluster-ui/src/api/clusterLocksApi.ts
index 4ba504095873..d45570f7216d 100644
--- a/pkg/ui/workspaces/cluster-ui/src/api/clusterLocksApi.ts
+++ b/pkg/ui/workspaces/cluster-ui/src/api/clusterLocksApi.ts
@@ -13,6 +13,7 @@ import {
executeInternalSql,
LONG_TIMEOUT,
SqlExecutionRequest,
+ sqlResultsAreEmpty,
} from "./sqlApi";
export type ClusterLockState = {
@@ -73,10 +74,7 @@ WHERE
};
return executeInternalSql(request).then(result => {
- if (
- result.execution.txn_results.length === 0 ||
- !result.execution.txn_results[0].rows
- ) {
+ if (sqlResultsAreEmpty(result)) {
// No data.
return [];
}
diff --git a/pkg/ui/workspaces/cluster-ui/src/api/insightsApi.ts b/pkg/ui/workspaces/cluster-ui/src/api/insightsApi.ts
index 30611bc15ee1..9f68d72156a6 100644
--- a/pkg/ui/workspaces/cluster-ui/src/api/insightsApi.ts
+++ b/pkg/ui/workspaces/cluster-ui/src/api/insightsApi.ts
@@ -15,6 +15,7 @@ import {
LONG_TIMEOUT,
SqlExecutionRequest,
SqlExecutionResponse,
+ sqlResultsAreEmpty,
} from "./sqlApi";
import {
BlockedContentionDetails,
@@ -199,10 +200,10 @@ export function getTransactionInsightEventState(): Promise(
txnContentionRequest,
).then(contentionResults => {
- const res = contentionResults.execution.txn_results[0].rows;
- if (!res || res.length < 1) {
+ if (sqlResultsAreEmpty(contentionResults)) {
return;
}
+ const res = contentionResults.execution.txn_results[0].rows;
const txnFingerprintIDs = res.map(row => row.waiting_txn_fingerprint_id);
const txnFingerprintRequest: SqlExecutionRequest = {
statements: [
@@ -217,11 +218,11 @@ export function getTransactionInsightEventState(): Promise(
txnFingerprintRequest,
).then(txnStmtFingerprintResults => {
- const txnStmtRes =
- txnStmtFingerprintResults.execution.txn_results[0].rows;
- if (!txnStmtRes || txnStmtRes.length < 1) {
+ if (sqlResultsAreEmpty(txnStmtFingerprintResults)) {
return;
}
+ const txnStmtRes =
+ txnStmtFingerprintResults.execution.txn_results[0].rows;
const stmtFingerprintIDs = txnStmtRes.map(row => row.query_ids);
const fingerprintStmtsRequest: SqlExecutionRequest = {
statements: [
@@ -419,10 +420,10 @@ export function getTransactionInsightEventDetailsState(
return executeInternalSql(
txnContentionDetailsRequest,
).then(contentionResults => {
- const res = contentionResults.execution.txn_results[0].rows;
- if (!res || res.length < 1) {
+ if (sqlResultsAreEmpty(contentionResults)) {
return;
}
+ const res = contentionResults.execution.txn_results[0].rows;
const waitingTxnFingerprintId = res[0].waiting_txn_fingerprint_id;
const waitingTxnFingerprintRequest: SqlExecutionRequest = {
statements: [
diff --git a/pkg/ui/workspaces/cluster-ui/src/api/schedulesApi.ts b/pkg/ui/workspaces/cluster-ui/src/api/schedulesApi.ts
index 85ccdcdf593c..bf4907ff5c50 100644
--- a/pkg/ui/workspaces/cluster-ui/src/api/schedulesApi.ts
+++ b/pkg/ui/workspaces/cluster-ui/src/api/schedulesApi.ts
@@ -10,7 +10,11 @@
import Long from "long";
import moment from "moment";
-import { executeInternalSql, SqlExecutionRequest } from "./sqlApi";
+import {
+ executeInternalSql,
+ SqlExecutionRequest,
+ sqlResultsAreEmpty,
+} from "./sqlApi";
import { RequestError } from "../util";
type ScheduleColumns = {
@@ -76,7 +80,7 @@ export function getSchedules(req: {
};
return executeInternalSql(request).then(result => {
const txn_results = result.execution.txn_results;
- if (txn_results.length === 0 || !txn_results[0].rows) {
+ if (sqlResultsAreEmpty(result)) {
// No data.
return [];
}
diff --git a/pkg/ui/workspaces/cluster-ui/src/api/schemaInsightsApi.ts b/pkg/ui/workspaces/cluster-ui/src/api/schemaInsightsApi.ts
index b47f7f454ecb..e3a7c1f6fd34 100644
--- a/pkg/ui/workspaces/cluster-ui/src/api/schemaInsightsApi.ts
+++ b/pkg/ui/workspaces/cluster-ui/src/api/schemaInsightsApi.ts
@@ -13,6 +13,7 @@ import {
SqlTxnResult,
executeInternalSql,
LONG_TIMEOUT,
+ sqlResultsAreEmpty,
} from "./sqlApi";
import {
InsightRecommendation,
@@ -186,7 +187,7 @@ export function getSchemaInsights(): Promise {
};
return executeInternalSql(request).then(result => {
const results: InsightRecommendation[] = [];
- if (result.execution.txn_results.length === 0) {
+ if (sqlResultsAreEmpty(result)) {
// No data.
return results;
}
diff --git a/pkg/ui/workspaces/cluster-ui/src/api/sqlApi.spec.ts b/pkg/ui/workspaces/cluster-ui/src/api/sqlApi.spec.ts
new file mode 100644
index 000000000000..444d580dd204
--- /dev/null
+++ b/pkg/ui/workspaces/cluster-ui/src/api/sqlApi.spec.ts
@@ -0,0 +1,86 @@
+// Copyright 2022 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+import { SqlExecutionResponse, sqlResultsAreEmpty } from "./sqlApi";
+
+describe("sqlApi", () => {
+ test("sqlResultsAreEmpty should return true when there are no rows in the response", () => {
+ const testCases: {
+ response: SqlExecutionResponse;
+ expected: boolean;
+ }[] = [
+ {
+ response: {
+ num_statements: 1,
+ execution: {
+ retries: 0,
+ txn_results: [
+ {
+ statement: 1,
+ tag: "SELECT",
+ start: "start-date",
+ end: "end-date",
+ rows_affected: 0,
+ rows: [{ hello: "world" }],
+ },
+ ],
+ },
+ },
+ expected: false,
+ },
+ {
+ response: {
+ num_statements: 1,
+ execution: {
+ retries: 0,
+ txn_results: [
+ {
+ statement: 1,
+ tag: "SELECT",
+ start: "start-date",
+ end: "end-date",
+ rows_affected: 0,
+ rows: [],
+ },
+ ],
+ },
+ },
+ expected: true,
+ },
+ {
+ response: {
+ num_statements: 1,
+ execution: {
+ retries: 0,
+ txn_results: [
+ {
+ statement: 1,
+ tag: "SELECT",
+ start: "start-date",
+ end: "end-date",
+ rows_affected: 0,
+ columns: [],
+ },
+ ],
+ },
+ },
+ expected: true,
+ },
+ {
+ response: {},
+ expected: true,
+ },
+ ];
+
+ testCases.forEach(tc => {
+ expect(sqlResultsAreEmpty(tc.response)).toEqual(tc.expected);
+ });
+ });
+});
diff --git a/pkg/ui/workspaces/cluster-ui/src/api/sqlApi.ts b/pkg/ui/workspaces/cluster-ui/src/api/sqlApi.ts
index 88774147ed3c..5a6d5afe413d 100644
--- a/pkg/ui/workspaces/cluster-ui/src/api/sqlApi.ts
+++ b/pkg/ui/workspaces/cluster-ui/src/api/sqlApi.ts
@@ -102,3 +102,14 @@ export function executeInternalSql(
return executeSql(req);
}
+
+export function sqlResultsAreEmpty(
+ result: SqlExecutionResponse,
+): boolean {
+ return (
+ !result.execution?.txn_results?.length ||
+ result.execution.txn_results.every(
+ txn => !txn.rows || txn.rows.length === 0,
+ )
+ );
+}
From a78e3d5a6d417ddff0473dfdcb5ab62ee3a02c11 Mon Sep 17 00:00:00 2001
From: Xin Hao Zhang
Date: Tue, 27 Sep 2022 16:24:13 -0400
Subject: [PATCH 05/16] cluster-ui: export sql api exec functions with pkg
Release note: None
---
pkg/ui/workspaces/cluster-ui/src/api/index.ts | 1 +
pkg/ui/workspaces/cluster-ui/src/api/sqlApi.ts | 6 ++++++
2 files changed, 7 insertions(+)
diff --git a/pkg/ui/workspaces/cluster-ui/src/api/index.ts b/pkg/ui/workspaces/cluster-ui/src/api/index.ts
index 346d6d395a59..39419d221e6d 100644
--- a/pkg/ui/workspaces/cluster-ui/src/api/index.ts
+++ b/pkg/ui/workspaces/cluster-ui/src/api/index.ts
@@ -18,3 +18,4 @@ export * from "./insightsApi";
export * from "./indexActionsApi";
export * from "./schemaInsightsApi";
export * from "./schedulesApi";
+export * from "./sqlApi";
diff --git a/pkg/ui/workspaces/cluster-ui/src/api/sqlApi.ts b/pkg/ui/workspaces/cluster-ui/src/api/sqlApi.ts
index 5a6d5afe413d..ba6167c44fd5 100644
--- a/pkg/ui/workspaces/cluster-ui/src/api/sqlApi.ts
+++ b/pkg/ui/workspaces/cluster-ui/src/api/sqlApi.ts
@@ -103,6 +103,12 @@ export function executeInternalSql(
return executeSql(req);
}
+/**
+ * sqlResultsAreEmpty returns true if the provided result
+ * does not contain any rows.
+ * @param result the sql execution result returned by the server
+ * @returns true if the result contains no rows
+ */
export function sqlResultsAreEmpty(
result: SqlExecutionResponse,
): boolean {
From 5aa84954770cfe8ddc6fdac231bffb625462bd25 Mon Sep 17 00:00:00 2001
From: Jane Xing
Date: Wed, 28 Sep 2022 14:42:58 -0500
Subject: [PATCH 06/16] sql: add TxnWithExecutor() and methods to
InternalExecutorFactory
Having `TxnWithExecutor()` in `descs.CollectionFactory` is unnatural, and will
bring dependency loop headaches. This commit adds the same logic under
`InternalExecutorFactory`. We will remove
`descs.CollectionFactory.TxnWithExecutor()` in a future PR.
Release note: None
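As a usage sketch (not part of this patch: the wrapper name, op name, and
statement are illustrative, and the usual pkg/kv, pkg/sql, and
pkg/sql/sqlutil imports are assumed):

    // updateInTxn runs a statement through an internal executor that is
    // bound to the same txn (and descriptor collection) managed by the
    // factory.
    func updateInTxn(
        ctx context.Context, ief *sql.InternalExecutorFactory, db *kv.DB,
    ) error {
        return ief.TxnWithExecutor(ctx, db, nil, /* sessionData */
            func(ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor) error {
                // Statements issued through ie execute as part of txn.
                _, err := ie.Exec(ctx, "demo-op", txn, "SELECT 1")
                return err
            },
            sqlutil.SteppingEnabled(), // optional TxnOption, also added below
        )
    }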
---
pkg/sql/catalog/descs/collection.go | 5 +
pkg/sql/catalog/descs/factory.go | 30 ++++++
pkg/sql/internal.go | 146 +++++++++++++++++++++++++++
pkg/sql/sqlutil/internal_executor.go | 47 +++++++++
4 files changed, 228 insertions(+)
diff --git a/pkg/sql/catalog/descs/collection.go b/pkg/sql/catalog/descs/collection.go
index f37a2b5c7cd0..6eb7c02d20ef 100644
--- a/pkg/sql/catalog/descs/collection.go
+++ b/pkg/sql/catalog/descs/collection.go
@@ -138,6 +138,11 @@ type Collection struct {
var _ catalog.Accessor = (*Collection)(nil)
+// GetDeletedDescs returns the deleted descriptors of the collection.
+func (tc *Collection) GetDeletedDescs() catalog.DescriptorIDSet {
+ return tc.deletedDescs
+}
+
// MaybeUpdateDeadline updates the deadline in a given transaction
// based on the leased descriptors in this collection. This update is
// only done when a deadline exists.
diff --git a/pkg/sql/catalog/descs/factory.go b/pkg/sql/catalog/descs/factory.go
index aa8b1460445e..401cc6a5ae99 100644
--- a/pkg/sql/catalog/descs/factory.go
+++ b/pkg/sql/catalog/descs/factory.go
@@ -41,6 +41,11 @@ type CollectionFactory struct {
ieFactoryWithTxn InternalExecutorFactoryWithTxn
}
+// GetClusterSettings returns the cluster settings from the collection factory.
+func (cf *CollectionFactory) GetClusterSettings() *cluster.Settings {
+ return cf.settings
+}
+
// InternalExecutorFactoryWithTxn is used to create an internal executor
// with associated extra txn state information.
// It should only be used as a field hanging off CollectionFactory.
@@ -53,6 +58,31 @@ type InternalExecutorFactoryWithTxn interface {
txn *kv.Txn,
descCol *Collection,
) (sqlutil.InternalExecutor, InternalExecutorCommitTxnFunc)
+
+ // DescsTxnWithExecutor enables using an internal executor to run sql
+ // statements in a transactional manner. It creates a descriptor collection
+ // that lives within the scope of the passed-in TxnWithExecutorFunc, and
+ // also ensures that the internal executor shares the same descriptor
+ // collection. Please use this interface if you want to run multiple sql
+ // statements with an internal executor in a txn.
+ DescsTxnWithExecutor(
+ ctx context.Context,
+ db *kv.DB,
+ sd *sessiondata.SessionData,
+ f TxnWithExecutorFunc,
+ opts ...sqlutil.TxnOption,
+ ) error
+
+ // DescsTxn is similar to DescsTxnWithExecutor but without an internal executor.
+ // It creates a descriptor collection that lives within the scope of the given
+ // function, and is a convenient method for running a transaction against
+ // that collection.
+ DescsTxn(
+ ctx context.Context,
+ db *kv.DB,
+ f func(context.Context, *kv.Txn, *Collection) error,
+ opts ...sqlutil.TxnOption,
+ ) error
}
// InternalExecutorCommitTxnFunc is to commit the txn associated with an
diff --git a/pkg/sql/internal.go b/pkg/sql/internal.go
index a52786c7c818..b416485a79c4 100644
--- a/pkg/sql/internal.go
+++ b/pkg/sql/internal.go
@@ -27,6 +27,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
+ "github.com/cockroachdb/cockroach/pkg/sql/catalog/lease"
"github.com/cockroachdb/cockroach/pkg/sql/parser"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgwirebase"
"github.com/cockroachdb/cockroach/pkg/sql/sem/catconstants"
@@ -40,6 +41,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/fsm"
"github.com/cockroachdb/cockroach/pkg/util/mon"
+ "github.com/cockroachdb/cockroach/pkg/util/retry"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/errors"
@@ -1355,3 +1357,147 @@ func (ief *InternalExecutorFactory) RunWithoutTxn(
ie := ief.NewInternalExecutor(nil /* sessionData */)
return run(ctx, ie)
}
+
+type kvTxnFunc = func(context.Context, *kv.Txn) error
+
+// ApplyTxnOptions applies the txn options and returns the txn generator
+// function.
+func ApplyTxnOptions(
+ db *kv.DB, opts ...sqlutil.TxnOption,
+) func(ctx context.Context, f kvTxnFunc) error {
+ var config sqlutil.TxnConfig
+ for _, opt := range opts {
+ opt.Apply(&config)
+ }
+ run := db.Txn
+
+ if config.GetSteppingEnabled() {
+
+ run = func(ctx context.Context, f kvTxnFunc) error {
+ return db.TxnWithSteppingEnabled(ctx, sessiondatapb.Normal, f)
+ }
+ }
+ return run
+}
+
+// DescsTxnWithExecutor enables callers to run transactions with a *Collection
+// such that all retrieved immutable descriptors are properly leased and all mutable
+// descriptors are handled. The function deals with verifying the two version
+// invariant and retrying when it is violated. Callers need not worry that they
+// write mutable descriptors multiple times. The call will explicitly wait for
+// the leases to drain on old versions of descriptors modified or deleted in the
+// transaction; callers do not need to call lease.WaitForOneVersion.
+// It also enables using an internal executor to run sql queries transactionally.
+//
+// The passed transaction is pre-emptively anchored to the system config key on
+// the system tenant.
+func (ief *InternalExecutorFactory) DescsTxnWithExecutor(
+ ctx context.Context,
+ db *kv.DB,
+ sd *sessiondata.SessionData,
+ f descs.TxnWithExecutorFunc,
+ opts ...sqlutil.TxnOption,
+) error {
+ run := ApplyTxnOptions(db, opts...)
+
+ // Waits for descriptors that were modified, skipping
+ // over ones that had their descriptor wiped.
+ waitForDescriptors := func(modifiedDescriptors []lease.IDVersion, deletedDescs catalog.DescriptorIDSet) error {
+ // Wait for a single version on leased descriptors.
+ for _, ld := range modifiedDescriptors {
+ waitForNoVersion := deletedDescs.Contains(ld.ID)
+ retryOpts := retry.Options{
+ InitialBackoff: time.Millisecond,
+ Multiplier: 1.5,
+ MaxBackoff: time.Second,
+ }
+ // Detect unpublished ones.
+ if waitForNoVersion {
+ err := ief.server.cfg.LeaseManager.WaitForNoVersion(ctx, ld.ID, retryOpts)
+ if err != nil {
+ return err
+ }
+ } else {
+ _, err := ief.server.cfg.LeaseManager.WaitForOneVersion(ctx, ld.ID, retryOpts)
+ // If the descriptor has been deleted, just wait for leases to drain.
+ if errors.Is(err, catalog.ErrDescriptorNotFound) {
+ err = ief.server.cfg.LeaseManager.WaitForNoVersion(ctx, ld.ID, retryOpts)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+ }
+
+ cf := ief.server.cfg.CollectionFactory
+ for {
+ var withNewVersion []lease.IDVersion
+ var deletedDescs catalog.DescriptorIDSet
+ if err := run(ctx, func(ctx context.Context, txn *kv.Txn) (err error) {
+ withNewVersion, deletedDescs = nil, catalog.DescriptorIDSet{}
+ descsCol := cf.NewCollection(
+ ctx, nil, /* temporarySchemaProvider */
+ ief.MemoryMonitor(),
+ )
+ defer descsCol.ReleaseAll(ctx)
+ ie, commitTxnFn := ief.NewInternalExecutorWithTxn(sd, &cf.GetClusterSettings().SV, txn, descsCol)
+ if err := f(ctx, txn, descsCol, ie); err != nil {
+ return err
+ }
+ deletedDescs = descsCol.GetDeletedDescs()
+ withNewVersion, err = descsCol.GetOriginalPreviousIDVersionsForUncommitted()
+ if err != nil {
+ return err
+ }
+ return commitTxnFn(ctx)
+ }); descs.IsTwoVersionInvariantViolationError(err) {
+ continue
+ } else {
+ if err == nil {
+ err = waitForDescriptors(withNewVersion, deletedDescs)
+ }
+ return err
+ }
+ }
+}
+
+// DescsTxn is similar to DescsTxnWithExecutor, but without an internal executor
+// involved.
+func (ief *InternalExecutorFactory) DescsTxn(
+ ctx context.Context,
+ db *kv.DB,
+ f func(context.Context, *kv.Txn, *descs.Collection) error,
+ opts ...sqlutil.TxnOption,
+) error {
+ return ief.DescsTxnWithExecutor(
+ ctx,
+ db,
+ nil, /* sessionData */
+ func(ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, _ sqlutil.InternalExecutor) error {
+ return f(ctx, txn, descriptors)
+ },
+ opts...,
+ )
+}
+
+// TxnWithExecutor runs queries with an internal executor in a transactional
+// manner.
+func (ief *InternalExecutorFactory) TxnWithExecutor(
+ ctx context.Context,
+ db *kv.DB,
+ sd *sessiondata.SessionData,
+ f func(ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor) error,
+ opts ...sqlutil.TxnOption,
+) error {
+ return ief.DescsTxnWithExecutor(
+ ctx,
+ db,
+ sd,
+ func(ctx context.Context, txn *kv.Txn, _ *descs.Collection, ie sqlutil.InternalExecutor) error {
+ return f(ctx, txn, ie)
+ },
+ opts...,
+ )
+}
diff --git a/pkg/sql/sqlutil/internal_executor.go b/pkg/sql/sqlutil/internal_executor.go
index 06f334b55072..8e41f04e3e45 100644
--- a/pkg/sql/sqlutil/internal_executor.go
+++ b/pkg/sql/sqlutil/internal_executor.go
@@ -217,6 +217,19 @@ type InternalExecutorFactory interface {
// RunWithoutTxn is to create an internal executor without binding to a txn,
// and run the passed function with this internal executor.
RunWithoutTxn(ctx context.Context, run func(ctx context.Context, ie InternalExecutor) error) error
+
+ // TxnWithExecutor enables callers to run transactions with a *Collection such that all
+ // retrieved immutable descriptors are properly leased and all mutable
+ // descriptors are handled. The function deals with verifying the two version
+ // invariant and retrying when it is violated. Callers need not worry that they
+ // write mutable descriptors multiple times. The call will explicitly wait for
+ // the leases to drain on old versions of descriptors modified or deleted in the
+ // transaction; callers do not need to call lease.WaitForOneVersion.
+ // It also enables using an internal executor to run sql queries transactionally.
+ //
+ // The passed transaction is pre-emptively anchored to the system config key on
+ // the system tenant.
+ TxnWithExecutor(context.Context, *kv.DB, *sessiondata.SessionData, func(context.Context, *kv.Txn, InternalExecutor) error, ...TxnOption) error
}
// InternalExecFn is the type of functions that operates using an internalExecutor.
@@ -226,3 +239,37 @@ type InternalExecFn func(ctx context.Context, txn *kv.Txn, ie InternalExecutor)
// passes the fn the exported InternalExecutor instead of the whole unexported
// extendedEvalContenxt, so it can be implemented outside pkg/sql.
type HistoricalInternalExecTxnRunner func(ctx context.Context, fn InternalExecFn) error
+
+// TxnOption is used to configure a Txn or TxnWithExecutor.
+type TxnOption interface {
+ Apply(*TxnConfig)
+}
+
+// TxnConfig is the config to be set for a txn.
+type TxnConfig struct {
+ steppingEnabled bool
+}
+
+// GetSteppingEnabled returns the steppingEnabled setting from the txn config.
+func (tc *TxnConfig) GetSteppingEnabled() bool {
+ return tc.steppingEnabled
+}
+
+type txnOptionFn func(options *TxnConfig)
+
+// Apply applies the option to the given txn config.
+func (f txnOptionFn) Apply(options *TxnConfig) { f(options) }
+
+var steppingEnabled = txnOptionFn(func(o *TxnConfig) {
+ o.steppingEnabled = true
+})
+
+// SteppingEnabled creates a TxnOption to determine whether the underlying
+// transaction should have stepping enabled. If stepping is enabled, the
+// transaction will implicitly use lower admission priority. However, the
+// user will need to remember to Step the Txn to make writes visible. The
+// InternalExecutor will automatically (for better or for worse) step the
+// transaction when executing each statement.
+func SteppingEnabled() TxnOption {
+ return steppingEnabled
+}
From 4e00d8d245544e6b37fbb1879b0e1b30a0b645a5 Mon Sep 17 00:00:00 2001
From: Jane Xing
Date: Thu, 29 Sep 2022 17:12:27 -0500
Subject: [PATCH 07/16] *: replacing `descs.CollectionFactory` with
`descs.TxnManager`
This is part of a mechanical move of the `TxnWithExecutor()` function
from the `descs` package to the `sql` package. We now use the `descs.TxnManager`
interface for the same logic.
Release note: None
---
pkg/ccl/backupccl/backup_metadata_test.go | 4 +-
pkg/ccl/backupccl/backup_test.go | 4 +-
.../backupccl/restore_data_processor_test.go | 2 +-
.../changefeedccl/sink_cloudstorage_test.go | 2 +-
pkg/ccl/storageccl/BUILD.bazel | 2 +-
.../storageccl/external_sst_reader_test.go | 4 +-
pkg/ccl/workloadccl/storage.go | 2 +-
pkg/cloud/BUILD.bazel | 1 -
pkg/cloud/amazon/s3_storage_test.go | 34 ++++---
pkg/cloud/azure/azure_storage_test.go | 4 +-
pkg/cloud/cloudtestutils/BUILD.bazel | 1 -
.../cloudtestutils/cloud_test_helpers.go | 35 ++++---
pkg/cloud/external_storage.go | 17 ++--
pkg/cloud/externalconn/connection_storage.go | 3 +-
pkg/cloud/gcp/gcs_storage_test.go | 88 +++++++++++------
pkg/cloud/httpsink/http_storage_test.go | 12 +--
pkg/cloud/impl_registry.go | 23 +++--
pkg/cloud/nodelocal/nodelocal_storage_test.go | 2 +-
pkg/cloud/nullsink/nullsink_storage_test.go | 2 +-
pkg/cloud/userfile/BUILD.bazel | 1 -
pkg/cloud/userfile/file_table_storage.go | 2 +-
pkg/cloud/userfile/file_table_storage_test.go | 22 ++---
.../filetable/file_table_read_writer.go | 92 +++++++++---------
.../filetable/filetabletest/BUILD.bazel | 2 +-
.../file_table_read_writer_test.go | 44 ++++++---
pkg/server/external_storage_builder.go | 14 +--
pkg/server/server.go | 8 +-
pkg/server/server_sql.go | 8 +-
pkg/server/tenant.go | 7 +-
pkg/server/testserver.go | 5 +
pkg/sql/conn_executor.go | 2 +-
pkg/sql/importer/BUILD.bazel | 1 +
pkg/sql/importer/import_processor_test.go | 2 +-
pkg/sql/importer/import_stmt_test.go | 7 +-
pkg/sql/temporary_schema.go | 94 ++++++++++---------
pkg/testutils/serverutils/test_server_shim.go | 4 +
36 files changed, 308 insertions(+), 249 deletions(-)
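Every call site below follows the same mechanical pattern: the `cf` argument (a `*descs.CollectionFactory`) becomes an `ief` argument (a `sqlutil.InternalExecutorFactory`). A minimal sketch of the rewritten test-style caller, using the names that appear in the diffs that follow:

    // Sketch: fetch both executors from a test server and pass the executor
    // factory where the collection factory used to go.
    ie := tc.Servers[0].InternalExecutor().(*sql.InternalExecutor)
    ief := tc.Servers[0].InternalExecutorFactory().(sqlutil.InternalExecutorFactory)
    store, err := cloud.ExternalStorageFromURI(ctx, dest, base.ExternalIODirConfig{},
        cluster.NoSettings, blobs.TestEmptyBlobClientFactory, username.RootUserName(),
        ie, ief, tc.Servers[0].DB(), nil /* limiters */)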
diff --git a/pkg/ccl/backupccl/backup_metadata_test.go b/pkg/ccl/backupccl/backup_metadata_test.go
index 47febbea773a..51225caa164f 100644
--- a/pkg/ccl/backupccl/backup_metadata_test.go
+++ b/pkg/ccl/backupccl/backup_metadata_test.go
@@ -25,7 +25,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/security/username"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
- "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
+ "github.com/cockroachdb/cockroach/pkg/sql/sqlutil"
"github.com/cockroachdb/cockroach/pkg/sql/stats"
"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
@@ -88,7 +88,7 @@ func checkMetadata(
blobs.TestEmptyBlobClientFactory,
username.RootUserName(),
tc.Servers[0].InternalExecutor().(*sql.InternalExecutor),
- tc.Servers[0].CollectionFactory().(*descs.CollectionFactory),
+ tc.Servers[0].InternalExecutorFactory().(sqlutil.InternalExecutorFactory),
tc.Servers[0].DB(),
nil, /* limiters */
)
diff --git a/pkg/ccl/backupccl/backup_test.go b/pkg/ccl/backupccl/backup_test.go
index 0d37b2b3d58b..52910f9c7980 100644
--- a/pkg/ccl/backupccl/backup_test.go
+++ b/pkg/ccl/backupccl/backup_test.go
@@ -573,7 +573,7 @@ func TestBackupRestoreAppend(t *testing.T) {
blobs.TestEmptyBlobClientFactory,
username.RootUserName(),
tc.Servers[0].InternalExecutor().(*sql.InternalExecutor),
- tc.Servers[0].CollectionFactory().(*descs.CollectionFactory),
+ tc.Servers[0].InternalExecutorFactory().(sqlutil.InternalExecutorFactory),
tc.Servers[0].DB(),
nil, /* limiters */
)
@@ -8027,7 +8027,7 @@ func TestReadBackupManifestMemoryMonitoring(t *testing.T) {
blobs.TestBlobServiceClient(dir),
username.RootUserName(),
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
nil, /* limiters */
)
diff --git a/pkg/ccl/backupccl/restore_data_processor_test.go b/pkg/ccl/backupccl/restore_data_processor_test.go
index 529a493e934d..306ba223b37b 100644
--- a/pkg/ccl/backupccl/restore_data_processor_test.go
+++ b/pkg/ccl/backupccl/restore_data_processor_test.go
@@ -255,7 +255,7 @@ func runTestIngest(t *testing.T, init func(*cluster.Settings)) {
return cloud.MakeExternalStorage(ctx, dest, base.ExternalIODirConfig{},
s.ClusterSettings(), blobs.TestBlobServiceClient(s.ClusterSettings().ExternalIODir),
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
nil, /* limiters */
opts...)
diff --git a/pkg/ccl/changefeedccl/sink_cloudstorage_test.go b/pkg/ccl/changefeedccl/sink_cloudstorage_test.go
index dbad01d047c4..e925476068ca 100644
--- a/pkg/ccl/changefeedccl/sink_cloudstorage_test.go
+++ b/pkg/ccl/changefeedccl/sink_cloudstorage_test.go
@@ -176,7 +176,7 @@ func TestCloudStorageSink(t *testing.T) {
clientFactory,
user,
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
nil, /* limiters */
opts...)
diff --git a/pkg/ccl/storageccl/BUILD.bazel b/pkg/ccl/storageccl/BUILD.bazel
index 6ed03b91393f..29df0c0170da 100644
--- a/pkg/ccl/storageccl/BUILD.bazel
+++ b/pkg/ccl/storageccl/BUILD.bazel
@@ -46,7 +46,7 @@ go_test(
"//pkg/security/username",
"//pkg/server",
"//pkg/sql",
- "//pkg/sql/catalog/descs",
+ "//pkg/sql/sqlutil",
"//pkg/storage",
"//pkg/testutils",
"//pkg/testutils/serverutils",
diff --git a/pkg/ccl/storageccl/external_sst_reader_test.go b/pkg/ccl/storageccl/external_sst_reader_test.go
index 9525ad84fd75..76b0fae120d0 100644
--- a/pkg/ccl/storageccl/external_sst_reader_test.go
+++ b/pkg/ccl/storageccl/external_sst_reader_test.go
@@ -20,7 +20,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/security/username"
"github.com/cockroachdb/cockroach/pkg/sql"
- "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
+ "github.com/cockroachdb/cockroach/pkg/sql/sqlutil"
"github.com/cockroachdb/cockroach/pkg/storage"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/storageutils"
@@ -124,7 +124,7 @@ func TestNewExternalSSTReader(t *testing.T) {
blobs.TestBlobServiceClient(tempDir),
username.RootUserName(),
tc.Servers[0].InternalExecutor().(*sql.InternalExecutor),
- tc.Servers[0].CollectionFactory().(*descs.CollectionFactory),
+ tc.Servers[0].InternalExecutorFactory().(sqlutil.InternalExecutorFactory),
tc.Servers[0].DB(),
nil, /* limiters */
)
diff --git a/pkg/ccl/workloadccl/storage.go b/pkg/ccl/workloadccl/storage.go
index 6856a6e10f0d..3d34a7d936e3 100644
--- a/pkg/ccl/workloadccl/storage.go
+++ b/pkg/ccl/workloadccl/storage.go
@@ -43,7 +43,7 @@ func GetStorage(ctx context.Context, cfg FixtureConfig) (cloud.ExternalStorage,
nil, /* blobClientFactory */
username.SQLUsername{},
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
nil, /* limiters */
)
diff --git a/pkg/cloud/BUILD.bazel b/pkg/cloud/BUILD.bazel
index d80a2d38922a..130caa7f945e 100644
--- a/pkg/cloud/BUILD.bazel
+++ b/pkg/cloud/BUILD.bazel
@@ -23,7 +23,6 @@ go_library(
"//pkg/security/username",
"//pkg/settings",
"//pkg/settings/cluster",
- "//pkg/sql/catalog/descs",
"//pkg/sql/sqlutil",
"//pkg/util/ctxgroup",
"//pkg/util/ioctx",
diff --git a/pkg/cloud/amazon/s3_storage_test.go b/pkg/cloud/amazon/s3_storage_test.go
index ac983b9ef0ea..397168657fa8 100644
--- a/pkg/cloud/amazon/s3_storage_test.go
+++ b/pkg/cloud/amazon/s3_storage_test.go
@@ -48,7 +48,7 @@ func makeS3Storage(
s, err := cloud.MakeExternalStorage(ctx, conf, base.ExternalIODirConfig{}, testSettings,
clientFactory,
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
nil, /* limiters */
)
@@ -83,7 +83,7 @@ func TestPutS3(t *testing.T) {
"backup-test-default"), base.ExternalIODirConfig{}, testSettings,
blobs.TestEmptyBlobClientFactory, user,
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
nil, /* limiters */
)
@@ -112,7 +112,7 @@ func TestPutS3(t *testing.T) {
cloud.AuthParam, cloud.AuthParamImplicit,
), false, user,
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
testSettings)
})
@@ -122,11 +122,15 @@ func TestPutS3(t *testing.T) {
)
cloudtestutils.CheckExportStore(t, uri, false, user,
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
testSettings,
)
- cloudtestutils.CheckListFiles(t, uri, user, nil, nil, nil, testSettings)
+ cloudtestutils.CheckListFiles(t, uri, user,
+ nil, /* ie */
+ nil, /* ief */
+ nil, /* kvDB */
+ testSettings)
})
// Tests that we can put an object with server side encryption specified.
@@ -151,7 +155,7 @@ func TestPutS3(t *testing.T) {
false,
user,
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
testSettings,
)
@@ -169,7 +173,7 @@ func TestPutS3(t *testing.T) {
false,
user,
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
testSettings)
})
@@ -242,13 +246,13 @@ func TestPutS3AssumeRole(t *testing.T) {
)
cloudtestutils.CheckExportStore(t, uri, false, user,
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
testSettings,
)
cloudtestutils.CheckListFiles(t, uri, user,
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
testSettings,
)
@@ -260,13 +264,13 @@ func TestPutS3AssumeRole(t *testing.T) {
)
cloudtestutils.CheckExportStore(t, uri, false, user,
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
testSettings,
)
cloudtestutils.CheckListFiles(t, uri, user,
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
testSettings,
)
@@ -301,7 +305,7 @@ func TestPutS3AssumeRole(t *testing.T) {
)
cloudtestutils.CheckNoPermission(t, roleURI, user,
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
testSettings,
)
@@ -321,7 +325,7 @@ func TestPutS3AssumeRole(t *testing.T) {
cloudtestutils.CheckExportStore(t, uri, false, user,
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
testSettings,
)
@@ -365,7 +369,7 @@ func TestPutS3Endpoint(t *testing.T) {
cloudtestutils.CheckExportStore(t, u.String(), false, user,
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
testSettings,
)
@@ -441,7 +445,7 @@ func TestS3BucketDoesNotExist(t *testing.T) {
s, err := cloud.MakeExternalStorage(ctx, conf, base.ExternalIODirConfig{}, testSettings,
clientFactory,
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
nil, /* limiters */
)
diff --git a/pkg/cloud/azure/azure_storage_test.go b/pkg/cloud/azure/azure_storage_test.go
index 9517afa25e16..40cc14e7a59a 100644
--- a/pkg/cloud/azure/azure_storage_test.go
+++ b/pkg/cloud/azure/azure_storage_test.go
@@ -69,13 +69,13 @@ func TestAzure(t *testing.T) {
cloudtestutils.CheckExportStore(t, cfg.filePath("backup-test"),
false, username.RootUserName(),
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
testSettings,
)
cloudtestutils.CheckListFiles(t, cfg.filePath("listing-test"), username.RootUserName(),
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
testSettings,
)
diff --git a/pkg/cloud/cloudtestutils/BUILD.bazel b/pkg/cloud/cloudtestutils/BUILD.bazel
index d99cbebc2797..db24a23ab80b 100644
--- a/pkg/cloud/cloudtestutils/BUILD.bazel
+++ b/pkg/cloud/cloudtestutils/BUILD.bazel
@@ -14,7 +14,6 @@ go_library(
"//pkg/kv",
"//pkg/security/username",
"//pkg/settings/cluster",
- "//pkg/sql/catalog/descs",
"//pkg/sql/sqlutil",
"//pkg/util/ioctx",
"//pkg/util/randutil",
diff --git a/pkg/cloud/cloudtestutils/cloud_test_helpers.go b/pkg/cloud/cloudtestutils/cloud_test_helpers.go
index 6a7028418569..ea3f1cc14957 100644
--- a/pkg/cloud/cloudtestutils/cloud_test_helpers.go
+++ b/pkg/cloud/cloudtestutils/cloud_test_helpers.go
@@ -32,7 +32,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/security/username"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
- "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
"github.com/cockroachdb/cockroach/pkg/sql/sqlutil"
"github.com/cockroachdb/cockroach/pkg/util/ioctx"
"github.com/cockroachdb/cockroach/pkg/util/randutil"
@@ -109,7 +108,7 @@ func storeFromURI(
clientFactory blobs.BlobClientFactory,
user username.SQLUsername,
ie sqlutil.InternalExecutor,
- cf *descs.CollectionFactory,
+ ief sqlutil.InternalExecutorFactory,
kvDB *kv.DB,
testSettings *cluster.Settings,
) cloud.ExternalStorage {
@@ -119,7 +118,7 @@ func storeFromURI(
}
// Setup a sink for the given args.
s, err := cloud.MakeExternalStorage(ctx, conf, base.ExternalIODirConfig{}, testSettings,
- clientFactory, ie, cf, kvDB, nil)
+ clientFactory, ie, ief, kvDB, nil)
if err != nil {
t.Fatal(err)
}
@@ -133,7 +132,7 @@ func CheckExportStore(
skipSingleFile bool,
user username.SQLUsername,
ie sqlutil.InternalExecutor,
- cf *descs.CollectionFactory,
+ ief sqlutil.InternalExecutorFactory,
kvDB *kv.DB,
testSettings *cluster.Settings,
) {
@@ -148,7 +147,7 @@ func CheckExportStore(
// Setup a sink for the given args.
clientFactory := blobs.TestBlobServiceClient(testSettings.ExternalIODir)
s, err := cloud.MakeExternalStorage(ctx, conf, ioConf, testSettings, clientFactory,
- ie, cf, kvDB, nil)
+ ie, ief, kvDB, nil)
if err != nil {
t.Fatal(err)
}
@@ -256,7 +255,7 @@ func CheckExportStore(
t.Fatal(err)
}
singleFile := storeFromURI(ctx, t, appendPath(t, storeURI, testingFilename), clientFactory,
- user, ie, cf, kvDB, testSettings)
+ user, ie, ief, kvDB, testSettings)
defer singleFile.Close()
res, err := singleFile.ReadFile(ctx, "")
@@ -277,7 +276,7 @@ func CheckExportStore(
t.Run("write-single-file-by-uri", func(t *testing.T) {
const testingFilename = "B"
singleFile := storeFromURI(ctx, t, appendPath(t, storeURI, testingFilename), clientFactory,
- user, ie, cf, kvDB, testSettings)
+ user, ie, ief, kvDB, testSettings)
defer singleFile.Close()
if err := cloud.WriteFile(ctx, singleFile, "", bytes.NewReader([]byte("bbb"))); err != nil {
@@ -308,7 +307,7 @@ func CheckExportStore(
if err := cloud.WriteFile(ctx, s, testingFilename, bytes.NewReader([]byte("aaa"))); err != nil {
t.Fatal(err)
}
- singleFile := storeFromURI(ctx, t, storeURI, clientFactory, user, ie, cf, kvDB, testSettings)
+ singleFile := storeFromURI(ctx, t, storeURI, clientFactory, user, ie, ief, kvDB, testSettings)
defer singleFile.Close()
// Read a valid file.
@@ -350,11 +349,11 @@ func CheckListFiles(
storeURI string,
user username.SQLUsername,
ie sqlutil.InternalExecutor,
- cf *descs.CollectionFactory,
+ ief sqlutil.InternalExecutorFactory,
kvDB *kv.DB,
testSettings *cluster.Settings,
) {
- CheckListFilesCanonical(t, storeURI, "", user, ie, cf, kvDB, testSettings)
+ CheckListFilesCanonical(t, storeURI, "", user, ie, ief, kvDB, testSettings)
}
// CheckListFilesCanonical is like CheckListFiles but takes a canonical prefix
@@ -366,7 +365,7 @@ func CheckListFilesCanonical(
canonical string,
user username.SQLUsername,
ie sqlutil.InternalExecutor,
- cf *descs.CollectionFactory,
+ ief sqlutil.InternalExecutorFactory,
kvDB *kv.DB,
testSettings *cluster.Settings,
) {
@@ -380,7 +379,7 @@ func CheckListFilesCanonical(
clientFactory := blobs.TestBlobServiceClient(testSettings.ExternalIODir)
for _, fileName := range fileNames {
- file := storeFromURI(ctx, t, storeURI, clientFactory, user, ie, cf, kvDB, testSettings)
+ file := storeFromURI(ctx, t, storeURI, clientFactory, user, ie, ief, kvDB, testSettings)
if err := cloud.WriteFile(ctx, file, fileName, bytes.NewReader([]byte("bbb"))); err != nil {
t.Fatal(err)
}
@@ -468,7 +467,7 @@ func CheckListFilesCanonical(
},
} {
t.Run(tc.name, func(t *testing.T) {
- s := storeFromURI(ctx, t, tc.uri, clientFactory, user, ie, cf, kvDB, testSettings)
+ s := storeFromURI(ctx, t, tc.uri, clientFactory, user, ie, ief, kvDB, testSettings)
var actual []string
require.NoError(t, s.List(ctx, tc.prefix, tc.delimiter, func(f string) error {
actual = append(actual, f)
@@ -481,7 +480,7 @@ func CheckListFilesCanonical(
})
for _, fileName := range fileNames {
- file := storeFromURI(ctx, t, storeURI, clientFactory, user, ie, cf, kvDB, testSettings)
+ file := storeFromURI(ctx, t, storeURI, clientFactory, user, ie, ief, kvDB, testSettings)
if err := file.Delete(ctx, fileName); err != nil {
t.Fatal(err)
}
@@ -502,7 +501,7 @@ func uploadData(
s, err := cloud.MakeExternalStorage(ctx, dest, base.ExternalIODirConfig{}, testSettings,
nil, /* blobClientFactory */
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
nil, /* limiters */
)
@@ -547,7 +546,7 @@ func CheckAntagonisticRead(
s, err := cloud.MakeExternalStorage(ctx, conf, base.ExternalIODirConfig{}, testSettings,
nil, /* blobClientFactory */
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
nil, /* limiters */
)
@@ -569,7 +568,7 @@ func CheckNoPermission(
storeURI string,
user username.SQLUsername,
ie sqlutil.InternalExecutor,
- cf *descs.CollectionFactory,
+ ief sqlutil.InternalExecutorFactory,
kvDB *kv.DB,
testSettings *cluster.Settings,
) {
@@ -582,7 +581,7 @@ func CheckNoPermission(
}
clientFactory := blobs.TestBlobServiceClient(testSettings.ExternalIODir)
- s, err := cloud.MakeExternalStorage(ctx, conf, ioConf, testSettings, clientFactory, ie, cf, kvDB, nil)
+ s, err := cloud.MakeExternalStorage(ctx, conf, ioConf, testSettings, clientFactory, ie, ief, kvDB, nil)
if err != nil {
t.Fatal(err)
}
diff --git a/pkg/cloud/external_storage.go b/pkg/cloud/external_storage.go
index 901118ce0a03..8ab8d2230eff 100644
--- a/pkg/cloud/external_storage.go
+++ b/pkg/cloud/external_storage.go
@@ -22,7 +22,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/security/username"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
- "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
"github.com/cockroachdb/cockroach/pkg/sql/sqlutil"
"github.com/cockroachdb/cockroach/pkg/util/ioctx"
"github.com/cockroachdb/errors"
@@ -150,14 +149,14 @@ type ExternalStorageURIParser func(ExternalStorageURIContext, *url.URL) (cloudpb
// ExternalStorageContext contains the dependencies passed to external storage
// implementations during creation.
type ExternalStorageContext struct {
- IOConf base.ExternalIODirConfig
- Settings *cluster.Settings
- BlobClientFactory blobs.BlobClientFactory
- InternalExecutor sqlutil.InternalExecutor
- CollectionFactory *descs.CollectionFactory
- DB *kv.DB
- Options []ExternalStorageOption
- Limiters Limiters
+ IOConf base.ExternalIODirConfig
+ Settings *cluster.Settings
+ BlobClientFactory blobs.BlobClientFactory
+ InternalExecutor sqlutil.InternalExecutor
+ InternalExecutorFactory sqlutil.InternalExecutorFactory
+ DB *kv.DB
+ Options []ExternalStorageOption
+ Limiters Limiters
}
// ExternalStorageOptions holds dependencies and values that can be
diff --git a/pkg/cloud/externalconn/connection_storage.go b/pkg/cloud/externalconn/connection_storage.go
index e98078e6e2eb..96309cdf75a9 100644
--- a/pkg/cloud/externalconn/connection_storage.go
+++ b/pkg/cloud/externalconn/connection_storage.go
@@ -92,7 +92,8 @@ func makeExternalConnectionStorage(
uri.Path = path.Join(uri.Path, cfg.Path)
return cloud.ExternalStorageFromURI(ctx, uri.String(), args.IOConf, args.Settings,
args.BlobClientFactory, username.MakeSQLUsernameFromPreNormalizedString(cfg.User),
- args.InternalExecutor, args.CollectionFactory, args.DB, args.Limiters, args.Options...)
+ args.InternalExecutor, args.InternalExecutorFactory,
+ args.DB, args.Limiters, args.Options...)
default:
return nil, errors.Newf("cannot connect to %T; unsupported resource for an ExternalStorage connection", d)
}
diff --git a/pkg/cloud/gcp/gcs_storage_test.go b/pkg/cloud/gcp/gcs_storage_test.go
index cb0e302c451d..29e8456e33af 100644
--- a/pkg/cloud/gcp/gcs_storage_test.go
+++ b/pkg/cloud/gcp/gcs_storage_test.go
@@ -60,7 +60,16 @@ func TestPutGoogleCloud(t *testing.T) {
if specified {
uri += fmt.Sprintf("&%s=%s", cloud.AuthParam, cloud.AuthParamSpecified)
}
- cloudtestutils.CheckExportStore(t, uri, false, user, nil, nil, nil, testSettings)
+ cloudtestutils.CheckExportStore(
+ t,
+ uri,
+ false,
+ user,
+ nil, /* ie */
+ nil, /* ief */
+ nil, /* kvDB */
+ testSettings,
+ )
cloudtestutils.CheckListFiles(t, fmt.Sprintf("gs://%s/%s/%s?%s=%s&%s=%s",
bucket,
"backup-test-specified",
@@ -71,7 +80,7 @@ func TestPutGoogleCloud(t *testing.T) {
url.QueryEscape(encoded),
), username.RootUserName(),
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
testSettings,
)
@@ -81,8 +90,16 @@ func TestPutGoogleCloud(t *testing.T) {
skip.IgnoreLint(t, "implicit auth is not configured")
}
- cloudtestutils.CheckExportStore(t, fmt.Sprintf("gs://%s/%s?%s=%s", bucket, "backup-test-implicit",
- cloud.AuthParam, cloud.AuthParamImplicit), false, user, nil, nil, nil, testSettings)
+ cloudtestutils.CheckExportStore(
+ t,
+ fmt.Sprintf("gs://%s/%s?%s=%s", bucket, "backup-test-implicit",
+ cloud.AuthParam, cloud.AuthParamImplicit),
+ false,
+ user,
+ nil, /* ie */
+ nil, /* ief */
+ nil, /* kvDB */
+ testSettings)
cloudtestutils.CheckListFiles(t, fmt.Sprintf("gs://%s/%s/%s?%s=%s",
bucket,
"backup-test-implicit",
@@ -91,7 +108,7 @@ func TestPutGoogleCloud(t *testing.T) {
cloud.AuthParamImplicit,
), username.RootUserName(),
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
testSettings,
)
@@ -118,7 +135,15 @@ func TestPutGoogleCloud(t *testing.T) {
token.AccessToken,
)
uri += fmt.Sprintf("&%s=%s", cloud.AuthParam, cloud.AuthParamSpecified)
- cloudtestutils.CheckExportStore(t, uri, false, user, nil, nil, nil, testSettings)
+ cloudtestutils.CheckExportStore(
+ t,
+ uri,
+ false,
+ user,
+ nil, /* ie */
+ nil, /* ief */
+ nil, /* kvDB */
+ testSettings)
cloudtestutils.CheckListFiles(t, fmt.Sprintf("gs://%s/%s/%s?%s=%s&%s=%s",
bucket,
"backup-test-specified",
@@ -129,7 +154,7 @@ func TestPutGoogleCloud(t *testing.T) {
token.AccessToken,
), username.RootUserName(),
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
testSettings,
)
@@ -161,20 +186,27 @@ func TestGCSAssumeRole(t *testing.T) {
cloudtestutils.CheckNoPermission(t, fmt.Sprintf("gs://%s/%s?%s=%s", limitedBucket, "backup-test-assume-role",
CredentialsParam, url.QueryEscape(encoded)), user,
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
testSettings,
)
- cloudtestutils.CheckExportStore(t, fmt.Sprintf("gs://%s/%s?%s=%s&%s=%s&%s=%s",
- limitedBucket,
- "backup-test-assume-role",
- cloud.AuthParam,
- cloud.AuthParamSpecified,
- AssumeRoleParam,
- assumedAccount, CredentialsParam,
- url.QueryEscape(encoded),
- ), false, user, nil, nil, nil, testSettings)
+ cloudtestutils.CheckExportStore(
+ t,
+ fmt.Sprintf("gs://%s/%s?%s=%s&%s=%s&%s=%s",
+ limitedBucket,
+ "backup-test-assume-role",
+ cloud.AuthParam,
+ cloud.AuthParamSpecified,
+ AssumeRoleParam,
+ assumedAccount, CredentialsParam,
+ url.QueryEscape(encoded),
+ ), false, user,
+ nil, /* ie */
+ nil, /* ief */
+ nil, /* kvDB */
+ testSettings,
+ )
cloudtestutils.CheckListFiles(t, fmt.Sprintf("gs://%s/%s/%s?%s=%s&%s=%s&%s=%s",
limitedBucket,
"backup-test-assume-role",
@@ -187,7 +219,7 @@ func TestGCSAssumeRole(t *testing.T) {
url.QueryEscape(encoded),
), username.RootUserName(),
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
testSettings,
)
@@ -203,7 +235,7 @@ func TestGCSAssumeRole(t *testing.T) {
cloudtestutils.CheckNoPermission(t, fmt.Sprintf("gs://%s/%s?%s=%s", limitedBucket, "backup-test-assume-role",
cloud.AuthParam, cloud.AuthParamImplicit), user,
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
testSettings,
)
@@ -211,7 +243,7 @@ func TestGCSAssumeRole(t *testing.T) {
cloudtestutils.CheckExportStore(t, fmt.Sprintf("gs://%s/%s?%s=%s&%s=%s", limitedBucket, "backup-test-assume-role",
cloud.AuthParam, cloud.AuthParamImplicit, AssumeRoleParam, assumedAccount), false, user,
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
testSettings,
)
@@ -225,7 +257,7 @@ func TestGCSAssumeRole(t *testing.T) {
assumedAccount,
), username.RootUserName(),
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
testSettings,
)
@@ -269,7 +301,7 @@ func TestGCSAssumeRole(t *testing.T) {
)
cloudtestutils.CheckNoPermission(t, roleURI, user,
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
testSettings,
)
@@ -285,13 +317,13 @@ func TestGCSAssumeRole(t *testing.T) {
)
cloudtestutils.CheckExportStore(t, uri, false, user,
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
testSettings,
)
cloudtestutils.CheckListFiles(t, uri, user,
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
testSettings,
)
@@ -339,7 +371,7 @@ func TestFileDoesNotExist(t *testing.T) {
s, err := cloud.MakeExternalStorage(context.Background(), conf, base.ExternalIODirConfig{}, testSettings,
nil, /* blobClientFactory */
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
nil, /* limiters */
)
@@ -358,7 +390,7 @@ func TestFileDoesNotExist(t *testing.T) {
s, err := cloud.MakeExternalStorage(context.Background(), conf, base.ExternalIODirConfig{}, testSettings,
nil, /* blobClientFactory */
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
nil, /* limiters */
)
@@ -395,7 +427,7 @@ func TestCompressedGCS(t *testing.T) {
s1, err := cloud.MakeExternalStorage(ctx, conf1, base.ExternalIODirConfig{}, testSettings,
nil, /* blobClientFactory */
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
nil, /* limiters */
)
@@ -403,7 +435,7 @@ func TestCompressedGCS(t *testing.T) {
s2, err := cloud.MakeExternalStorage(ctx, conf2, base.ExternalIODirConfig{}, testSettings,
nil, /* blobClientFactory */
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
nil, /* limiters */
)
diff --git a/pkg/cloud/httpsink/http_storage_test.go b/pkg/cloud/httpsink/http_storage_test.go
index 6e8823be4b3c..de84d4e7617b 100644
--- a/pkg/cloud/httpsink/http_storage_test.go
+++ b/pkg/cloud/httpsink/http_storage_test.go
@@ -123,7 +123,7 @@ func TestPutHttp(t *testing.T) {
defer cleanup()
cloudtestutils.CheckExportStore(t, srv.String(), false, user,
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
testSettings,
)
@@ -145,7 +145,7 @@ func TestPutHttp(t *testing.T) {
cloudtestutils.CheckExportStore(t, combined.String(), true, user,
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
testSettings,
)
@@ -173,7 +173,7 @@ func TestPutHttp(t *testing.T) {
}
s, err := cloud.MakeExternalStorage(ctx, conf, base.ExternalIODirConfig{}, testSettings, blobs.TestEmptyBlobClientFactory,
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
nil, /* limiters */
)
@@ -331,7 +331,7 @@ func TestCanDisableHttp(t *testing.T) {
s, err := cloud.MakeExternalStorage(context.Background(), cloudpb.ExternalStorage{Provider: cloudpb.ExternalStorageProvider_http}, conf, testSettings, blobs.TestEmptyBlobClientFactory,
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
nil, /* limiters */
)
@@ -354,7 +354,7 @@ func TestCanDisableOutbound(t *testing.T) {
} {
s, err := cloud.MakeExternalStorage(context.Background(), cloudpb.ExternalStorage{Provider: provider}, conf, testSettings, blobs.TestEmptyBlobClientFactory,
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
nil, /* limiters */
)
@@ -388,7 +388,7 @@ func TestExternalStorageCanUseHTTPProxy(t *testing.T) {
require.NoError(t, err)
s, err := cloud.MakeExternalStorage(context.Background(), conf, base.ExternalIODirConfig{}, testSettings, nil,
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
nil, /* limiters */
)
diff --git a/pkg/cloud/impl_registry.go b/pkg/cloud/impl_registry.go
index 065258c0bd85..83139747d245 100644
--- a/pkg/cloud/impl_registry.go
+++ b/pkg/cloud/impl_registry.go
@@ -25,7 +25,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/security/username"
"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
- "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
"github.com/cockroachdb/cockroach/pkg/sql/sqlutil"
"github.com/cockroachdb/cockroach/pkg/util/ioctx"
"github.com/cockroachdb/cockroach/pkg/util/log"
@@ -152,7 +151,7 @@ func ExternalStorageFromURI(
blobClientFactory blobs.BlobClientFactory,
user username.SQLUsername,
ie sqlutil.InternalExecutor,
- cf *descs.CollectionFactory,
+ ief sqlutil.InternalExecutorFactory,
kvDB *kv.DB,
limiters Limiters,
opts ...ExternalStorageOption,
@@ -162,7 +161,7 @@ func ExternalStorageFromURI(
return nil, err
}
return MakeExternalStorage(ctx, conf, externalConfig, settings, blobClientFactory,
- ie, cf, kvDB, limiters, opts...)
+ ie, ief, kvDB, limiters, opts...)
}
// SanitizeExternalStorageURI returns the external storage URI with some
@@ -207,20 +206,20 @@ func MakeExternalStorage(
settings *cluster.Settings,
blobClientFactory blobs.BlobClientFactory,
ie sqlutil.InternalExecutor,
- cf *descs.CollectionFactory,
+ ief sqlutil.InternalExecutorFactory,
kvDB *kv.DB,
limiters Limiters,
opts ...ExternalStorageOption,
) (ExternalStorage, error) {
args := ExternalStorageContext{
- IOConf: conf,
- Settings: settings,
- BlobClientFactory: blobClientFactory,
- InternalExecutor: ie,
- CollectionFactory: cf,
- DB: kvDB,
- Options: opts,
- Limiters: limiters,
+ IOConf: conf,
+ Settings: settings,
+ BlobClientFactory: blobClientFactory,
+ InternalExecutor: ie,
+ InternalExecutorFactory: ief,
+ DB: kvDB,
+ Options: opts,
+ Limiters: limiters,
}
if conf.DisableOutbound && dest.Provider != cloudpb.ExternalStorageProvider_userfile {
return nil, errors.New("external network access is disabled")
diff --git a/pkg/cloud/nodelocal/nodelocal_storage_test.go b/pkg/cloud/nodelocal/nodelocal_storage_test.go
index 4792c601b8e9..cfc62c526c3f 100644
--- a/pkg/cloud/nodelocal/nodelocal_storage_test.go
+++ b/pkg/cloud/nodelocal/nodelocal_storage_test.go
@@ -33,7 +33,7 @@ func TestPutLocal(t *testing.T) {
cloudtestutils.CheckExportStore(t, dest, false, username.RootUserName(), nil, nil, nil, testSettings)
cloudtestutils.CheckListFiles(t, "nodelocal://0/listing-test/basepath", username.RootUserName(),
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
testSettings,
)
diff --git a/pkg/cloud/nullsink/nullsink_storage_test.go b/pkg/cloud/nullsink/nullsink_storage_test.go
index 04f898ab33d6..cb2d2a0ac316 100644
--- a/pkg/cloud/nullsink/nullsink_storage_test.go
+++ b/pkg/cloud/nullsink/nullsink_storage_test.go
@@ -40,7 +40,7 @@ func TestNullSinkReadAndWrite(t *testing.T) {
nil, /* Cluster Settings */
nil, /* blobClientFactory */
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
nil, /* limiters */
)
diff --git a/pkg/cloud/userfile/BUILD.bazel b/pkg/cloud/userfile/BUILD.bazel
index da46209f30b1..05ca28916e74 100644
--- a/pkg/cloud/userfile/BUILD.bazel
+++ b/pkg/cloud/userfile/BUILD.bazel
@@ -46,7 +46,6 @@ go_test(
"//pkg/security/username",
"//pkg/server",
"//pkg/settings/cluster",
- "//pkg/sql/catalog/descs",
"//pkg/sql/sem/tree",
"//pkg/sql/sqlutil",
"//pkg/sql/tests",
diff --git a/pkg/cloud/userfile/file_table_storage.go b/pkg/cloud/userfile/file_table_storage.go
index c30b9fe7c06a..96dbcd8d98f2 100644
--- a/pkg/cloud/userfile/file_table_storage.go
+++ b/pkg/cloud/userfile/file_table_storage.go
@@ -122,7 +122,7 @@ func makeFileTableStorage(
// cfg.User is already a normalized SQL username.
user := username.MakeSQLUsernameFromPreNormalizedString(cfg.User)
- executor := filetable.MakeInternalFileToTableExecutor(args.InternalExecutor, args.CollectionFactory, args.DB)
+ executor := filetable.MakeInternalFileToTableExecutor(args.InternalExecutor, args.InternalExecutorFactory, args.DB)
fileToTableSystem, err := filetable.NewFileToTableSystem(ctx,
cfg.QualifiedTableName, executor, user)
diff --git a/pkg/cloud/userfile/file_table_storage_test.go b/pkg/cloud/userfile/file_table_storage_test.go
index 940d009a61b0..e7dfcc03e581 100644
--- a/pkg/cloud/userfile/file_table_storage_test.go
+++ b/pkg/cloud/userfile/file_table_storage_test.go
@@ -23,7 +23,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/cloud/cloudtestutils"
"github.com/cockroachdb/cockroach/pkg/security/username"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
- "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sqlutil"
"github.com/cockroachdb/cockroach/pkg/sql/tests"
@@ -49,21 +48,20 @@ func TestPutUserFileTable(t *testing.T) {
dest := MakeUserFileStorageURI(qualifiedTableName, filename)
ie := s.InternalExecutor().(sqlutil.InternalExecutor)
- cf := s.CollectionFactory().(*descs.CollectionFactory)
- cloudtestutils.CheckExportStore(t, dest, false, username.RootUserName(), ie, cf, kvDB, testSettings)
+ ief := s.InternalExecutorFactory().(sqlutil.InternalExecutorFactory)
+ cloudtestutils.CheckExportStore(t, dest, false, username.RootUserName(), ie, ief, kvDB, testSettings)
cloudtestutils.CheckListFiles(t, "userfile://defaultdb.public.file_list_table/listing-test/basepath",
- username.RootUserName(), ie, cf, kvDB, testSettings)
+ username.RootUserName(), ie, ief, kvDB, testSettings)
t.Run("empty-qualified-table-name", func(t *testing.T) {
dest := MakeUserFileStorageURI("", filename)
ie := s.InternalExecutor().(sqlutil.InternalExecutor)
- cf := s.CollectionFactory().(*descs.CollectionFactory)
- cloudtestutils.CheckExportStore(t, dest, false, username.RootUserName(), ie, cf, kvDB, testSettings)
+ cloudtestutils.CheckExportStore(t, dest, false, username.RootUserName(), ie, ief, kvDB, testSettings)
cloudtestutils.CheckListFilesCanonical(t, "userfile:///listing-test/basepath", "userfile://defaultdb.public.userfiles_root/listing-test/basepath",
- username.RootUserName(), ie, cf, kvDB, testSettings)
+ username.RootUserName(), ie, ief, kvDB, testSettings)
})
t.Run("reject-normalized-basename", func(t *testing.T) {
@@ -71,7 +69,7 @@ func TestPutUserFileTable(t *testing.T) {
userfileURL := url.URL{Scheme: "userfile", Host: qualifiedTableName, Path: ""}
store, err := cloud.ExternalStorageFromURI(ctx, userfileURL.String()+"/",
- base.ExternalIODirConfig{}, cluster.NoSettings, blobs.TestEmptyBlobClientFactory, username.RootUserName(), ie, cf, kvDB, nil)
+ base.ExternalIODirConfig{}, cluster.NoSettings, blobs.TestEmptyBlobClientFactory, username.RootUserName(), ie, ief, kvDB, nil)
require.NoError(t, err)
defer store.Close()
@@ -109,7 +107,7 @@ func TestUserScoping(t *testing.T) {
dest := MakeUserFileStorageURI(qualifiedTableName, "")
ie := s.InternalExecutor().(sqlutil.InternalExecutor)
- cf := s.CollectionFactory().(*descs.CollectionFactory)
+ ief := s.InternalExecutorFactory().(sqlutil.InternalExecutorFactory)
// Create two users and grant them all privileges on defaultdb.
user1 := username.MakeSQLUsernameFromPreNormalizedString("foo")
@@ -119,13 +117,13 @@ func TestUserScoping(t *testing.T) {
// Write file as user1.
fileTableSystem1, err := cloud.ExternalStorageFromURI(ctx, dest, base.ExternalIODirConfig{},
- cluster.NoSettings, blobs.TestEmptyBlobClientFactory, user1, ie, cf, kvDB, nil)
+ cluster.NoSettings, blobs.TestEmptyBlobClientFactory, user1, ie, ief, kvDB, nil)
require.NoError(t, err)
require.NoError(t, cloud.WriteFile(ctx, fileTableSystem1, filename, bytes.NewReader([]byte("aaa"))))
// Attempt to read/write file as user2 and expect to fail.
fileTableSystem2, err := cloud.ExternalStorageFromURI(ctx, dest, base.ExternalIODirConfig{},
- cluster.NoSettings, blobs.TestEmptyBlobClientFactory, user2, ie, cf, kvDB, nil)
+ cluster.NoSettings, blobs.TestEmptyBlobClientFactory, user2, ie, ief, kvDB, nil)
require.NoError(t, err)
_, err = fileTableSystem2.ReadFile(ctx, filename)
require.Error(t, err)
@@ -133,7 +131,7 @@ func TestUserScoping(t *testing.T) {
// Read file as root and expect to succeed.
fileTableSystem3, err := cloud.ExternalStorageFromURI(ctx, dest, base.ExternalIODirConfig{},
- cluster.NoSettings, blobs.TestEmptyBlobClientFactory, username.RootUserName(), ie, cf, kvDB, nil)
+ cluster.NoSettings, blobs.TestEmptyBlobClientFactory, username.RootUserName(), ie, ief, kvDB, nil)
require.NoError(t, err)
_, err = fileTableSystem3.ReadFile(ctx, filename)
require.NoError(t, err)
diff --git a/pkg/cloud/userfile/filetable/file_table_read_writer.go b/pkg/cloud/userfile/filetable/file_table_read_writer.go
index 182c6720112f..1f8f16f78f09 100644
--- a/pkg/cloud/userfile/filetable/file_table_read_writer.go
+++ b/pkg/cloud/userfile/filetable/file_table_read_writer.go
@@ -60,9 +60,9 @@ type FileToTableSystemExecutor interface {
// InternalFileToTableExecutor is the SQL query executor which uses an internal
// SQL connection to interact with the database.
type InternalFileToTableExecutor struct {
- ie sqlutil.InternalExecutor
- cf *descs.CollectionFactory
- db *kv.DB
+ ie sqlutil.InternalExecutor
+ ief sqlutil.InternalExecutorFactory
+ db *kv.DB
}
var _ FileToTableSystemExecutor = &InternalFileToTableExecutor{}
@@ -70,9 +70,9 @@ var _ FileToTableSystemExecutor = &InternalFileToTableExecutor{}
// MakeInternalFileToTableExecutor returns an instance of a
// InternalFileToTableExecutor.
func MakeInternalFileToTableExecutor(
- ie sqlutil.InternalExecutor, cf *descs.CollectionFactory, db *kv.DB,
+ ie sqlutil.InternalExecutor, ief sqlutil.InternalExecutorFactory, db *kv.DB,
) *InternalFileToTableExecutor {
- return &InternalFileToTableExecutor{ie, cf, db}
+ return &InternalFileToTableExecutor{ie, ief, db}
}
// Query implements the FileToTableSystemExecutor interface.
@@ -245,32 +245,33 @@ func NewFileToTableSystem(
if err != nil {
return nil, err
}
- if err := e.cf.TxnWithExecutor(ctx, e.db, nil /* SessionData */, func(
- ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, ie sqlutil.InternalExecutor,
- ) error {
- // TODO(adityamaru): Handle scenario where the user has already created
- // tables with the same names not via the FileToTableSystem
- // object. Not sure if we want to error out or work around it.
- tablesExist, err := f.checkIfFileAndPayloadTableExist(ctx, txn, ie)
- if err != nil {
- return err
- }
-
- if !tablesExist {
- if err := f.createFileAndPayloadTables(ctx, txn, ie); err != nil {
+ if err := e.ief.(descs.InternalExecutorFactoryWithTxn).DescsTxnWithExecutor(
+ ctx, e.db, nil /* SessionData */, func(
+ ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, ie sqlutil.InternalExecutor,
+ ) error {
+ // TODO(adityamaru): Handle scenario where the user has already created
+ // tables with the same names not via the FileToTableSystem
+ // object. Not sure if we want to error out or work around it.
+ tablesExist, err := f.checkIfFileAndPayloadTableExist(ctx, txn, ie)
+ if err != nil {
return err
}
- if err := f.grantCurrentUserTablePrivileges(ctx, txn, ie); err != nil {
- return err
- }
+ if !tablesExist {
+ if err := f.createFileAndPayloadTables(ctx, txn, ie); err != nil {
+ return err
+ }
- if err := f.revokeOtherUserTablePrivileges(ctx, txn, ie); err != nil {
- return err
+ if err := f.grantCurrentUserTablePrivileges(ctx, txn, ie); err != nil {
+ return err
+ }
+
+ if err := f.revokeOtherUserTablePrivileges(ctx, txn, ie); err != nil {
+ return err
+ }
}
- }
- return nil
- }); err != nil {
+ return nil
+ }); err != nil {
return nil, err
}
@@ -366,27 +367,28 @@ func DestroyUserFileSystem(ctx context.Context, f *FileToTableSystem) error {
return err
}
- if err := e.cf.TxnWithExecutor(ctx, e.db, nil, func(
- ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, ie sqlutil.InternalExecutor,
- ) error {
- dropPayloadTableQuery := fmt.Sprintf(`DROP TABLE %s`, f.GetFQPayloadTableName())
- _, err := ie.ExecEx(ctx, "drop-payload-table", txn,
- sessiondata.InternalExecutorOverride{User: f.username},
- dropPayloadTableQuery)
- if err != nil {
- return errors.Wrap(err, "failed to drop payload table")
- }
+ if err := e.ief.(descs.InternalExecutorFactoryWithTxn).DescsTxnWithExecutor(
+ ctx, e.db, nil /* sd */, func(
+ ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, ie sqlutil.InternalExecutor,
+ ) error {
+ dropPayloadTableQuery := fmt.Sprintf(`DROP TABLE %s`, f.GetFQPayloadTableName())
+ _, err := ie.ExecEx(ctx, "drop-payload-table", txn,
+ sessiondata.InternalExecutorOverride{User: f.username},
+ dropPayloadTableQuery)
+ if err != nil {
+ return errors.Wrap(err, "failed to drop payload table")
+ }
- dropFileTableQuery := fmt.Sprintf(`DROP TABLE %s CASCADE`, f.GetFQFileTableName())
- _, err = ie.ExecEx(ctx, "drop-file-table", txn,
- sessiondata.InternalExecutorOverride{User: f.username},
- dropFileTableQuery)
- if err != nil {
- return errors.Wrap(err, "failed to drop file table")
- }
+ dropFileTableQuery := fmt.Sprintf(`DROP TABLE %s CASCADE`, f.GetFQFileTableName())
+ _, err = ie.ExecEx(ctx, "drop-file-table", txn,
+ sessiondata.InternalExecutorOverride{User: f.username},
+ dropFileTableQuery)
+ if err != nil {
+ return errors.Wrap(err, "failed to drop file table")
+ }
- return nil
- }); err != nil {
+ return nil
+ }); err != nil {
return err
}
diff --git a/pkg/cloud/userfile/filetable/filetabletest/BUILD.bazel b/pkg/cloud/userfile/filetable/filetabletest/BUILD.bazel
index e99f83fa5def..8dab3835fc1d 100644
--- a/pkg/cloud/userfile/filetable/filetabletest/BUILD.bazel
+++ b/pkg/cloud/userfile/filetable/filetabletest/BUILD.bazel
@@ -17,7 +17,7 @@ go_test(
"//pkg/security/username",
"//pkg/server",
"//pkg/sql",
- "//pkg/sql/catalog/descs",
+ "//pkg/sql/sqlutil",
"//pkg/sql/tests",
"//pkg/testutils",
"//pkg/testutils/serverutils",
diff --git a/pkg/cloud/userfile/filetable/filetabletest/file_table_read_writer_test.go b/pkg/cloud/userfile/filetable/filetabletest/file_table_read_writer_test.go
index d5ad2777b2b6..f368af7abe1a 100644
--- a/pkg/cloud/userfile/filetable/filetabletest/file_table_read_writer_test.go
+++ b/pkg/cloud/userfile/filetable/filetabletest/file_table_read_writer_test.go
@@ -22,7 +22,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/security/username"
"github.com/cockroachdb/cockroach/pkg/sql"
- "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
+ "github.com/cockroachdb/cockroach/pkg/sql/sqlutil"
"github.com/cockroachdb/cockroach/pkg/sql/tests"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
@@ -107,8 +107,11 @@ func TestListAndDeleteFiles(t *testing.T) {
s, _, kvDB := serverutils.StartServer(t, params)
defer s.Stopper().Stop(ctx)
- executor := filetable.MakeInternalFileToTableExecutor(s.InternalExecutor().(*sql.
- InternalExecutor), s.CollectionFactory().(*descs.CollectionFactory), kvDB)
+ executor := filetable.MakeInternalFileToTableExecutor(
+ s.InternalExecutor().(*sql.InternalExecutor),
+ s.InternalExecutorFactory().(sqlutil.InternalExecutorFactory),
+ kvDB,
+ )
fileTableReadWriter, err := filetable.NewFileToTableSystem(ctx, qualifiedTableName,
executor, username.RootUserName())
require.NoError(t, err)
@@ -158,8 +161,11 @@ func TestReadWriteFile(t *testing.T) {
s, sqlDB, kvDB := serverutils.StartServer(t, params)
defer s.Stopper().Stop(ctx)
- executor := filetable.MakeInternalFileToTableExecutor(s.InternalExecutor().(*sql.
- InternalExecutor), s.CollectionFactory().(*descs.CollectionFactory), kvDB)
+ executor := filetable.MakeInternalFileToTableExecutor(
+ s.InternalExecutor().(*sql.InternalExecutor),
+ s.InternalExecutorFactory().(sqlutil.InternalExecutorFactory),
+ kvDB,
+ )
fileTableReadWriter, err := filetable.NewFileToTableSystem(ctx, qualifiedTableName,
executor, username.RootUserName())
require.NoError(t, err)
@@ -341,8 +347,11 @@ func TestUserGrants(t *testing.T) {
require.NoError(t, err)
// Operate under non-admin user.
- executor := filetable.MakeInternalFileToTableExecutor(s.InternalExecutor().(*sql.
- InternalExecutor), s.CollectionFactory().(*descs.CollectionFactory), kvDB)
+ executor := filetable.MakeInternalFileToTableExecutor(
+ s.InternalExecutor().(*sql.InternalExecutor),
+ s.InternalExecutorFactory().(sqlutil.InternalExecutorFactory),
+ kvDB,
+ )
johnUser := username.MakeSQLUsernameFromPreNormalizedString("john")
fileTableReadWriter, err := filetable.NewFileToTableSystem(ctx, qualifiedTableName,
executor, johnUser)
@@ -425,8 +434,11 @@ func TestDifferentUserDisallowed(t *testing.T) {
require.NoError(t, err)
// Operate under non-admin user john.
- executor := filetable.MakeInternalFileToTableExecutor(s.InternalExecutor().(*sql.
- InternalExecutor), s.CollectionFactory().(*descs.CollectionFactory), kvDB)
+ executor := filetable.MakeInternalFileToTableExecutor(
+ s.InternalExecutor().(*sql.InternalExecutor),
+ s.InternalExecutorFactory().(sqlutil.InternalExecutorFactory),
+ kvDB,
+ )
johnUser := username.MakeSQLUsernameFromPreNormalizedString("john")
fileTableReadWriter, err := filetable.NewFileToTableSystem(ctx, qualifiedTableName,
executor, johnUser)
@@ -483,8 +495,11 @@ func TestDifferentRoleDisallowed(t *testing.T) {
require.NoError(t, err)
// Operate under non-admin user john.
- executor := filetable.MakeInternalFileToTableExecutor(s.InternalExecutor().(*sql.
- InternalExecutor), s.CollectionFactory().(*descs.CollectionFactory), kvDB)
+ executor := filetable.MakeInternalFileToTableExecutor(
+ s.InternalExecutor().(*sql.InternalExecutor),
+ s.InternalExecutorFactory().(sqlutil.InternalExecutorFactory),
+ kvDB,
+ )
johnUser := username.MakeSQLUsernameFromPreNormalizedString("john")
fileTableReadWriter, err := filetable.NewFileToTableSystem(ctx, qualifiedTableName,
executor, johnUser)
@@ -518,8 +533,11 @@ func TestDatabaseScope(t *testing.T) {
s, sqlDB, kvDB := serverutils.StartServer(t, params)
defer s.Stopper().Stop(ctx)
- executor := filetable.MakeInternalFileToTableExecutor(s.InternalExecutor().(*sql.
- InternalExecutor), s.CollectionFactory().(*descs.CollectionFactory), kvDB)
+ executor := filetable.MakeInternalFileToTableExecutor(
+ s.InternalExecutor().(*sql.InternalExecutor),
+ s.InternalExecutorFactory().(sqlutil.InternalExecutorFactory),
+ kvDB,
+ )
fileTableReadWriter, err := filetable.NewFileToTableSystem(ctx, qualifiedTableName,
executor, username.RootUserName())
require.NoError(t, err)
diff --git a/pkg/server/external_storage_builder.go b/pkg/server/external_storage_builder.go
index d2da4d4da08f..70b758dc00de 100644
--- a/pkg/server/external_storage_builder.go
+++ b/pkg/server/external_storage_builder.go
@@ -24,7 +24,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/security/username"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql"
- "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
+ "github.com/cockroachdb/cockroach/pkg/sql/sqlutil"
"github.com/cockroachdb/errors"
)
@@ -39,7 +39,7 @@ type externalStorageBuilder struct {
blobClientFactory blobs.BlobClientFactory
initCalled bool
ie *sql.InternalExecutor
- cf *descs.CollectionFactory
+ ief sqlutil.InternalExecutorFactory
db *kv.DB
limiters cloud.Limiters
recorder multitenant.TenantSideExternalIORecorder
@@ -53,7 +53,7 @@ func (e *externalStorageBuilder) init(
nodeDialer *nodedialer.Dialer,
testingKnobs base.TestingKnobs,
ie *sql.InternalExecutor,
- cf *descs.CollectionFactory,
+ ief sqlutil.InternalExecutorFactory,
db *kv.DB,
recorder multitenant.TenantSideExternalIORecorder,
) {
@@ -69,7 +69,7 @@ func (e *externalStorageBuilder) init(
e.blobClientFactory = blobClientFactory
e.initCalled = true
e.ie = ie
- e.cf = cf
+ e.ief = ief
e.db = db
e.limiters = cloud.MakeLimiters(ctx, &settings.SV)
e.recorder = recorder
@@ -81,8 +81,8 @@ func (e *externalStorageBuilder) makeExternalStorage(
if !e.initCalled {
return nil, errors.New("cannot create external storage before init")
}
- return cloud.MakeExternalStorage(ctx, dest, e.conf, e.settings, e.blobClientFactory, e.ie,
- e.cf, e.db, e.limiters, append(e.defaultOptions(), opts...)...)
+ return cloud.MakeExternalStorage(ctx, dest, e.conf, e.settings, e.blobClientFactory, e.ie, e.ief,
+ e.db, e.limiters, append(e.defaultOptions(), opts...)...)
}
func (e *externalStorageBuilder) makeExternalStorageFromURI(
@@ -92,7 +92,7 @@ func (e *externalStorageBuilder) makeExternalStorageFromURI(
return nil, errors.New("cannot create external storage before init")
}
return cloud.ExternalStorageFromURI(ctx, uri, e.conf, e.settings, e.blobClientFactory,
- user, e.ie, e.cf, e.db, e.limiters, append(e.defaultOptions(), opts...)...)
+ user, e.ie, e.ief, e.db, e.limiters, append(e.defaultOptions(), opts...)...)
}
func (e *externalStorageBuilder) defaultOptions() []cloud.ExternalStorageOption {
diff --git a/pkg/server/server.go b/pkg/server/server.go
index 9aa0414e1980..8f0acf1c0519 100644
--- a/pkg/server/server.go
+++ b/pkg/server/server.go
@@ -64,7 +64,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/spanconfig/spanconfigkvsubscriber"
"github.com/cockroachdb/cockroach/pkg/spanconfig/spanconfigptsreader"
"github.com/cockroachdb/cockroach/pkg/sql"
- "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
_ "github.com/cockroachdb/cockroach/pkg/sql/catalog/schematelemetry" // register schedules declared outside of pkg/sql
"github.com/cockroachdb/cockroach/pkg/sql/catalog/systemschema"
"github.com/cockroachdb/cockroach/pkg/sql/flowinfra"
@@ -483,8 +482,8 @@ func NewServer(cfg Config, stopper *stop.Stopper) (*Server, error) {
// which in turn needs many things. That's why everybody that needs an
// InternalExecutor uses this one instance.
internalExecutor := &sql.InternalExecutor{}
+ internalExecutorFactory := &sql.InternalExecutorFactory{}
jobRegistry := &jobs.Registry{} // ditto
- collectionFactory := &descs.CollectionFactory{}
// Create an ExternalStorageBuilder. This is only usable after Start() where
// we initialize all the configuration params.
@@ -834,8 +833,7 @@ func NewServer(cfg Config, stopper *stop.Stopper) (*Server, error) {
closedSessionCache: closedSessionCache,
flowScheduler: flowScheduler,
circularInternalExecutor: internalExecutor,
- collectionFactory: collectionFactory,
- internalExecutorFactory: nil, // will be initialized in server.newSQLServer.
+ internalExecutorFactory: internalExecutorFactory,
circularJobRegistry: jobRegistry,
jobAdoptionStopFile: jobAdoptionStopFile,
protectedtsProvider: protectedtsProvider,
@@ -1082,7 +1080,7 @@ func (s *Server) PreStart(ctx context.Context) error {
s.nodeDialer,
s.cfg.TestingKnobs,
&fileTableInternalExecutor,
- s.sqlServer.execCfg.CollectionFactory,
+ s.sqlServer.execCfg.InternalExecutorFactory,
s.db,
nil, /* TenantExternalIORecorder */
)
diff --git a/pkg/server/server_sql.go b/pkg/server/server_sql.go
index 3ca860a2d729..e40477851526 100644
--- a/pkg/server/server_sql.go
+++ b/pkg/server/server_sql.go
@@ -306,10 +306,8 @@ type sqlServerArgs struct {
// TODO(tbg): make this less hacky.
circularInternalExecutor *sql.InternalExecutor // empty initially
- collectionFactory *descs.CollectionFactory
-
// internalExecutorFactory is used to initialize an internal executor.
- internalExecutorFactory sqlutil.InternalExecutorFactory
+ internalExecutorFactory *sql.InternalExecutorFactory
// Stores and deletes expired liveness sessions.
sqlLivenessProvider sqlliveness.Provider
@@ -990,8 +988,7 @@ func newSQLServer(ctx context.Context, cfg sqlServerArgs) (*SQLServer, error) {
cfg.registry.AddMetricStruct(m)
}
*cfg.circularInternalExecutor = sql.MakeInternalExecutor(pgServer.SQLServer, internalMemMetrics, ieFactoryMonitor)
- *cfg.collectionFactory = *collectionFactory
- cfg.internalExecutorFactory = ieFactory
+ *cfg.internalExecutorFactory = *ieFactory
execCfg.InternalExecutor = cfg.circularInternalExecutor
stmtDiagnosticsRegistry := stmtdiagnostics.NewRegistry(
@@ -1088,6 +1085,7 @@ func newSQLServer(ctx context.Context, cfg sqlServerArgs) (*SQLServer, error) {
cfg.sqlStatusServer,
cfg.isMeta1Leaseholder,
sqlExecutorTestingKnobs,
+ ieFactory,
collectionFactory,
)
diff --git a/pkg/server/tenant.go b/pkg/server/tenant.go
index e83e31883b9d..08230213edf0 100644
--- a/pkg/server/tenant.go
+++ b/pkg/server/tenant.go
@@ -42,7 +42,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/server/systemconfigwatcher"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql"
- "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
"github.com/cockroachdb/cockroach/pkg/sql/flowinfra"
"github.com/cockroachdb/cockroach/pkg/sql/optionalnodeliveness"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondatapb"
@@ -500,8 +499,8 @@ func makeTenantSQLServerArgs(
)
circularInternalExecutor := &sql.InternalExecutor{}
+ internalExecutorFactory := &sql.InternalExecutorFactory{}
circularJobRegistry := &jobs.Registry{}
- collectionFactory := &descs.CollectionFactory{}
// Initialize the protectedts subsystem in multi-tenant clusters.
var protectedTSProvider protectedts.Provider
@@ -541,7 +540,7 @@ func makeTenantSQLServerArgs(
nodeDialer,
baseCfg.TestingKnobs,
circularInternalExecutor,
- collectionFactory,
+ internalExecutorFactory,
db,
costController,
)
@@ -610,7 +609,7 @@ func makeTenantSQLServerArgs(
sessionRegistry: sessionRegistry,
flowScheduler: flowScheduler,
circularInternalExecutor: circularInternalExecutor,
- collectionFactory: collectionFactory,
+ internalExecutorFactory: internalExecutorFactory,
circularJobRegistry: circularJobRegistry,
protectedtsProvider: protectedTSProvider,
rangeFeedFactory: rangeFeedFactory,
diff --git a/pkg/server/testserver.go b/pkg/server/testserver.go
index 07df983ad22c..646a69c1bc80 100644
--- a/pkg/server/testserver.go
+++ b/pkg/server/testserver.go
@@ -1107,6 +1107,11 @@ func (ts *TestServer) InternalExecutor() interface{} {
return ts.sqlServer.internalExecutor
}
+// InternalExecutorFactory is part of TestServerInterface.
+func (ts *TestServer) InternalExecutorFactory() interface{} {
+ return ts.sqlServer.internalExecutorFactory
+}
+
// GetNode exposes the Server's Node.
func (ts *TestServer) GetNode() *Node {
return ts.node
diff --git a/pkg/sql/conn_executor.go b/pkg/sql/conn_executor.go
index 91ead9cc1415..ec023733f6ec 100644
--- a/pkg/sql/conn_executor.go
+++ b/pkg/sql/conn_executor.go
@@ -1119,7 +1119,7 @@ func (ex *connExecutor) close(ctx context.Context, closeType closeType) {
err := cleanupSessionTempObjects(
ctx,
ex.server.cfg.Settings,
- ex.server.cfg.CollectionFactory,
+ ex.server.cfg.InternalExecutorFactory,
ex.server.cfg.DB,
ex.server.cfg.Codec,
ex.sessionID,
diff --git a/pkg/sql/importer/BUILD.bazel b/pkg/sql/importer/BUILD.bazel
index 096007ca1f44..c15bf93ea286 100644
--- a/pkg/sql/importer/BUILD.bazel
+++ b/pkg/sql/importer/BUILD.bazel
@@ -202,6 +202,7 @@ go_test(
"//pkg/sql/sem/eval",
"//pkg/sql/sem/tree",
"//pkg/sql/sessiondata",
+ "//pkg/sql/sqlutil",
"//pkg/sql/stats",
"//pkg/sql/tests",
"//pkg/sql/types",
diff --git a/pkg/sql/importer/import_processor_test.go b/pkg/sql/importer/import_processor_test.go
index eade356105f7..e0b15e3230a6 100644
--- a/pkg/sql/importer/import_processor_test.go
+++ b/pkg/sql/importer/import_processor_test.go
@@ -881,7 +881,7 @@ func externalStorageFactory(
return cloud.MakeExternalStorage(ctx, dest, base.ExternalIODirConfig{},
nil, blobs.TestBlobServiceClient(workdir),
nil, /* ie */
- nil, /* cf */
+ nil, /* ief */
nil, /* kvDB */
nil, /* limiters */
)
diff --git a/pkg/sql/importer/import_stmt_test.go b/pkg/sql/importer/import_stmt_test.go
index 5196278dab04..66bf29231c61 100644
--- a/pkg/sql/importer/import_stmt_test.go
+++ b/pkg/sql/importer/import_stmt_test.go
@@ -55,6 +55,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/sql/row"
"github.com/cockroachdb/cockroach/pkg/sql/sem/eval"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
+ "github.com/cockroachdb/cockroach/pkg/sql/sqlutil"
"github.com/cockroachdb/cockroach/pkg/sql/stats"
"github.com/cockroachdb/cockroach/pkg/sql/tests"
"github.com/cockroachdb/cockroach/pkg/storage"
@@ -2702,9 +2703,9 @@ func TestImportObjectLevelRBAC(t *testing.T) {
writeToUserfile := func(filename, data string) {
// Write to userfile storage now that testuser has CREATE privileges.
ie := tc.Server(0).InternalExecutor().(*sql.InternalExecutor)
- cf := tc.Server(0).CollectionFactory().(*descs.CollectionFactory)
+ ief := tc.Server(0).InternalExecutorFactory().(sqlutil.InternalExecutorFactory)
fileTableSystem1, err := cloud.ExternalStorageFromURI(ctx, dest, base.ExternalIODirConfig{},
- cluster.NoSettings, blobs.TestEmptyBlobClientFactory, username.TestUserName(), ie, cf, tc.Server(0).DB(), nil)
+ cluster.NoSettings, blobs.TestEmptyBlobClientFactory, username.TestUserName(), ie, ief, tc.Server(0).DB(), nil)
require.NoError(t, err)
require.NoError(t, cloud.WriteFile(ctx, fileTableSystem1, filename, bytes.NewReader([]byte(data))))
}
@@ -5852,7 +5853,7 @@ func TestImportPgDumpIgnoredStmts(t *testing.T) {
blobs.TestEmptyBlobClientFactory,
username.RootUserName(),
tc.Server(0).InternalExecutor().(*sql.InternalExecutor),
- tc.Server(0).CollectionFactory().(*descs.CollectionFactory),
+ tc.Server(0).InternalExecutorFactory().(sqlutil.InternalExecutorFactory),
tc.Server(0).DB(),
nil,
)
diff --git a/pkg/sql/temporary_schema.go b/pkg/sql/temporary_schema.go
index 0bf2615bf43e..4342de9f6b1b 100644
--- a/pkg/sql/temporary_schema.go
+++ b/pkg/sql/temporary_schema.go
@@ -169,45 +169,46 @@ func temporarySchemaSessionID(scName string) (bool, clusterunique.ID, error) {
func cleanupSessionTempObjects(
ctx context.Context,
settings *cluster.Settings,
- cf *descs.CollectionFactory,
+ ief sqlutil.InternalExecutorFactory,
db *kv.DB,
codec keys.SQLCodec,
sessionID clusterunique.ID,
) error {
tempSchemaName := temporarySchemaName(sessionID)
- return cf.TxnWithExecutor(ctx, db, nil /* sessionData */, func(
- ctx context.Context, txn *kv.Txn, descsCol *descs.Collection,
- ie sqlutil.InternalExecutor,
- ) error {
- // We are going to read all database descriptor IDs, then for each database
- // we will drop all the objects under the temporary schema.
- allDbDescs, err := descsCol.GetAllDatabaseDescriptors(ctx, txn)
- if err != nil {
- return err
- }
- for _, dbDesc := range allDbDescs {
- if err := cleanupSchemaObjects(
- ctx,
- txn,
- descsCol,
- codec,
- ie,
- dbDesc,
- tempSchemaName,
- ); err != nil {
+ return ief.(descs.InternalExecutorFactoryWithTxn).DescsTxnWithExecutor(
+ ctx, db, nil /* sessionData */, func(
+ ctx context.Context, txn *kv.Txn, descsCol *descs.Collection,
+ ie sqlutil.InternalExecutor,
+ ) error {
+ // We are going to read all database descriptor IDs, then for each database
+ // we will drop all the objects under the temporary schema.
+ allDbDescs, err := descsCol.GetAllDatabaseDescriptors(ctx, txn)
+ if err != nil {
return err
}
- // Even if no objects were found under the temporary schema, the schema
- // itself may still exist (eg. a temporary table was created and then
- // dropped). So we remove the namespace table entry of the temporary
- // schema.
- key := catalogkeys.MakeSchemaNameKey(codec, dbDesc.GetID(), tempSchemaName)
- if _, err := txn.Del(ctx, key); err != nil {
- return err
+ for _, dbDesc := range allDbDescs {
+ if err := cleanupSchemaObjects(
+ ctx,
+ txn,
+ descsCol,
+ codec,
+ ie,
+ dbDesc,
+ tempSchemaName,
+ ); err != nil {
+ return err
+ }
+ // Even if no objects were found under the temporary schema, the schema
+ // itself may still exist (eg. a temporary table was created and then
+ // dropped). So we remove the namespace table entry of the temporary
+ // schema.
+ key := catalogkeys.MakeSchemaNameKey(codec, dbDesc.GetID(), tempSchemaName)
+ if _, err := txn.Del(ctx, key); err != nil {
+ return err
+ }
}
- }
- return nil
- })
+ return nil
+ })
}
// cleanupSchemaObjects removes all objects that are located within a dbID and schema.
@@ -400,11 +401,12 @@ type TemporaryObjectCleaner struct {
db *kv.DB
codec keys.SQLCodec
// statusServer gives access to the SQLStatus service.
- statusServer serverpb.SQLStatusServer
- isMeta1LeaseholderFunc isMeta1LeaseholderFunc
- testingKnobs ExecutorTestingKnobs
- metrics *temporaryObjectCleanerMetrics
- collectionFactory *descs.CollectionFactory
+ statusServer serverpb.SQLStatusServer
+ isMeta1LeaseholderFunc isMeta1LeaseholderFunc
+ testingKnobs ExecutorTestingKnobs
+ metrics *temporaryObjectCleanerMetrics
+ collectionFactory *descs.CollectionFactory
+ internalExecutorFactory sqlutil.InternalExecutorFactory
}
// temporaryObjectCleanerMetrics are the metrics for TemporaryObjectCleaner
@@ -430,19 +432,21 @@ func NewTemporaryObjectCleaner(
statusServer serverpb.SQLStatusServer,
isMeta1LeaseholderFunc isMeta1LeaseholderFunc,
testingKnobs ExecutorTestingKnobs,
+ ief sqlutil.InternalExecutorFactory,
cf *descs.CollectionFactory,
) *TemporaryObjectCleaner {
metrics := makeTemporaryObjectCleanerMetrics()
registry.AddMetricStruct(metrics)
return &TemporaryObjectCleaner{
- settings: settings,
- db: db,
- codec: codec,
- statusServer: statusServer,
- isMeta1LeaseholderFunc: isMeta1LeaseholderFunc,
- testingKnobs: testingKnobs,
- metrics: metrics,
- collectionFactory: cf,
+ settings: settings,
+ db: db,
+ codec: codec,
+ statusServer: statusServer,
+ isMeta1LeaseholderFunc: isMeta1LeaseholderFunc,
+ testingKnobs: testingKnobs,
+ metrics: metrics,
+ internalExecutorFactory: ief,
+ collectionFactory: cf,
}
}
@@ -590,7 +594,7 @@ func (c *TemporaryObjectCleaner) doTemporaryObjectCleanup(
return cleanupSessionTempObjects(
ctx,
c.settings,
- c.collectionFactory,
+ c.internalExecutorFactory,
c.db,
c.codec,
sessionID,
diff --git a/pkg/testutils/serverutils/test_server_shim.go b/pkg/testutils/serverutils/test_server_shim.go
index 72900afae110..9f7aa32f21f7 100644
--- a/pkg/testutils/serverutils/test_server_shim.go
+++ b/pkg/testutils/serverutils/test_server_shim.go
@@ -146,6 +146,10 @@ type TestServerInterface interface {
// also implements sqlutil.InternalExecutor if the test cannot depend on sql).
InternalExecutor() interface{}
+ // InternalExecutorFactory returns a
+ // sqlutil.InternalExecutorFactory as an interface{}.
+ InternalExecutorFactory() interface{}
+
// TracerI returns a *tracing.Tracer as an interface{}.
TracerI() interface{}
From 4954b7017aefd9aad6659a57033f966f83a94c14 Mon Sep 17 00:00:00 2001
From: Jane Xing
Date: Thu, 29 Sep 2022 22:07:23 -0500
Subject: [PATCH 08/16] *: replace `descs.CollectionFactory.Txn()` and
`.TxnWithExecutor()` with new interfaces
We now use `descs.TxnManager.DescsTxn()` and
`.DescsTxnWithExecutor()` to replace `descs.CollectionFactory.Txn()`
and `.TxnWithExecutor()`.
Release note: None
---
pkg/ccl/backupccl/restore_job.go | 2 +-
pkg/ccl/backupccl/restore_planning.go | 2 +-
.../changefeedccl/schemafeed/schema_feed.go | 26 ++--
.../filetable/file_table_read_writer.go | 4 +-
pkg/server/api_v2_sql.go | 9 +-
pkg/server/server_sql.go | 9 +-
pkg/server/status.go | 2 +-
pkg/sql/authorization_test.go | 4 +-
pkg/sql/catalog/descs/BUILD.bazel | 1 -
pkg/sql/catalog/descs/collection_test.go | 2 +-
pkg/sql/catalog/descs/factory.go | 26 +---
pkg/sql/catalog/descs/system_table.go | 14 +-
pkg/sql/catalog/descs/txn.go | 146 ------------------
pkg/sql/catalog/descs/txn_external_test.go | 7 +-
.../txn_with_executor_datadriven_test.go | 4 +-
pkg/sql/exec_util.go | 8 +-
pkg/sql/execinfra/server_config.go | 2 +-
pkg/sql/internal.go | 121 ++++++---------
pkg/sql/schema_change_plan_node.go | 2 +-
pkg/sql/schema_changer.go | 6 +-
pkg/sql/schemachanger/scdeps/run_deps.go | 63 ++++----
.../scexec/executor_external_test.go | 4 +-
pkg/sql/schemachanger/scjob/job.go | 1 +
pkg/sql/sessioninit/BUILD.bazel | 2 -
pkg/sql/sessioninit/cache.go | 23 ++-
pkg/sql/sessioninit/cache_test.go | 24 ++-
pkg/sql/stats/automatic_stats.go | 2 +-
pkg/sql/stats/automatic_stats_test.go | 14 +-
pkg/sql/stats/delete_stats_test.go | 4 +-
pkg/sql/stats/stats_cache.go | 15 +-
pkg/sql/stats/stats_cache_test.go | 8 +-
pkg/sql/temporary_schema.go | 2 +-
pkg/sql/temporary_schema_test.go | 4 +-
pkg/sql/user.go | 73 +++++----
pkg/upgrade/tenant_upgrade.go | 17 +-
pkg/upgrade/upgradejob/upgrade_job.go | 19 +--
pkg/upgrade/upgrades/descriptor_utils.go | 2 +-
pkg/upgrade/upgrades/helpers_test.go | 4 +-
pkg/upgrade/upgrades/schema_changes.go | 2 +-
.../upgrades/schema_changes_external_test.go | 2 +-
..._column_ids_in_sequence_back_references.go | 2 +-
...upgrade_sequence_to_be_referenced_by_ID.go | 4 +-
42 files changed, 252 insertions(+), 436 deletions(-)
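For illustration, here is a minimal sketch of the call-pattern change this series makes. The wrapper function is invented for the example and assumes an *sql.ExecutorConfig plus the usual context/kv/descs imports:

    // descsTxnSketch shows the replacement: descriptor-aware transactions
    // now go through the TxnManager implemented by the internal executor
    // factory instead of through CollectionFactory.Txn.
    func descsTxnSketch(ctx context.Context, execCfg *sql.ExecutorConfig) error {
        // Old: return execCfg.CollectionFactory.Txn(ctx, execCfg.DB, fn)
        return execCfg.InternalExecutorFactory.DescsTxn(ctx, execCfg.DB, func(
            ctx context.Context, txn *kv.Txn, descriptors *descs.Collection,
        ) error {
            return nil // resolve or mutate descriptors here
        })
    }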
diff --git a/pkg/ccl/backupccl/restore_job.go b/pkg/ccl/backupccl/restore_job.go
index 6f91d17a07c7..2a360bfaf42e 100644
--- a/pkg/ccl/backupccl/restore_job.go
+++ b/pkg/ccl/backupccl/restore_job.go
@@ -2223,7 +2223,7 @@ func (r *restoreResumer) OnFailOrCancel(
logJobCompletion(ctx, restoreJobEventType, r.job.ID(), false, jobErr)
execCfg := execCtx.(sql.JobExecContext).ExecCfg()
- if err := execCfg.CollectionFactory.TxnWithExecutor(ctx, execCfg.DB, p.SessionData(), func(
+ if err := execCfg.InternalExecutorFactory.DescsTxnWithExecutor(ctx, execCfg.DB, p.SessionData(), func(
ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, ie sqlutil.InternalExecutor,
) error {
for _, tenant := range details.Tenants {
diff --git a/pkg/ccl/backupccl/restore_planning.go b/pkg/ccl/backupccl/restore_planning.go
index 061afb7c38c7..2c38c27f71ec 100644
--- a/pkg/ccl/backupccl/restore_planning.go
+++ b/pkg/ccl/backupccl/restore_planning.go
@@ -706,7 +706,7 @@ func getDatabaseIDAndDesc(
// as regular databases, we drop them before restoring them again in the
// restore.
func dropDefaultUserDBs(ctx context.Context, execCfg *sql.ExecutorConfig) error {
- return execCfg.CollectionFactory.TxnWithExecutor(ctx, execCfg.DB, nil /* session data */, func(
+ return execCfg.InternalExecutorFactory.DescsTxnWithExecutor(ctx, execCfg.DB, nil /* session data */, func(
ctx context.Context, txn *kv.Txn, _ *descs.Collection, ie sqlutil.InternalExecutor,
) error {
_, err := ie.Exec(ctx, "drop-defaultdb", txn, "DROP DATABASE IF EXISTS defaultdb")
diff --git a/pkg/ccl/changefeedccl/schemafeed/schema_feed.go b/pkg/ccl/changefeedccl/schemafeed/schema_feed.go
index e1dcce9884c6..29a8078adabe 100644
--- a/pkg/ccl/changefeedccl/schemafeed/schema_feed.go
+++ b/pkg/ccl/changefeedccl/schemafeed/schema_feed.go
@@ -84,15 +84,16 @@ func New(
tolerances changefeedbase.CanHandle,
) SchemaFeed {
m := &schemaFeed{
- filter: schemaChangeEventFilters[events],
- db: cfg.DB,
- clock: cfg.DB.Clock(),
- settings: cfg.Settings,
- targets: targets,
- leaseMgr: cfg.LeaseManager.(*lease.Manager),
- collectionFactory: cfg.CollectionFactory,
- metrics: metrics,
- tolerances: tolerances,
+ filter: schemaChangeEventFilters[events],
+ db: cfg.DB,
+ clock: cfg.DB.Clock(),
+ settings: cfg.Settings,
+ targets: targets,
+ leaseMgr: cfg.LeaseManager.(*lease.Manager),
+ collectionFactory: cfg.CollectionFactory,
+ internalExecutorFactory: cfg.InternalExecutorFactory,
+ metrics: metrics,
+ tolerances: tolerances,
}
m.mu.previousTableVersion = make(map[descpb.ID]catalog.TableDescriptor)
m.mu.highWater = initialHighwater
@@ -122,8 +123,9 @@ type schemaFeed struct {
// TODO(ajwerner): Should this live underneath the FilterFunc?
// Should there be another function to decide whether to update the
// lease manager?
- leaseMgr *lease.Manager
- collectionFactory *descs.CollectionFactory
+ leaseMgr *lease.Manager
+ collectionFactory *descs.CollectionFactory
+ internalExecutorFactory descs.TxnManager
mu struct {
syncutil.Mutex
@@ -291,7 +293,7 @@ func (tf *schemaFeed) primeInitialTableDescs(ctx context.Context) error {
})
}
- if err := tf.collectionFactory.Txn(ctx, tf.db, initialTableDescsFn); err != nil {
+ if err := tf.internalExecutorFactory.DescsTxn(ctx, tf.db, initialTableDescsFn); err != nil {
return err
}
diff --git a/pkg/cloud/userfile/filetable/file_table_read_writer.go b/pkg/cloud/userfile/filetable/file_table_read_writer.go
index 1f8f16f78f09..8c80b27d850f 100644
--- a/pkg/cloud/userfile/filetable/file_table_read_writer.go
+++ b/pkg/cloud/userfile/filetable/file_table_read_writer.go
@@ -245,7 +245,7 @@ func NewFileToTableSystem(
if err != nil {
return nil, err
}
- if err := e.ief.(descs.InternalExecutorFactoryWithTxn).DescsTxnWithExecutor(
+ if err := e.ief.(descs.TxnManager).DescsTxnWithExecutor(
ctx, e.db, nil /* SessionData */, func(
ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, ie sqlutil.InternalExecutor,
) error {
@@ -367,7 +367,7 @@ func DestroyUserFileSystem(ctx context.Context, f *FileToTableSystem) error {
return err
}
- if err := e.ief.(descs.InternalExecutorFactoryWithTxn).DescsTxnWithExecutor(
+ if err := e.ief.(descs.TxnManager).DescsTxnWithExecutor(
ctx, e.db, nil /* sd */, func(
ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, ie sqlutil.InternalExecutor,
) error {
diff --git a/pkg/server/api_v2_sql.go b/pkg/server/api_v2_sql.go
index 7e57d076a9c6..93f69d9a5589 100644
--- a/pkg/server/api_v2_sql.go
+++ b/pkg/server/api_v2_sql.go
@@ -21,7 +21,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo"
- "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
"github.com/cockroachdb/cockroach/pkg/sql/parser"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
@@ -373,13 +372,13 @@ func (a *apiV2Server) execSQL(w http.ResponseWriter, r *http.Request) {
// We need a transaction to group the statements together.
// We use TxnWithSteppingEnabled here even though we don't
// use stepping below, because that buys us admission control.
- cf := a.admin.server.sqlServer.execCfg.CollectionFactory
+ ief := a.admin.server.sqlServer.execCfg.InternalExecutorFactory
runner = func(ctx context.Context, fn txnFunc) error {
- return cf.TxnWithExecutor(ctx, a.admin.server.db, nil, func(
- ctx context.Context, txn *kv.Txn, _ *descs.Collection, ie sqlutil.InternalExecutor,
+ return ief.TxnWithExecutor(ctx, a.admin.server.db, nil /* sessionData */, func(
+ ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor,
) error {
return fn(ctx, txn, ie)
- }, descs.SteppingEnabled())
+ }, sqlutil.SteppingEnabled())
}
} else {
runner = func(ctx context.Context, fn func(context.Context, *kv.Txn, sqlutil.InternalExecutor) error) error {
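For comparison, the executor-only variant used by the runner above: the closure no longer receives a *descs.Collection, and the stepping option now comes from sqlutil rather than descs. A hedged sketch; the helper name and the statement are placeholders:

    func steppedTxnSketch(
        ctx context.Context, ief sqlutil.InternalExecutorFactory, db *kv.DB,
    ) error {
        return ief.TxnWithExecutor(ctx, db, nil /* sessionData */, func(
            ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor,
        ) error {
            // With stepping enabled, the internal executor steps the txn
            // before each statement it runs.
            _, err := ie.Exec(ctx, "example-op", txn, "SELECT 1")
            return err
        }, sqlutil.SteppingEnabled())
    }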
diff --git a/pkg/server/server_sql.go b/pkg/server/server_sql.go
index e40477851526..86a4d4322856 100644
--- a/pkg/server/server_sql.go
+++ b/pkg/server/server_sql.go
@@ -94,7 +94,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/sql/sqlliveness"
"github.com/cockroachdb/cockroach/pkg/sql/sqlliveness/slprovider"
"github.com/cockroachdb/cockroach/pkg/sql/sqlstats"
- "github.com/cockroachdb/cockroach/pkg/sql/sqlutil"
"github.com/cockroachdb/cockroach/pkg/sql/stats"
"github.com/cockroachdb/cockroach/pkg/sql/stmtdiagnostics"
"github.com/cockroachdb/cockroach/pkg/startupmigrations"
@@ -140,7 +139,7 @@ type SQLServer struct {
execCfg *sql.ExecutorConfig
cfg *BaseConfig
internalExecutor *sql.InternalExecutor
- internalExecutorFactory sqlutil.InternalExecutorFactory
+ internalExecutorFactory descs.TxnManager
leaseMgr *lease.Manager
blobService *blobs.Service
tracingService *service.Service
@@ -818,7 +817,7 @@ func newSQLServer(ctx context.Context, cfg sqlServerArgs) (*SQLServer, error) {
cfg.db,
cfg.circularInternalExecutor,
cfg.Settings,
- collectionFactory,
+ cfg.internalExecutorFactory,
),
QueryCache: querycache.New(cfg.QueryCacheSize),
@@ -829,7 +828,7 @@ func newSQLServer(ctx context.Context, cfg sqlServerArgs) (*SQLServer, error) {
GCJobNotifier: gcJobNotifier,
RangeFeedFactory: cfg.rangeFeedFactory,
CollectionFactory: collectionFactory,
- SystemTableIDResolver: descs.MakeSystemTableIDResolver(collectionFactory, cfg.db),
+ SystemTableIDResolver: descs.MakeSystemTableIDResolver(collectionFactory, cfg.internalExecutorFactory, cfg.db),
ConsistencyChecker: consistencychecker.NewConsistencyChecker(cfg.db),
RangeProber: rangeprober.NewRangeProber(cfg.db),
DescIDGenerator: descidgen.NewGenerator(codec, cfg.db),
@@ -965,8 +964,6 @@ func newSQLServer(ctx context.Context, cfg sqlServerArgs) (*SQLServer, error) {
ieFactoryMonitor,
)
- collectionFactory.SetInternalExecutorWithTxn(ieFactory)
-
distSQLServer.ServerConfig.InternalExecutorFactory = ieFactory
jobRegistry.SetInternalExecutorFactory(ieFactory)
execCfg.IndexBackfiller = sql.NewIndexBackfiller(execCfg)
diff --git a/pkg/server/status.go b/pkg/server/status.go
index fbedc6c9ea15..0a8f8b2c71b3 100644
--- a/pkg/server/status.go
+++ b/pkg/server/status.go
@@ -2428,7 +2428,7 @@ func (s *statusServer) HotRangesV2(
schemaName = meta.(tableMeta).schemaName
indexName = meta.(tableMeta).indexName
} else {
- if err = s.sqlServer.distSQLServer.CollectionFactory.TxnWithExecutor(
+ if err = s.sqlServer.distSQLServer.InternalExecutorFactory.DescsTxnWithExecutor(
ctx, s.db, nil, func(ctx context.Context, txn *kv.Txn, col *descs.Collection, ie sqlutil.InternalExecutor) error {
commonLookupFlags := tree.CommonLookupFlags{
Required: false,
diff --git a/pkg/sql/authorization_test.go b/pkg/sql/authorization_test.go
index c628a982382f..383725a14052 100644
--- a/pkg/sql/authorization_test.go
+++ b/pkg/sql/authorization_test.go
@@ -40,9 +40,9 @@ func TestCheckAnyPrivilegeForNodeUser(t *testing.T) {
require.NotNil(t, ts.InternalExecutor())
- cf := ts.CollectionFactory().(*descs.CollectionFactory)
+ ief := ts.InternalExecutorFactory().(descs.TxnManager)
- if err := cf.TxnWithExecutor(ctx, s.DB(), nil, func(
+ if err := ief.DescsTxnWithExecutor(ctx, s.DB(), nil /* sessionData */, func(
ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, ie sqlutil.InternalExecutor,
) error {
row, err := ie.QueryRowEx(
diff --git a/pkg/sql/catalog/descs/BUILD.bazel b/pkg/sql/catalog/descs/BUILD.bazel
index 52d4a1e9d4ec..4f4fdb62fa01 100644
--- a/pkg/sql/catalog/descs/BUILD.bazel
+++ b/pkg/sql/catalog/descs/BUILD.bazel
@@ -55,7 +55,6 @@ go_library(
"//pkg/sql/sem/catconstants",
"//pkg/sql/sem/tree",
"//pkg/sql/sessiondata",
- "//pkg/sql/sessiondatapb",
"//pkg/sql/sqlerrors",
"//pkg/sql/sqlliveness",
"//pkg/sql/sqlutil",
diff --git a/pkg/sql/catalog/descs/collection_test.go b/pkg/sql/catalog/descs/collection_test.go
index e75b9afd560d..3a7df5677c33 100644
--- a/pkg/sql/catalog/descs/collection_test.go
+++ b/pkg/sql/catalog/descs/collection_test.go
@@ -1036,7 +1036,7 @@ SELECT id
ec := s.ExecutorConfig().(sql.ExecutorConfig)
codec := ec.Codec
descIDGen := ec.DescIDGenerator
- require.NoError(t, s.CollectionFactory().(*descs.CollectionFactory).TxnWithExecutor(ctx, s.DB(), nil /* sessionData */, func(
+ require.NoError(t, ec.InternalExecutorFactory.DescsTxnWithExecutor(ctx, s.DB(), nil /* sessionData */, func(
ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, ie sqlutil.InternalExecutor,
) error {
checkImmutableDescriptor := func(id descpb.ID, expName string, f func(t *testing.T, desc catalog.Descriptor)) error {
diff --git a/pkg/sql/catalog/descs/factory.go b/pkg/sql/catalog/descs/factory.go
index 401cc6a5ae99..32fb3bf1040e 100644
--- a/pkg/sql/catalog/descs/factory.go
+++ b/pkg/sql/catalog/descs/factory.go
@@ -15,7 +15,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
- "github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/spanconfig"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
@@ -38,7 +37,6 @@ type CollectionFactory struct {
spanConfigSplitter spanconfig.Splitter
spanConfigLimiter spanconfig.Limiter
defaultMonitor *mon.BytesMonitor
- ieFactoryWithTxn InternalExecutorFactoryWithTxn
}
// GetClusterSettings returns the cluster setting from the collection factory.
@@ -46,18 +44,10 @@ func (cf *CollectionFactory) GetClusterSettings() *cluster.Settings {
return cf.settings
}
-// InternalExecutorFactoryWithTxn is used to create an internal executor
-// with associated extra txn state information.
-// It should only be used as a field hanging off CollectionFactory.
-type InternalExecutorFactoryWithTxn interface {
- MemoryMonitor() *mon.BytesMonitor
-
- NewInternalExecutorWithTxn(
- sd *sessiondata.SessionData,
- sv *settings.Values,
- txn *kv.Txn,
- descCol *Collection,
- ) (sqlutil.InternalExecutor, InternalExecutorCommitTxnFunc)
+// TxnManager is used to enable running multiple queries with an internal
+// executor in a transactional manner.
+type TxnManager interface {
+ sqlutil.InternalExecutorFactory
// DescsTxnWithExecutor enables using an internal executor to run sql
// statements in a transactional manner. It creates a descriptor collection
@@ -138,11 +128,3 @@ func (cf *CollectionFactory) NewCollection(
return newCollection(ctx, cf.leaseMgr, cf.settings, cf.codec, cf.hydrated, cf.systemDatabase,
cf.virtualSchemas, temporarySchemaProvider, monitor)
}
-
-// SetInternalExecutorWithTxn is to set the internal executor factory hanging
-// off the collection factory.
-func (cf *CollectionFactory) SetInternalExecutorWithTxn(
- ieFactoryWithTxn InternalExecutorFactoryWithTxn,
-) {
- cf.ieFactoryWithTxn = ieFactoryWithTxn
-}
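Since TxnManager embeds sqlutil.InternalExecutorFactory, a single dependency now covers both plain internal execution and descriptor-aware transactions. A minimal sketch of the combined entry point; the op name and statement are placeholders:

    func descsTxnWithExecutorSketch(ctx context.Context, ief descs.TxnManager, db *kv.DB) error {
        return ief.DescsTxnWithExecutor(ctx, db, nil /* sessionData */, func(
            ctx context.Context, txn *kv.Txn, descriptors *descs.Collection,
            ie sqlutil.InternalExecutor,
        ) error {
            // The executor shares both the txn and the descriptor collection.
            _, err := ie.Exec(ctx, "example-op", txn, "SELECT 1")
            return err
        })
    }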
diff --git a/pkg/sql/catalog/descs/system_table.go b/pkg/sql/catalog/descs/system_table.go
index 0191ea1c1149..b97b32d16afa 100644
--- a/pkg/sql/catalog/descs/system_table.go
+++ b/pkg/sql/catalog/descs/system_table.go
@@ -21,19 +21,21 @@ import (
// systemTableIDResolver is the implementation for catalog.SystemTableIDResolver.
type systemTableIDResolver struct {
- collectionFactory *CollectionFactory
- db *kv.DB
+ collectionFactory *CollectionFactory
+ internalExecutorFactory TxnManager
+ db *kv.DB
}
var _ catalog.SystemTableIDResolver = (*systemTableIDResolver)(nil)
// MakeSystemTableIDResolver creates an object that implements catalog.SystemTableIDResolver.
func MakeSystemTableIDResolver(
- collectionFactory *CollectionFactory, db *kv.DB,
+ collectionFactory *CollectionFactory, internalExecutorFactory TxnManager, db *kv.DB,
) catalog.SystemTableIDResolver {
return &systemTableIDResolver{
- collectionFactory: collectionFactory,
- db: db,
+ collectionFactory: collectionFactory,
+ internalExecutorFactory: internalExecutorFactory,
+ db: db,
}
}
@@ -43,7 +45,7 @@ func (r *systemTableIDResolver) LookupSystemTableID(
) (descpb.ID, error) {
var id descpb.ID
- if err := r.collectionFactory.Txn(ctx, r.db, func(
+ if err := r.internalExecutorFactory.DescsTxn(ctx, r.db, func(
ctx context.Context, txn *kv.Txn, descriptors *Collection,
) (err error) {
id, err = descriptors.stored.LookupDescriptorID(
diff --git a/pkg/sql/catalog/descs/txn.go b/pkg/sql/catalog/descs/txn.go
index 0b3bba3168ef..559ba6524d8f 100644
--- a/pkg/sql/catalog/descs/txn.go
+++ b/pkg/sql/catalog/descs/txn.go
@@ -12,74 +12,19 @@ package descs
import (
"context"
- "time"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/spanconfig"
- "github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/lease"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
- "github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
- "github.com/cockroachdb/cockroach/pkg/sql/sessiondatapb"
"github.com/cockroachdb/cockroach/pkg/sql/sqlutil"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/retry"
"github.com/cockroachdb/errors"
)
-// Txn enables callers to run transactions with a *Collection such that all
-// retrieved immutable descriptors are properly leased and all mutable
-// descriptors are handled. The function deals with verifying the two version
-// invariant and retrying when it is violated. Callers need not worry that they
-// write mutable descriptors multiple times. The call will explicitly wait for
-// the leases to drain on old versions of descriptors modified or deleted in the
-// transaction; callers do not need to call lease.WaitForOneVersion.
-//
-// The passed transaction is pre-emptively anchored to the system config key on
-// the system tenant.
-// Deprecated: Use cf.TxnWithExecutor().
-func (cf *CollectionFactory) Txn(
- ctx context.Context,
- db *kv.DB,
- f func(ctx context.Context, txn *kv.Txn, descriptors *Collection) error,
- opts ...TxnOption,
-) error {
- return cf.TxnWithExecutor(ctx, db, nil /* sessionData */, func(
- ctx context.Context, txn *kv.Txn, descriptors *Collection, _ sqlutil.InternalExecutor,
- ) error {
- return f(ctx, txn, descriptors)
- }, opts...)
-}
-
-// TxnOption is used to configure a Txn or TxnWithExecutor.
-type TxnOption interface {
- apply(*txnConfig)
-}
-
-type txnConfig struct {
- steppingEnabled bool
-}
-
-type txnOptionFn func(options *txnConfig)
-
-func (f txnOptionFn) apply(options *txnConfig) { f(options) }
-
-var steppingEnabled = txnOptionFn(func(o *txnConfig) {
- o.steppingEnabled = true
-})
-
-// SteppingEnabled creates a TxnOption to determine whether the underlying
-// transaction should have stepping enabled. If stepping is enabled, the
-// transaction will implicitly use lower admission priority. However, the
-// user will need to remember to Step the Txn to make writes visible. The
-// InternalExecutor will automatically (for better or for worse) step the
-// transaction when executing each statement.
-func SteppingEnabled() TxnOption {
- return steppingEnabled
-}
-
// TxnWithExecutorFunc is used to run a transaction in the context of a
// Collection and an InternalExecutor.
type TxnWithExecutorFunc = func(
@@ -89,97 +34,6 @@ type TxnWithExecutorFunc = func(
ie sqlutil.InternalExecutor,
) error
-// TxnWithExecutor enables callers to run transactions with a *Collection such that all
-// retrieved immutable descriptors are properly leased and all mutable
-// descriptors are handled. The function deals with verifying the two version
-// invariant and retrying when it is violated. Callers need not worry that they
-// write mutable descriptors multiple times. The call will explicitly wait for
-// the leases to drain on old versions of descriptors modified or deleted in the
-// transaction; callers do not need to call lease.WaitForOneVersion.
-// It also enables using internal executor to run sql queries in a txn manner.
-//
-// The passed transaction is pre-emptively anchored to the system config key on
-// the system tenant.
-func (cf *CollectionFactory) TxnWithExecutor(
- ctx context.Context,
- db *kv.DB,
- sd *sessiondata.SessionData,
- f TxnWithExecutorFunc,
- opts ...TxnOption,
-) error {
- var config txnConfig
- for _, opt := range opts {
- opt.apply(&config)
- }
- run := db.Txn
- if config.steppingEnabled {
- type kvTxnFunc = func(context.Context, *kv.Txn) error
- run = func(ctx context.Context, f kvTxnFunc) error {
- return db.TxnWithSteppingEnabled(ctx, sessiondatapb.Normal, f)
- }
- }
-
- // Waits for descriptors that were modified, skipping
- // over ones that had their descriptor wiped.
- waitForDescriptors := func(modifiedDescriptors []lease.IDVersion, deletedDescs catalog.DescriptorIDSet) error {
- // Wait for a single version on leased descriptors.
- for _, ld := range modifiedDescriptors {
- waitForNoVersion := deletedDescs.Contains(ld.ID)
- retryOpts := retry.Options{
- InitialBackoff: time.Millisecond,
- Multiplier: 1.5,
- MaxBackoff: time.Second,
- }
- // Detect unpublished ones.
- if waitForNoVersion {
- err := cf.leaseMgr.WaitForNoVersion(ctx, ld.ID, retryOpts)
- if err != nil {
- return err
- }
- } else {
- _, err := cf.leaseMgr.WaitForOneVersion(ctx, ld.ID, retryOpts)
- // If the descriptor has been deleted, just wait for leases to drain.
- if errors.Is(err, catalog.ErrDescriptorNotFound) {
- err = cf.leaseMgr.WaitForNoVersion(ctx, ld.ID, retryOpts)
- }
- if err != nil {
- return err
- }
- }
- }
- return nil
- }
- for {
- var withNewVersion []lease.IDVersion
- var deletedDescs catalog.DescriptorIDSet
- if err := run(ctx, func(ctx context.Context, txn *kv.Txn) (err error) {
- withNewVersion, deletedDescs = nil, catalog.DescriptorIDSet{}
- descsCol := cf.NewCollection(
- ctx, nil, /* temporarySchemaProvider */
- cf.ieFactoryWithTxn.MemoryMonitor(),
- )
- defer descsCol.ReleaseAll(ctx)
- ie, commitTxnFn := cf.ieFactoryWithTxn.NewInternalExecutorWithTxn(sd, &cf.settings.SV, txn, descsCol)
- if err := f(ctx, txn, descsCol, ie); err != nil {
- return err
- }
- deletedDescs = descsCol.deletedDescs
- withNewVersion, err = descsCol.GetOriginalPreviousIDVersionsForUncommitted()
- if err != nil {
- return err
- }
- return commitTxnFn(ctx)
- }); IsTwoVersionInvariantViolationError(err) {
- continue
- } else {
- if err == nil {
- err = waitForDescriptors(withNewVersion, deletedDescs)
- }
- return err
- }
- }
-}
-
// CheckTwoVersionInvariant checks whether any new schema being modified written
// at a version V has only valid leases at version = V - 1. A transaction retry
// error as well as a boolean is returned whenever the invariant is violated.
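The deleted retry loop is not lost: the same control flow lives on in InternalExecutorFactory.DescsTxnWithExecutor (see the pkg/sql/internal.go hunks later in this patch). In skeleton form, with the helpers abstracted into parameters:

    // twoVersionRetrySketch reruns the transaction closure while the
    // two-version invariant is violated, then waits for old descriptor
    // leases to drain before returning.
    func twoVersionRetrySketch(
        ctx context.Context,
        run func(context.Context) error,
        isViolation func(error) bool,
        waitForDescriptors func() error,
    ) error {
        for {
            err := run(ctx)
            if isViolation(err) {
                continue // retry the whole transaction
            }
            if err == nil {
                err = waitForDescriptors()
            }
            return err
        }
    }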
diff --git a/pkg/sql/catalog/descs/txn_external_test.go b/pkg/sql/catalog/descs/txn_external_test.go
index 941610269f51..f9cc296d50aa 100644
--- a/pkg/sql/catalog/descs/txn_external_test.go
+++ b/pkg/sql/catalog/descs/txn_external_test.go
@@ -17,6 +17,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
+ "github.com/cockroachdb/cockroach/pkg/sql/sqlutil"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
@@ -34,12 +35,12 @@ func TestTxnWithStepping(t *testing.T) {
s, _, kvDB := serverutils.StartServer(t, base.TestServerArgs{})
defer s.Stopper().Stop(ctx)
- cf := s.CollectionFactory().(*descs.CollectionFactory)
+ ief := s.InternalExecutorFactory().(descs.TxnManager)
scratchKey, err := s.ScratchRange()
require.NoError(t, err)
// Write a key, read in the transaction without stepping, ensure we
// do not see the value, step the transaction, then ensure that we do.
- require.NoError(t, cf.Txn(ctx, kvDB, func(
+ require.NoError(t, ief.DescsTxn(ctx, kvDB, func(
ctx context.Context, txn *kv.Txn, descriptors *descs.Collection,
) error {
if err := txn.Put(ctx, scratchKey, 1); err != nil {
@@ -67,5 +68,5 @@ func TestTxnWithStepping(t *testing.T) {
}
}
return nil
- }, descs.SteppingEnabled()))
+ }, sqlutil.SteppingEnabled()))
}
diff --git a/pkg/sql/catalog/descs/txn_with_executor_datadriven_test.go b/pkg/sql/catalog/descs/txn_with_executor_datadriven_test.go
index 3a7748368f24..b631ec73c498 100644
--- a/pkg/sql/catalog/descs/txn_with_executor_datadriven_test.go
+++ b/pkg/sql/catalog/descs/txn_with_executor_datadriven_test.go
@@ -51,7 +51,6 @@ func TestTxnWithExecutorDataDriven(t *testing.T) {
datadriven.Walk(t, testutils.TestDataPath(t, ""), func(t *testing.T, path string) {
s, _, kvDB := serverutils.StartServer(t, base.TestServerArgs{})
defer s.Stopper().Stop(ctx)
- cf := s.CollectionFactory().(*descs.CollectionFactory)
datadriven.RunTest(t, path, func(t *testing.T, d *datadriven.TestData) string {
stmts, err := parser.Parse(d.Input)
require.NoError(t, err)
@@ -70,7 +69,8 @@ func TestTxnWithExecutorDataDriven(t *testing.T) {
searchPath = sessiondata.MakeSearchPath(strings.Split(sp, ","))
}
sd.SearchPath = &searchPath
- err = cf.TxnWithExecutor(ctx, kvDB, nil /* sessionData */, func(
+ ief := s.InternalExecutorFactory().(descs.TxnManager)
+ err = ief.DescsTxnWithExecutor(ctx, kvDB, nil /* sessionData */, func(
ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, ie sqlutil.InternalExecutor,
) error {
for _, stmt := range stmts {
diff --git a/pkg/sql/exec_util.go b/pkg/sql/exec_util.go
index 977c75e18a8c..e51f477fc506 100644
--- a/pkg/sql/exec_util.go
+++ b/pkg/sql/exec_util.go
@@ -91,7 +91,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/sql/sessionphase"
"github.com/cockroachdb/cockroach/pkg/sql/sqlliveness"
"github.com/cockroachdb/cockroach/pkg/sql/sqlstats"
- "github.com/cockroachdb/cockroach/pkg/sql/sqlutil"
"github.com/cockroachdb/cockroach/pkg/sql/stats"
"github.com/cockroachdb/cockroach/pkg/sql/stmtdiagnostics"
"github.com/cockroachdb/cockroach/pkg/sql/types"
@@ -1311,10 +1310,9 @@ type ExecutorConfig struct {
// records.
SpanConfigKVAccessor spanconfig.KVAccessor
- // InternalExecutorFactory is used to create an InternalExecutor binded with
+ // InternalExecutorFactory is used to create an InternalExecutor bound with
// SessionData and other ExtraTxnState.
- // This is currently only for builtin functions where we need to execute sql.
- InternalExecutorFactory sqlutil.InternalExecutorFactory
+ InternalExecutorFactory descs.TxnManager
// ConsistencyChecker is to generate the results in calls to
// crdb_internal.check_consistency.
@@ -3452,7 +3450,7 @@ func DescsTxn(
execCfg *ExecutorConfig,
f func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error,
) error {
- return execCfg.CollectionFactory.Txn(ctx, execCfg.DB, f)
+ return execCfg.InternalExecutorFactory.DescsTxn(ctx, execCfg.DB, f)
}
// TestingDescsTxn is a convenience function for running a transaction on
diff --git a/pkg/sql/execinfra/server_config.go b/pkg/sql/execinfra/server_config.go
index 957fb11c3da1..c74c3ca46442 100644
--- a/pkg/sql/execinfra/server_config.go
+++ b/pkg/sql/execinfra/server_config.go
@@ -149,7 +149,7 @@ type ServerConfig struct {
// InternalExecutorFactory is used to construct session-bound
// executors. The idea is that a higher-layer binds some of the arguments
// required, so that users of ServerConfig don't have to care about them.
- InternalExecutorFactory sqlutil.InternalExecutorFactory
+ InternalExecutorFactory descs.TxnManager
ExternalStorage cloud.ExternalStorageFactory
ExternalStorageFromURI cloud.ExternalStorageFromURIFactory
diff --git a/pkg/sql/internal.go b/pkg/sql/internal.go
index b416485a79c4..32c813211e37 100644
--- a/pkg/sql/internal.go
+++ b/pkg/sql/internal.go
@@ -113,6 +113,8 @@ func (ie *InternalExecutor) WithSyntheticDescriptors(
}
// MakeInternalExecutor creates an InternalExecutor.
+// TODO (janexing): usage of it should be deprecated in favor of
+// `DescsTxnWithExecutor()` or `RunWithoutTxn()`.
func MakeInternalExecutor(
s *Server, memMetrics MemoryMetrics, monitor *mon.BytesMonitor,
) InternalExecutor {
@@ -123,55 +125,6 @@ func MakeInternalExecutor(
}
}
-// newInternalExecutorWithTxn creates an Internal Executor with txn related
-// information, and also a function that can be called to commit the txn.
-// This function should only be used in the implementation of
-// descs.CollectionFactory's InternalExecutorFactoryWithTxn.
-// TODO (janexing): This function will be soon refactored after we change
-// the internal executor infrastructure with a single conn executor for all
-// sql statement executions within a txn.
-func newInternalExecutorWithTxn(
- s *Server,
- sd *sessiondata.SessionData,
- txn *kv.Txn,
- memMetrics MemoryMetrics,
- monitor *mon.BytesMonitor,
- descCol *descs.Collection,
-) (*InternalExecutor, descs.InternalExecutorCommitTxnFunc) {
- schemaChangerState := &SchemaChangerState{
- mode: sd.NewSchemaChangerMode,
- }
- ie := InternalExecutor{
- s: s,
- mon: monitor,
- memMetrics: memMetrics,
- extraTxnState: &extraTxnState{
- txn: txn,
- descCollection: descCol,
- jobs: new(jobsCollection),
- schemaChangeJobRecords: make(map[descpb.ID]*jobs.Record),
- schemaChangerState: schemaChangerState,
- },
- }
- ie.s.populateMinimalSessionData(sd)
- ie.sessionDataStack = sessiondata.NewStack(sd)
-
- commitTxnFunc := func(ctx context.Context) error {
- defer func() {
- ie.extraTxnState.jobs.reset()
- ie.releaseSchemaChangeJobRecords()
- }()
- if err := ie.commitTxn(ctx); err != nil {
- return err
- }
- return ie.s.cfg.JobRegistry.Run(
- ctx, ie.s.cfg.InternalExecutor, *ie.extraTxnState.jobs,
- )
- }
-
- return &ie, commitTxnFunc
-}
-
// MakeInternalExecutorMemMonitor creates and starts memory monitor for an
// InternalExecutor.
func MakeInternalExecutorMemMonitor(
@@ -1288,12 +1241,6 @@ type InternalExecutorFactory struct {
monitor *mon.BytesMonitor
}
-// MemoryMonitor returns the monitor which should be used when constructing
-// things in the context of an internal executor created by this factory.
-func (ief *InternalExecutorFactory) MemoryMonitor() *mon.BytesMonitor {
- return ief.monitor
-}
-
// NewInternalExecutorFactory returns a new internal executor factory.
func NewInternalExecutorFactory(
s *Server, memMetrics MemoryMetrics, monitor *mon.BytesMonitor,
@@ -1306,10 +1253,11 @@ func NewInternalExecutorFactory(
}
var _ sqlutil.InternalExecutorFactory = &InternalExecutorFactory{}
-var _ descs.InternalExecutorFactoryWithTxn = &InternalExecutorFactory{}
+var _ descs.TxnManager = &InternalExecutorFactory{}
// NewInternalExecutor constructs a new internal executor.
-// TODO (janexing): this should be deprecated soon.
+// TODO (janexing): usage of it should be deprecated in favor of
+// `DescsTxnWithExecutor()` or `RunWithoutTxn()`.
func (ief *InternalExecutorFactory) NewInternalExecutor(
sd *sessiondata.SessionData,
) sqlutil.InternalExecutor {
@@ -1318,12 +1266,14 @@ func (ief *InternalExecutorFactory) NewInternalExecutor(
return &ie
}
-// NewInternalExecutorWithTxn creates an internal executor with txn-related info,
-// such as descriptor collection and schema change job records, etc. It should
-// be called only after InternalExecutorFactory.NewInternalExecutor is already
-// called to construct the InternalExecutorFactory with required server info.
-// This function should only be used under CollectionFactory.TxnWithExecutor().
-func (ief *InternalExecutorFactory) NewInternalExecutorWithTxn(
+// newInternalExecutorWithTxn creates an internal executor with txn-related info,
+// such as descriptor collection and schema change job records, etc.
+// This function should only be used under
+// InternalExecutorFactory.DescsTxnWithExecutor().
+// TODO (janexing): This function will soon be refactored after we change
+// the internal executor infrastructure to use a single conn executor for all
+// sql statement executions within a txn.
+func (ief *InternalExecutorFactory) newInternalExecutorWithTxn(
sd *sessiondata.SessionData, sv *settings.Values, txn *kv.Txn, descCol *descs.Collection,
) (sqlutil.InternalExecutor, descs.InternalExecutorCommitTxnFunc) {
// By default, if not given session data, we initialize a sessionData that
@@ -1337,16 +1287,39 @@ func (ief *InternalExecutorFactory) NewInternalExecutorWithTxn(
sd = NewFakeSessionData(sv)
sd.UserProto = username.RootUserName().EncodeProto()
}
- ie, commitTxnFunc := newInternalExecutorWithTxn(
- ief.server,
- sd,
- txn,
- ief.memMetrics,
- ief.monitor,
- descCol,
- )
- return ie, commitTxnFunc
+ schemaChangerState := &SchemaChangerState{
+ mode: sd.NewSchemaChangerMode,
+ }
+ ie := InternalExecutor{
+ s: ief.server,
+ mon: ief.monitor,
+ memMetrics: ief.memMetrics,
+ extraTxnState: &extraTxnState{
+ txn: txn,
+ descCollection: descCol,
+ jobs: new(jobsCollection),
+ schemaChangeJobRecords: make(map[descpb.ID]*jobs.Record),
+ schemaChangerState: schemaChangerState,
+ },
+ }
+ ie.s.populateMinimalSessionData(sd)
+ ie.sessionDataStack = sessiondata.NewStack(sd)
+
+ commitTxnFunc := func(ctx context.Context) error {
+ defer func() {
+ ie.extraTxnState.jobs.reset()
+ ie.releaseSchemaChangeJobRecords()
+ }()
+ if err := ie.commitTxn(ctx); err != nil {
+ return err
+ }
+ return ie.s.cfg.JobRegistry.Run(
+ ctx, ie.s.cfg.InternalExecutor, *ie.extraTxnState.jobs,
+ )
+ }
+
+ return &ie, commitTxnFunc
}
// RunWithoutTxn is to create an internal executor without binding to a txn,
@@ -1439,10 +1412,10 @@ func (ief *InternalExecutorFactory) DescsTxnWithExecutor(
withNewVersion, deletedDescs = nil, catalog.DescriptorIDSet{}
descsCol := cf.NewCollection(
ctx, nil, /* temporarySchemaProvider */
- ief.MemoryMonitor(),
+ ief.monitor,
)
defer descsCol.ReleaseAll(ctx)
- ie, commitTxnFn := ief.NewInternalExecutorWithTxn(sd, &cf.GetClusterSettings().SV, txn, descsCol)
+ ie, commitTxnFn := ief.newInternalExecutorWithTxn(sd, &cf.GetClusterSettings().SV, txn, descsCol)
if err := f(ctx, txn, descsCol, ie); err != nil {
return err
}
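The inlined constructor keeps the old contract: the caller receives the executor together with a commit callback and must finish the transaction through that callback, so that jobs queued during execution run after the commit. A self-contained sketch of the flow, with all parameters as stand-ins:

    // commitFlowSketch mirrors how DescsTxnWithExecutor finishes a txn: run
    // the user function first, then the commit callback returned by
    // newInternalExecutorWithTxn, which commits and runs any queued jobs.
    func commitFlowSketch(
        ctx context.Context,
        userFn func(context.Context) error,
        commitFn func(context.Context) error,
    ) error {
        if err := userFn(ctx); err != nil {
            // Commit callback skipped; the kv layer rolls the txn back.
            return err
        }
        return commitFn(ctx)
    }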
diff --git a/pkg/sql/schema_change_plan_node.go b/pkg/sql/schema_change_plan_node.go
index b21f45741654..fa0eb642cd2e 100644
--- a/pkg/sql/schema_change_plan_node.go
+++ b/pkg/sql/schema_change_plan_node.go
@@ -132,7 +132,7 @@ func (p *planner) waitForDescriptorSchemaChanges(
)
}
blocked := false
- if err := p.ExecCfg().CollectionFactory.Txn(ctx, p.ExecCfg().DB, func(
+ if err := p.ExecCfg().InternalExecutorFactory.DescsTxn(ctx, p.ExecCfg().DB, func(
ctx context.Context, txn *kv.Txn, descriptors *descs.Collection,
) error {
if err := txn.SetFixedTimestamp(ctx, now); err != nil {
diff --git a/pkg/sql/schema_changer.go b/pkg/sql/schema_changer.go
index ec0f00448719..86322418e13a 100644
--- a/pkg/sql/schema_changer.go
+++ b/pkg/sql/schema_changer.go
@@ -1345,7 +1345,7 @@ func (sc *SchemaChanger) done(ctx context.Context) error {
var didUpdate bool
var depMutationJobs []jobspb.JobID
var otherJobIDs []jobspb.JobID
- err := sc.execCfg.CollectionFactory.Txn(ctx, sc.db, func(
+ err := sc.execCfg.InternalExecutorFactory.DescsTxn(ctx, sc.db, func(
ctx context.Context, txn *kv.Txn, descsCol *descs.Collection,
) error {
depMutationJobs = depMutationJobs[:0]
@@ -2461,7 +2461,7 @@ func (sc *SchemaChanger) txn(
return err
}
}
- return sc.execCfg.CollectionFactory.Txn(ctx, sc.db, f)
+ return sc.execCfg.InternalExecutorFactory.DescsTxn(ctx, sc.db, f)
}
// txnWithExecutor is to run internal executor within a txn.
@@ -2475,7 +2475,7 @@ func (sc *SchemaChanger) txnWithExecutor(
return err
}
}
- return sc.execCfg.CollectionFactory.TxnWithExecutor(ctx, sc.db, sd, f)
+ return sc.execCfg.InternalExecutorFactory.DescsTxnWithExecutor(ctx, sc.db, sd, f)
}
// createSchemaChangeEvalCtx creates an extendedEvalContext() to be used for backfills.
diff --git a/pkg/sql/schemachanger/scdeps/run_deps.go b/pkg/sql/schemachanger/scdeps/run_deps.go
index 47c4bb7a119e..c88db6115f4b 100644
--- a/pkg/sql/schemachanger/scdeps/run_deps.go
+++ b/pkg/sql/schemachanger/scdeps/run_deps.go
@@ -33,6 +33,7 @@ import (
// given arguments.
func NewJobRunDependencies(
collectionFactory *descs.CollectionFactory,
+ ieFactory descs.TxnManager,
db *kv.DB,
backfiller scexec.Backfiller,
merger scexec.Merger,
@@ -51,38 +52,40 @@ func NewJobRunDependencies(
kvTrace bool,
) scrun.JobRunDependencies {
return &jobExecutionDeps{
- collectionFactory: collectionFactory,
- db: db,
- backfiller: backfiller,
- merger: merger,
- rangeCounter: rangeCounter,
- eventLoggerFactory: eventLoggerFactory,
- jobRegistry: jobRegistry,
- job: job,
- codec: codec,
- settings: settings,
- testingKnobs: testingKnobs,
- statements: statements,
- indexValidator: indexValidator,
- commentUpdaterFactory: metadataUpdaterFactory,
- sessionData: sessionData,
- kvTrace: kvTrace,
- statsRefresher: statsRefresher,
+ collectionFactory: collectionFactory,
+ internalExecutorFactory: ieFactory,
+ db: db,
+ backfiller: backfiller,
+ merger: merger,
+ rangeCounter: rangeCounter,
+ eventLoggerFactory: eventLoggerFactory,
+ jobRegistry: jobRegistry,
+ job: job,
+ codec: codec,
+ settings: settings,
+ testingKnobs: testingKnobs,
+ statements: statements,
+ indexValidator: indexValidator,
+ commentUpdaterFactory: metadataUpdaterFactory,
+ sessionData: sessionData,
+ kvTrace: kvTrace,
+ statsRefresher: statsRefresher,
}
}
type jobExecutionDeps struct {
- collectionFactory *descs.CollectionFactory
- db *kv.DB
- eventLoggerFactory func(txn *kv.Txn) scexec.EventLogger
- statsRefresher scexec.StatsRefresher
- backfiller scexec.Backfiller
- merger scexec.Merger
- commentUpdaterFactory MetadataUpdaterFactory
- rangeCounter backfiller.RangeCounter
- jobRegistry *jobs.Registry
- job *jobs.Job
- kvTrace bool
+ collectionFactory *descs.CollectionFactory
+ internalExecutorFactory descs.TxnManager
+ db *kv.DB
+ eventLoggerFactory func(txn *kv.Txn) scexec.EventLogger
+ statsRefresher scexec.StatsRefresher
+ backfiller scexec.Backfiller
+ merger scexec.Merger
+ commentUpdaterFactory MetadataUpdaterFactory
+ rangeCounter backfiller.RangeCounter
+ jobRegistry *jobs.Registry
+ job *jobs.Job
+ kvTrace bool
indexValidator scexec.IndexValidator
@@ -104,7 +107,7 @@ func (d *jobExecutionDeps) ClusterSettings() *cluster.Settings {
func (d *jobExecutionDeps) WithTxnInJob(ctx context.Context, fn scrun.JobTxnFunc) error {
var createdJobs []jobspb.JobID
var tableStatsToRefresh []descpb.ID
- err := d.collectionFactory.Txn(ctx, d.db, func(
+ err := d.internalExecutorFactory.DescsTxn(ctx, d.db, func(
ctx context.Context, txn *kv.Txn, descriptors *descs.Collection,
) error {
pl := d.job.Payload()
@@ -152,7 +155,7 @@ func (d *jobExecutionDeps) WithTxnInJob(ctx context.Context, fn scrun.JobTxnFunc
d.jobRegistry.NotifyToResume(ctx, createdJobs...)
}
if len(tableStatsToRefresh) > 0 {
- err := d.collectionFactory.Txn(ctx, d.db, func(
+ err := d.internalExecutorFactory.DescsTxn(ctx, d.db, func(
ctx context.Context, txn *kv.Txn, descriptors *descs.Collection,
) error {
for _, id := range tableStatsToRefresh {
diff --git a/pkg/sql/schemachanger/scexec/executor_external_test.go b/pkg/sql/schemachanger/scexec/executor_external_test.go
index 305749f1581e..8fe363b87b26 100644
--- a/pkg/sql/schemachanger/scexec/executor_external_test.go
+++ b/pkg/sql/schemachanger/scexec/executor_external_test.go
@@ -55,6 +55,7 @@ type testInfra struct {
lm *lease.Manager
tsql *sqlutils.SQLRunner
cf *descs.CollectionFactory
+ ief descs.TxnManager
}
func (ti testInfra) newExecDeps(
@@ -94,6 +95,7 @@ func setupTestInfra(t testing.TB) *testInfra {
db: tc.Server(0).DB(),
lm: tc.Server(0).LeaseManager().(*lease.Manager),
cf: tc.Server(0).ExecutorConfig().(sql.ExecutorConfig).CollectionFactory,
+ ief: tc.Server(0).ExecutorConfig().(sql.ExecutorConfig).InternalExecutorFactory,
tsql: sqlutils.MakeSQLRunner(tc.ServerConn(0)),
}
}
@@ -102,7 +104,7 @@ func (ti *testInfra) txn(
ctx context.Context,
f func(ctx context.Context, txn *kv.Txn, descriptors *descs.Collection) error,
) error {
- return ti.cf.Txn(ctx, ti.db, f)
+ return ti.ief.DescsTxn(ctx, ti.db, f)
}
func TestExecutorDescriptorMutationOps(t *testing.T) {
diff --git a/pkg/sql/schemachanger/scjob/job.go b/pkg/sql/schemachanger/scjob/job.go
index 95b067fd8fc8..23beb8f01369 100644
--- a/pkg/sql/schemachanger/scjob/job.go
+++ b/pkg/sql/schemachanger/scjob/job.go
@@ -69,6 +69,7 @@ func (n *newSchemaChangeResumer) run(ctx context.Context, execCtxI interface{})
payload := n.job.Payload()
deps := scdeps.NewJobRunDependencies(
execCfg.CollectionFactory,
+ execCfg.InternalExecutorFactory,
execCfg.DB,
execCfg.IndexBackfiller,
execCfg.IndexMerger,
diff --git a/pkg/sql/sessioninit/BUILD.bazel b/pkg/sql/sessioninit/BUILD.bazel
index bdf05954d87f..d2a379ba1a74 100644
--- a/pkg/sql/sessioninit/BUILD.bazel
+++ b/pkg/sql/sessioninit/BUILD.bazel
@@ -19,7 +19,6 @@ go_library(
"//pkg/sql/catalog/descpb",
"//pkg/sql/catalog/descs",
"//pkg/sql/sem/tree",
- "//pkg/sql/sqlutil",
"//pkg/util/log",
"//pkg/util/mon",
"//pkg/util/stop",
@@ -48,7 +47,6 @@ go_test(
"//pkg/sql/catalog/descpb",
"//pkg/sql/catalog/descs",
"//pkg/sql/sessiondatapb",
- "//pkg/sql/sqlutil",
"//pkg/testutils/serverutils",
"//pkg/testutils/sqlutils",
"//pkg/testutils/testcluster",
diff --git a/pkg/sql/sessioninit/cache.go b/pkg/sql/sessioninit/cache.go
index a0b61d314fe8..7c0d48f4fc1d 100644
--- a/pkg/sql/sessioninit/cache.go
+++ b/pkg/sql/sessioninit/cache.go
@@ -24,7 +24,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
- "github.com/cockroachdb/cockroach/pkg/sql/sqlutil"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/mon"
"github.com/cockroachdb/cockroach/pkg/util/stop"
@@ -106,13 +105,12 @@ func NewCache(account mon.BoundAccount, stopper *stop.Stopper) *Cache {
func (a *Cache) GetAuthInfo(
ctx context.Context,
settings *cluster.Settings,
- ie sqlutil.InternalExecutor,
db *kv.DB,
- f *descs.CollectionFactory,
+ f descs.TxnManager,
username username.SQLUsername,
readFromSystemTables func(
ctx context.Context,
- ie sqlutil.InternalExecutor,
+ f descs.TxnManager,
username username.SQLUsername,
makePlanner func(opName string) (interface{}, func()),
settings *cluster.Settings,
@@ -120,12 +118,12 @@ func (a *Cache) GetAuthInfo(
makePlanner func(opName string) (interface{}, func()),
) (aInfo AuthInfo, err error) {
if !CacheEnabled.Get(&settings.SV) {
- return readFromSystemTables(ctx, ie, username, makePlanner, settings)
+ return readFromSystemTables(ctx, f, username, makePlanner, settings)
}
var usersTableDesc catalog.TableDescriptor
var roleOptionsTableDesc catalog.TableDescriptor
- err = f.Txn(ctx, db, func(
+ err = f.DescsTxn(ctx, db, func(
ctx context.Context, txn *kv.Txn, descriptors *descs.Collection,
) error {
_, usersTableDesc, err = descriptors.GetImmutableTableByName(
@@ -167,7 +165,7 @@ func (a *Cache) GetAuthInfo(
val, err := a.loadValueOutsideOfCache(
ctx, fmt.Sprintf("authinfo-%s-%d-%d", username.Normalized(), usersTableVersion, roleOptionsTableVersion),
func(loadCtx context.Context) (interface{}, error) {
- return readFromSystemTables(loadCtx, ie, username, makePlanner, settings)
+ return readFromSystemTables(loadCtx, f, username, makePlanner, settings)
})
if err != nil {
return aInfo, err
@@ -283,21 +281,20 @@ func (a *Cache) maybeWriteAuthInfoBackToCache(
func (a *Cache) GetDefaultSettings(
ctx context.Context,
settings *cluster.Settings,
- ie sqlutil.InternalExecutor,
db *kv.DB,
- f *descs.CollectionFactory,
+ f descs.TxnManager,
userName username.SQLUsername,
databaseName string,
readFromSystemTables func(
ctx context.Context,
- ie sqlutil.InternalExecutor,
+ f descs.TxnManager,
userName username.SQLUsername,
databaseID descpb.ID,
) ([]SettingsCacheEntry, error),
) (settingsEntries []SettingsCacheEntry, err error) {
var dbRoleSettingsTableDesc catalog.TableDescriptor
var databaseID descpb.ID
- err = f.Txn(ctx, db, func(
+ err = f.DescsTxn(ctx, db, func(
ctx context.Context, txn *kv.Txn, descriptors *descs.Collection,
) error {
_, dbRoleSettingsTableDesc, err = descriptors.GetImmutableTableByName(
@@ -333,7 +330,7 @@ func (a *Cache) GetDefaultSettings(
if !CacheEnabled.Get(&settings.SV) {
settingsEntries, err = readFromSystemTables(
ctx,
- ie,
+ f,
userName,
databaseID,
)
@@ -357,7 +354,7 @@ func (a *Cache) GetDefaultSettings(
val, err := a.loadValueOutsideOfCache(
ctx, fmt.Sprintf("defaultsettings-%s-%d-%d", userName.Normalized(), databaseID, dbRoleSettingsTableVersion),
func(loadCtx context.Context) (interface{}, error) {
- return readFromSystemTables(loadCtx, ie, userName, databaseID)
+ return readFromSystemTables(loadCtx, f, userName, databaseID)
},
)
if err != nil {
diff --git a/pkg/sql/sessioninit/cache_test.go b/pkg/sql/sessioninit/cache_test.go
index 337b5095f5ae..c9d344fa9882 100644
--- a/pkg/sql/sessioninit/cache_test.go
+++ b/pkg/sql/sessioninit/cache_test.go
@@ -25,7 +25,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondatapb"
"github.com/cockroachdb/cockroach/pkg/sql/sessioninit"
- "github.com/cockroachdb/cockroach/pkg/sql/sqlutil"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
@@ -64,12 +63,11 @@ func TestCacheInvalidation(t *testing.T) {
settings, err := execCfg.SessionInitCache.GetDefaultSettings(
ctx,
s.ClusterSettings(),
- s.InternalExecutor().(sqlutil.InternalExecutor),
s.DB(),
- s.CollectionFactory().(*descs.CollectionFactory),
+ s.InternalExecutorFactory().(*sql.InternalExecutorFactory),
username.TestUserName(),
"defaultdb",
- func(ctx context.Context, ie sqlutil.InternalExecutor, userName username.SQLUsername, databaseID descpb.ID) ([]sessioninit.SettingsCacheEntry, error) {
+ func(ctx context.Context, ief descs.TxnManager, userName username.SQLUsername, databaseID descpb.ID) ([]sessioninit.SettingsCacheEntry, error) {
didReadFromSystemTable = true
return nil, nil
})
@@ -91,11 +89,10 @@ func TestCacheInvalidation(t *testing.T) {
aInfo, err := execCfg.SessionInitCache.GetAuthInfo(
ctx,
settings,
- s.InternalExecutor().(sqlutil.InternalExecutor),
s.DB(),
- s.CollectionFactory().(*descs.CollectionFactory),
+ s.InternalExecutorFactory().(*sql.InternalExecutorFactory),
username.TestUserName(),
- func(ctx context.Context, ie sqlutil.InternalExecutor, userName username.SQLUsername, makePlanner func(opName string) (interface{}, func()), settings *cluster.Settings) (sessioninit.AuthInfo, error) {
+ func(ctx context.Context, f descs.TxnManager, userName username.SQLUsername, makePlanner func(opName string) (interface{}, func()), settings *cluster.Settings) (sessioninit.AuthInfo, error) {
didReadFromSystemTable = true
return sessioninit.AuthInfo{}, nil
},
@@ -218,7 +215,6 @@ func TestCacheSingleFlight(t *testing.T) {
defer s.Stopper().Stop(ctx)
execCfg := s.ExecutorConfig().(sql.ExecutorConfig)
settings := s.ExecutorConfig().(sql.ExecutorConfig).Settings
- ie := s.InternalExecutor().(sqlutil.InternalExecutor)
c := s.ExecutorConfig().(sql.ExecutorConfig).SessionInitCache
testuser := username.MakeSQLUsernameFromPreNormalizedString("test")
@@ -247,9 +243,9 @@ func TestCacheSingleFlight(t *testing.T) {
go func() {
didReadFromSystemTable := false
- _, err := c.GetAuthInfo(ctx, settings, ie, s.DB(), s.ExecutorConfig().(sql.ExecutorConfig).CollectionFactory, testuser, func(
+ _, err := c.GetAuthInfo(ctx, settings, s.DB(), s.ExecutorConfig().(sql.ExecutorConfig).InternalExecutorFactory, testuser, func(
ctx context.Context,
- ie sqlutil.InternalExecutor,
+ f descs.TxnManager,
userName username.SQLUsername,
makePlanner func(opName string) (interface{}, func()),
settings *cluster.Settings,
@@ -274,9 +270,9 @@ func TestCacheSingleFlight(t *testing.T) {
for i := 0; i < 2; i++ {
go func() {
didReadFromSystemTable := false
- _, err := c.GetAuthInfo(ctx, settings, ie, s.DB(), s.ExecutorConfig().(sql.ExecutorConfig).CollectionFactory, testuser, func(
+ _, err := c.GetAuthInfo(ctx, settings, s.DB(), s.ExecutorConfig().(sql.ExecutorConfig).InternalExecutorFactory, testuser, func(
ctx context.Context,
- ie sqlutil.InternalExecutor,
+ f descs.TxnManager,
userName username.SQLUsername,
makePlanner func(opName string) (interface{}, func()),
settings *cluster.Settings,
@@ -298,9 +294,9 @@ func TestCacheSingleFlight(t *testing.T) {
// GetAuthInfo should not be using the cache since it is outdated.
didReadFromSystemTable := false
- _, err = c.GetAuthInfo(ctx, settings, ie, s.DB(), s.ExecutorConfig().(sql.ExecutorConfig).CollectionFactory, testuser, func(
+ _, err = c.GetAuthInfo(ctx, settings, s.DB(), s.ExecutorConfig().(sql.ExecutorConfig).InternalExecutorFactory, testuser, func(
ctx context.Context,
- ie sqlutil.InternalExecutor,
+ f descs.TxnManager,
userName username.SQLUsername,
makePlanner func(opName string) (interface{}, func()),
settings *cluster.Settings,
diff --git a/pkg/sql/stats/automatic_stats.go b/pkg/sql/stats/automatic_stats.go
index 3df4bc30cf51..fa8105453f5f 100644
--- a/pkg/sql/stats/automatic_stats.go
+++ b/pkg/sql/stats/automatic_stats.go
@@ -351,7 +351,7 @@ func (r *Refresher) autoStatsFractionStaleRows(explicitSettings *catpb.AutoStats
func (r *Refresher) getTableDescriptor(
ctx context.Context, tableID descpb.ID,
) (desc catalog.TableDescriptor) {
- if err := r.cache.collectionFactory.Txn(ctx, r.cache.ClientDB, func(
+ if err := r.cache.internalExecutorFactory.DescsTxn(ctx, r.cache.ClientDB, func(
ctx context.Context, txn *kv.Txn, descriptors *descs.Collection,
) (err error) {
flags := tree.ObjectLookupFlagsWithRequired()
diff --git a/pkg/sql/stats/automatic_stats_test.go b/pkg/sql/stats/automatic_stats_test.go
index 1a2265559168..58d7c883666b 100644
--- a/pkg/sql/stats/automatic_stats_test.go
+++ b/pkg/sql/stats/automatic_stats_test.go
@@ -73,7 +73,7 @@ func TestMaybeRefreshStats(t *testing.T) {
kvDB,
executor,
s.ClusterSettings(),
- s.CollectionFactory().(*descs.CollectionFactory),
+ s.InternalExecutorFactory().(descs.TxnManager),
)
require.NoError(t, cache.Start(ctx, keys.SystemSQLCodec, s.RangeFeedFactory().(*rangefeed.Factory)))
refresher := MakeRefresher(s.AmbientCtx(), st, executor, cache, time.Microsecond /* asOfTime */)
@@ -200,7 +200,7 @@ func TestEnsureAllTablesQueries(t *testing.T) {
kvDB,
executor,
s.ClusterSettings(),
- s.CollectionFactory().(*descs.CollectionFactory),
+ s.InternalExecutorFactory().(descs.TxnManager),
)
require.NoError(t, cache.Start(ctx, keys.SystemSQLCodec, s.RangeFeedFactory().(*rangefeed.Factory)))
r := MakeRefresher(s.AmbientCtx(), st, executor, cache, time.Microsecond /* asOfTime */)
@@ -306,7 +306,7 @@ func TestAverageRefreshTime(t *testing.T) {
kvDB,
executor,
s.ClusterSettings(),
- s.CollectionFactory().(*descs.CollectionFactory),
+ s.InternalExecutorFactory().(descs.TxnManager),
)
require.NoError(t, cache.Start(ctx, keys.SystemSQLCodec, s.RangeFeedFactory().(*rangefeed.Factory)))
refresher := MakeRefresher(s.AmbientCtx(), st, executor, cache, time.Microsecond /* asOfTime */)
@@ -554,7 +554,7 @@ func TestAutoStatsReadOnlyTables(t *testing.T) {
kvDB,
executor,
s.ClusterSettings(),
- s.CollectionFactory().(*descs.CollectionFactory),
+ s.InternalExecutorFactory().(descs.TxnManager),
)
require.NoError(t, cache.Start(ctx, keys.SystemSQLCodec, s.RangeFeedFactory().(*rangefeed.Factory)))
refresher := MakeRefresher(s.AmbientCtx(), st, executor, cache, time.Microsecond /* asOfTime */)
@@ -611,7 +611,7 @@ func TestAutoStatsOnStartupClusterSettingOff(t *testing.T) {
kvDB,
executor,
s.ClusterSettings(),
- s.CollectionFactory().(*descs.CollectionFactory),
+ s.InternalExecutorFactory().(descs.TxnManager),
)
require.NoError(t, cache.Start(ctx, keys.SystemSQLCodec, s.RangeFeedFactory().(*rangefeed.Factory)))
refresher := MakeRefresher(s.AmbientCtx(), st, executor, cache, time.Microsecond /* asOfTime */)
@@ -659,7 +659,7 @@ func TestNoRetryOnFailure(t *testing.T) {
kvDB,
executor,
s.ClusterSettings(),
- s.CollectionFactory().(*descs.CollectionFactory),
+ s.InternalExecutorFactory().(descs.TxnManager),
)
require.NoError(t, cache.Start(ctx, keys.SystemSQLCodec, s.RangeFeedFactory().(*rangefeed.Factory)))
r := MakeRefresher(s.AmbientCtx(), st, executor, cache, time.Microsecond /* asOfTime */)
@@ -776,7 +776,7 @@ func TestAnalyzeSystemTables(t *testing.T) {
kvDB,
executor,
s.ClusterSettings(),
- s.CollectionFactory().(*descs.CollectionFactory),
+ s.InternalExecutorFactory().(descs.TxnManager),
)
require.NoError(t, cache.Start(ctx, keys.SystemSQLCodec, s.RangeFeedFactory().(*rangefeed.Factory)))
var tableNames []string
diff --git a/pkg/sql/stats/delete_stats_test.go b/pkg/sql/stats/delete_stats_test.go
index 6dc2955e9de4..2925ad405df1 100644
--- a/pkg/sql/stats/delete_stats_test.go
+++ b/pkg/sql/stats/delete_stats_test.go
@@ -46,7 +46,7 @@ func TestDeleteOldStatsForColumns(t *testing.T) {
db,
ex,
s.ClusterSettings(),
- s.CollectionFactory().(*descs.CollectionFactory),
+ s.InternalExecutorFactory().(descs.TxnManager),
)
require.NoError(t, cache.Start(ctx, keys.SystemSQLCodec, s.RangeFeedFactory().(*rangefeed.Factory)))
@@ -343,7 +343,7 @@ func TestDeleteOldStatsForOtherColumns(t *testing.T) {
db,
ex,
s.ClusterSettings(),
- s.CollectionFactory().(*descs.CollectionFactory),
+ s.InternalExecutorFactory().(descs.TxnManager),
)
require.NoError(t, cache.Start(ctx, keys.SystemSQLCodec, s.RangeFeedFactory().(*rangefeed.Factory)))
testData := []TableStatisticProto{
diff --git a/pkg/sql/stats/stats_cache.go b/pkg/sql/stats/stats_cache.go
index 74e66d38c6b9..85cce35b958b 100644
--- a/pkg/sql/stats/stats_cache.go
+++ b/pkg/sql/stats/stats_cache.go
@@ -72,8 +72,7 @@ type TableStatisticsCache struct {
SQLExecutor sqlutil.InternalExecutor
Settings *cluster.Settings
- // Used to resolve descriptors.
- collectionFactory *descs.CollectionFactory
+ internalExecutorFactory descs.TxnManager
// Used when decoding KV from the range feed.
datumAlloc tree.DatumAlloc
@@ -121,13 +120,13 @@ func NewTableStatisticsCache(
db *kv.DB,
sqlExecutor sqlutil.InternalExecutor,
settings *cluster.Settings,
- cf *descs.CollectionFactory,
+ ief descs.TxnManager,
) *TableStatisticsCache {
tableStatsCache := &TableStatisticsCache{
- ClientDB: db,
- SQLExecutor: sqlExecutor,
- Settings: settings,
- collectionFactory: cf,
+ ClientDB: db,
+ SQLExecutor: sqlExecutor,
+ Settings: settings,
+ internalExecutorFactory: ief,
}
tableStatsCache.mu.cache = cache.NewUnorderedCache(cache.Config{
Policy: cache.CacheLRU,
@@ -602,7 +601,7 @@ func (sc *TableStatisticsCache) parseStats(
// TypeDescriptor's with the timestamp that the stats were recorded with.
//
// TODO(ajwerner): We now do delete members from enum types. See #67050.
- if err := sc.collectionFactory.Txn(ctx, sc.ClientDB, func(
+ if err := sc.internalExecutorFactory.DescsTxn(ctx, sc.ClientDB, func(
ctx context.Context, txn *kv.Txn, descriptors *descs.Collection,
) error {
resolver := descs.NewDistSQLTypeResolver(descriptors, txn)
diff --git a/pkg/sql/stats/stats_cache_test.go b/pkg/sql/stats/stats_cache_test.go
index d0a272b800e1..ddc0577b87a0 100644
--- a/pkg/sql/stats/stats_cache_test.go
+++ b/pkg/sql/stats/stats_cache_test.go
@@ -245,7 +245,7 @@ func TestCacheBasic(t *testing.T) {
db,
ex,
s.ClusterSettings(),
- s.CollectionFactory().(*descs.CollectionFactory),
+ s.InternalExecutorFactory().(descs.TxnManager),
)
require.NoError(t, sc.Start(ctx, keys.SystemSQLCodec, s.RangeFeedFactory().(*rangefeed.Factory)))
for _, tableID := range tableIDs {
@@ -351,7 +351,7 @@ func TestCacheUserDefinedTypes(t *testing.T) {
kvDB,
s.InternalExecutor().(sqlutil.InternalExecutor),
s.ClusterSettings(),
- s.CollectionFactory().(*descs.CollectionFactory),
+ s.InternalExecutorFactory().(descs.TxnManager),
)
require.NoError(t, sc.Start(ctx, keys.SystemSQLCodec, s.RangeFeedFactory().(*rangefeed.Factory)))
tbl := desctestutils.TestingGetPublicTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "tt")
@@ -409,7 +409,7 @@ func TestCacheWait(t *testing.T) {
db,
ex,
s.ClusterSettings(),
- s.CollectionFactory().(*descs.CollectionFactory),
+ s.InternalExecutorFactory().(descs.TxnManager),
)
require.NoError(t, sc.Start(ctx, keys.SystemSQLCodec, s.RangeFeedFactory().(*rangefeed.Factory)))
for _, tableID := range tableIDs {
@@ -464,7 +464,7 @@ func TestCacheAutoRefresh(t *testing.T) {
s.DB(),
s.InternalExecutor().(sqlutil.InternalExecutor),
s.ClusterSettings(),
- s.CollectionFactory().(*descs.CollectionFactory),
+ s.InternalExecutorFactory().(descs.TxnManager),
)
require.NoError(t, sc.Start(ctx, keys.SystemSQLCodec, s.RangeFeedFactory().(*rangefeed.Factory)))
diff --git a/pkg/sql/temporary_schema.go b/pkg/sql/temporary_schema.go
index 4342de9f6b1b..c80cb3332e1f 100644
--- a/pkg/sql/temporary_schema.go
+++ b/pkg/sql/temporary_schema.go
@@ -175,7 +175,7 @@ func cleanupSessionTempObjects(
sessionID clusterunique.ID,
) error {
tempSchemaName := temporarySchemaName(sessionID)
- return ief.(descs.InternalExecutorFactoryWithTxn).DescsTxnWithExecutor(
+ return ief.(descs.TxnManager).DescsTxnWithExecutor(
ctx, db, nil /* sessionData */, func(
ctx context.Context, txn *kv.Txn, descsCol *descs.Collection,
ie sqlutil.InternalExecutor,
diff --git a/pkg/sql/temporary_schema_test.go b/pkg/sql/temporary_schema_test.go
index fa45359fc465..f03c968bcc9d 100644
--- a/pkg/sql/temporary_schema_test.go
+++ b/pkg/sql/temporary_schema_test.go
@@ -96,8 +96,8 @@ INSERT INTO perm_table VALUES (DEFAULT, 1);
require.NoError(t, rows.Close())
}
execCfg := s.ExecutorConfig().(ExecutorConfig)
- cf := execCfg.CollectionFactory
- require.NoError(t, cf.TxnWithExecutor(ctx, kvDB, nil /* sessionData */, func(
+ ief := execCfg.InternalExecutorFactory
+ require.NoError(t, ief.DescsTxnWithExecutor(ctx, kvDB, nil /* sessionData */, func(
ctx context.Context, txn *kv.Txn, descsCol *descs.Collection,
ie sqlutil.InternalExecutor,
) error {
diff --git a/pkg/sql/user.go b/pkg/sql/user.go
index 1720839cfced..ec5b53105b7a 100644
--- a/pkg/sql/user.go
+++ b/pkg/sql/user.go
@@ -100,7 +100,7 @@ func GetUserSessionInitInfo(
// necessary.
rootFn := func(ctx context.Context) (expired bool, ret password.PasswordHash, err error) {
err = runFn(ctx, func(ctx context.Context) error {
- authInfo, _, err := retrieveSessionInitInfoWithCache(ctx, execCfg, ie, user, databaseName)
+ authInfo, _, err := retrieveSessionInitInfoWithCache(ctx, execCfg, user, databaseName)
if err != nil {
return err
}
@@ -126,14 +126,14 @@ func GetUserSessionInitInfo(
// Other users must reach for system.users no matter what, because
// only that contains the truth about whether the user exists.
authInfo, settingsEntries, err = retrieveSessionInitInfoWithCache(
- ctx, execCfg, ie, user, databaseName,
+ ctx, execCfg, user, databaseName,
)
if err != nil {
return err
}
// Find whether the user is an admin.
- return execCfg.CollectionFactory.Txn(ctx, execCfg.DB, func(
+ return execCfg.InternalExecutorFactory.DescsTxn(ctx, execCfg.DB, func(
ctx context.Context, txn *kv.Txn, descsCol *descs.Collection,
) error {
memberships, err := MemberOfWithAdminOption(
@@ -203,11 +203,7 @@ func getUserInfoRunFn(
}
func retrieveSessionInitInfoWithCache(
- ctx context.Context,
- execCfg *ExecutorConfig,
- ie *InternalExecutor,
- userName username.SQLUsername,
- databaseName string,
+ ctx context.Context, execCfg *ExecutorConfig, userName username.SQLUsername, databaseName string,
) (aInfo sessioninit.AuthInfo, settingsEntries []sessioninit.SettingsCacheEntry, err error) {
if err = func() (retErr error) {
makePlanner := func(opName string) (interface{}, func()) {
@@ -223,9 +219,8 @@ func retrieveSessionInitInfoWithCache(
aInfo, retErr = execCfg.SessionInitCache.GetAuthInfo(
ctx,
execCfg.Settings,
- ie,
execCfg.DB,
- execCfg.CollectionFactory,
+ execCfg.InternalExecutorFactory,
userName,
retrieveAuthInfo,
makePlanner,
@@ -240,9 +235,8 @@ func retrieveSessionInitInfoWithCache(
settingsEntries, retErr = execCfg.SessionInitCache.GetDefaultSettings(
ctx,
execCfg.Settings,
- ie,
execCfg.DB,
- execCfg.CollectionFactory,
+ execCfg.InternalExecutorFactory,
userName,
databaseName,
retrieveDefaultSettings,
@@ -258,7 +252,7 @@ func retrieveSessionInitInfoWithCache(
func retrieveAuthInfo(
ctx context.Context,
- ie sqlutil.InternalExecutor,
+ f descs.TxnManager,
user username.SQLUsername,
makePlanner func(opName string) (interface{}, func()),
settings *cluster.Settings,
@@ -268,10 +262,16 @@ func retrieveAuthInfo(
// we should always look up the latest data.
const getHashedPassword = `SELECT "hashedPassword" FROM system.public.users ` +
`WHERE username=$1`
- values, err := ie.QueryRowEx(
- ctx, "get-hashed-pwd", nil, /* txn */
- sessiondata.InternalExecutorOverride{User: username.RootUserName()},
- getHashedPassword, user)
+ var values tree.Datums
+ var err error
+ _ = f.RunWithoutTxn(ctx, func(ctx context.Context, ie sqlutil.InternalExecutor) error {
+ values, err = ie.QueryRowEx(
+ ctx, "get-hashed-pwd", nil, /* txn */
+ sessiondata.InternalExecutorOverride{User: username.RootUserName()},
+ getHashedPassword, user)
+ return err
+ })
+
if err != nil {
return aInfo, errors.Wrapf(err, "error looking up user %s", user)
}
@@ -297,12 +297,17 @@ func retrieveAuthInfo(
const getLoginDependencies = `SELECT option, value FROM system.public.role_options ` +
`WHERE username=$1 AND option IN ('NOLOGIN', 'VALID UNTIL', 'NOSQLLOGIN')`
- roleOptsIt, err := ie.QueryIteratorEx(
- ctx, "get-login-dependencies", nil, /* txn */
- sessiondata.InternalExecutorOverride{User: username.RootUserName()},
- getLoginDependencies,
- user,
- )
+ var roleOptsIt sqlutil.InternalRows
+ _ = f.RunWithoutTxn(ctx, func(ctx context.Context, ie sqlutil.InternalExecutor) error {
+ roleOptsIt, err = ie.QueryIteratorEx(
+ ctx, "get-login-dependencies", nil, /* txn */
+ sessiondata.InternalExecutorOverride{User: username.RootUserName()},
+ getLoginDependencies,
+ user,
+ )
+ return err
+ })
+
if err != nil {
return aInfo, errors.Wrapf(err, "error looking up user %s", user)
}
@@ -366,7 +371,7 @@ func retrieveAuthInfo(
}
func retrieveDefaultSettings(
- ctx context.Context, ie sqlutil.InternalExecutor, user username.SQLUsername, databaseID descpb.ID,
+ ctx context.Context, f descs.TxnManager, user username.SQLUsername, databaseID descpb.ID,
) (settingsEntries []sessioninit.SettingsCacheEntry, retErr error) {
// Add an empty slice for all the keys so that something gets cached and
// prevents a lookup for the same key from happening later.
@@ -398,13 +403,19 @@ WHERE
`
// We use a nil txn as role settings are not tied to any transaction state,
// and we should always look up the latest data.
- defaultSettingsIt, err := ie.QueryIteratorEx(
- ctx, "get-default-settings", nil, /* txn */
- sessiondata.InternalExecutorOverride{User: username.RootUserName()},
- getDefaultSettings,
- user,
- databaseID,
- )
+ var defaultSettingsIt sqlutil.InternalRows
+ var err error
+ _ = f.RunWithoutTxn(ctx, func(ctx context.Context, ie sqlutil.InternalExecutor) error {
+ defaultSettingsIt, err = ie.QueryIteratorEx(
+ ctx, "get-default-settings", nil, /* txn */
+ sessiondata.InternalExecutorOverride{User: username.RootUserName()},
+ getDefaultSettings,
+ user,
+ databaseID,
+ )
+ return err
+ })
+
if err != nil {
return nil, errors.Wrapf(err, "error looking up user %s", user)
}
diff --git a/pkg/upgrade/tenant_upgrade.go b/pkg/upgrade/tenant_upgrade.go
index f6ef9c1a80c7..ad9045314032 100644
--- a/pkg/upgrade/tenant_upgrade.go
+++ b/pkg/upgrade/tenant_upgrade.go
@@ -32,14 +32,15 @@ import (
// TenantDeps are the dependencies of upgrades which perform actions at the
// SQL layer.
type TenantDeps struct {
- DB *kv.DB
- Codec keys.SQLCodec
- Settings *cluster.Settings
- CollectionFactory *descs.CollectionFactory
- LeaseManager *lease.Manager
- JobRegistry *jobs.Registry
- InternalExecutor sqlutil.InternalExecutor
- SessionData *sessiondata.SessionData
+ DB *kv.DB
+ Codec keys.SQLCodec
+ Settings *cluster.Settings
+ CollectionFactory *descs.CollectionFactory
+ InternalExecutorFactory descs.TxnManager
+ LeaseManager *lease.Manager
+ JobRegistry *jobs.Registry
+ InternalExecutor sqlutil.InternalExecutor
+ SessionData *sessiondata.SessionData
SpanConfig struct { // deps for span config upgrades; can be removed accordingly
spanconfig.KVAccessor
diff --git a/pkg/upgrade/upgradejob/upgrade_job.go b/pkg/upgrade/upgradejob/upgrade_job.go
index 902e6339ad69..bbf8291a06f1 100644
--- a/pkg/upgrade/upgradejob/upgrade_job.go
+++ b/pkg/upgrade/upgradejob/upgrade_job.go
@@ -89,15 +89,16 @@ func (r resumer) Resume(ctx context.Context, execCtxI interface{}) error {
err = m.Run(ctx, cv, mc.SystemDeps(), r.j)
case *upgrade.TenantUpgrade:
tenantDeps := upgrade.TenantDeps{
- DB: execCtx.ExecCfg().DB,
- Codec: execCtx.ExecCfg().Codec,
- Settings: execCtx.ExecCfg().Settings,
- CollectionFactory: execCtx.ExecCfg().CollectionFactory,
- LeaseManager: execCtx.ExecCfg().LeaseManager,
- InternalExecutor: execCtx.ExecCfg().InternalExecutor,
- JobRegistry: execCtx.ExecCfg().JobRegistry,
- TestingKnobs: execCtx.ExecCfg().UpgradeTestingKnobs,
- SessionData: execCtx.SessionData(),
+ DB: execCtx.ExecCfg().DB,
+ Codec: execCtx.ExecCfg().Codec,
+ Settings: execCtx.ExecCfg().Settings,
+ CollectionFactory: execCtx.ExecCfg().CollectionFactory,
+ InternalExecutorFactory: execCtx.ExecCfg().InternalExecutorFactory,
+ LeaseManager: execCtx.ExecCfg().LeaseManager,
+ InternalExecutor: execCtx.ExecCfg().InternalExecutor,
+ JobRegistry: execCtx.ExecCfg().JobRegistry,
+ TestingKnobs: execCtx.ExecCfg().UpgradeTestingKnobs,
+ SessionData: execCtx.SessionData(),
}
tenantDeps.SpanConfig.KVAccessor = execCtx.ExecCfg().SpanConfigKVAccessor
tenantDeps.SpanConfig.Splitter = execCtx.ExecCfg().SpanConfigSplitter
diff --git a/pkg/upgrade/upgrades/descriptor_utils.go b/pkg/upgrade/upgrades/descriptor_utils.go
index 77abcc951787..36cfb4767c5c 100644
--- a/pkg/upgrade/upgrades/descriptor_utils.go
+++ b/pkg/upgrade/upgrades/descriptor_utils.go
@@ -88,7 +88,7 @@ func runPostDeserializationChangesOnAllDescriptors(
maybeUpgradeDescriptors := func(
ctx context.Context, d upgrade.TenantDeps, toUpgrade []descpb.ID,
) error {
- return d.CollectionFactory.Txn(ctx, d.DB, func(
+ return d.InternalExecutorFactory.DescsTxn(ctx, d.DB, func(
ctx context.Context, txn *kv.Txn, descriptors *descs.Collection,
) error {
descs, err := descriptors.GetMutableDescriptorsByID(ctx, txn, toUpgrade...)
diff --git a/pkg/upgrade/upgrades/helpers_test.go b/pkg/upgrade/upgrades/helpers_test.go
index d48e4e9ccd55..e3cc524f8562 100644
--- a/pkg/upgrade/upgrades/helpers_test.go
+++ b/pkg/upgrade/upgrades/helpers_test.go
@@ -75,7 +75,7 @@ func InjectLegacyTable(
table catalog.TableDescriptor,
getDeprecatedDescriptor func() *descpb.TableDescriptor,
) {
- err := s.CollectionFactory().(*descs.CollectionFactory).Txn(ctx, s.DB(), func(
+ err := s.InternalExecutorFactory().(descs.TxnManager).DescsTxn(ctx, s.DB(), func(
ctx context.Context, txn *kv.Txn, descriptors *descs.Collection,
) error {
id := table.GetID()
@@ -140,7 +140,7 @@ func GetTable(
) catalog.TableDescriptor {
var table catalog.TableDescriptor
// Retrieve the table.
- err := s.CollectionFactory().(*descs.CollectionFactory).Txn(ctx, s.DB(), func(
+ err := s.InternalExecutorFactory().(descs.TxnManager).DescsTxn(ctx, s.DB(), func(
ctx context.Context, txn *kv.Txn, descriptors *descs.Collection,
) (err error) {
table, err = descriptors.GetImmutableTableByID(ctx, txn, tableID, tree.ObjectLookupFlags{
diff --git a/pkg/upgrade/upgrades/schema_changes.go b/pkg/upgrade/upgrades/schema_changes.go
index a4d25773a4ab..427580b2d5aa 100644
--- a/pkg/upgrade/upgrades/schema_changes.go
+++ b/pkg/upgrade/upgrades/schema_changes.go
@@ -142,7 +142,7 @@ func readTableDescriptor(
) (catalog.TableDescriptor, error) {
var t catalog.TableDescriptor
- if err := d.CollectionFactory.Txn(ctx, d.DB, func(
+ if err := d.InternalExecutorFactory.DescsTxn(ctx, d.DB, func(
ctx context.Context, txn *kv.Txn, descriptors *descs.Collection,
) (err error) {
t, err = descriptors.GetImmutableTableByID(ctx, txn, tableID, tree.ObjectLookupFlags{
diff --git a/pkg/upgrade/upgrades/schema_changes_external_test.go b/pkg/upgrade/upgrades/schema_changes_external_test.go
index 75b039208eb7..72829fd04787 100644
--- a/pkg/upgrade/upgrades/schema_changes_external_test.go
+++ b/pkg/upgrade/upgrades/schema_changes_external_test.go
@@ -306,7 +306,7 @@ CREATE TABLE test.test_table (
tdb.Exec(t, "CREATE DATABASE test")
tdb.Exec(t, createTableAfter)
var desc catalog.TableDescriptor
- require.NoError(t, s.CollectionFactory().(*descs.CollectionFactory).Txn(ctx, s.DB(), func(
+ require.NoError(t, s.InternalExecutorFactory().(descs.TxnManager).DescsTxn(ctx, s.DB(), func(
ctx context.Context, txn *kv.Txn, descriptors *descs.Collection,
) (err error) {
tn := tree.MakeTableNameWithSchema("test", "public", "test_table")
diff --git a/pkg/upgrade/upgrades/update_invalid_column_ids_in_sequence_back_references.go b/pkg/upgrade/upgrades/update_invalid_column_ids_in_sequence_back_references.go
index f7f6dc3b1e01..03a315e0f8cd 100644
--- a/pkg/upgrade/upgrades/update_invalid_column_ids_in_sequence_back_references.go
+++ b/pkg/upgrade/upgrades/update_invalid_column_ids_in_sequence_back_references.go
@@ -37,7 +37,7 @@ func updateInvalidColumnIDsInSequenceBackReferences(
for {
var currSeqID descpb.ID
var done bool
- if err := d.CollectionFactory.TxnWithExecutor(ctx, d.DB, d.SessionData, func(
+ if err := d.InternalExecutorFactory.DescsTxnWithExecutor(ctx, d.DB, d.SessionData, func(
ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, ie sqlutil.InternalExecutor,
) (err error) {
currSeqID = lastSeqID
diff --git a/pkg/upgrade/upgrades/upgrade_sequence_to_be_referenced_by_ID.go b/pkg/upgrade/upgrades/upgrade_sequence_to_be_referenced_by_ID.go
index 9d0b2c0a943a..a51da4f916bd 100644
--- a/pkg/upgrade/upgrades/upgrade_sequence_to_be_referenced_by_ID.go
+++ b/pkg/upgrade/upgrades/upgrade_sequence_to_be_referenced_by_ID.go
@@ -35,7 +35,7 @@ import (
func upgradeSequenceToBeReferencedByID(
ctx context.Context, _ clusterversion.ClusterVersion, d upgrade.TenantDeps, _ *jobs.Job,
) error {
- return d.CollectionFactory.TxnWithExecutor(ctx, d.DB, d.SessionData, func(
+ return d.InternalExecutorFactory.DescsTxnWithExecutor(ctx, d.DB, d.SessionData, func(
ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, ie sqlutil.InternalExecutor,
) (err error) {
var lastUpgradedID descpb.ID
@@ -112,7 +112,7 @@ func findNextTableToUpgrade(
func maybeUpgradeSeqReferencesInTableOrView(
ctx context.Context, idToUpgrade descpb.ID, d upgrade.TenantDeps,
) error {
- return d.CollectionFactory.Txn(ctx, d.DB, func(
+ return d.InternalExecutorFactory.DescsTxn(ctx, d.DB, func(
ctx context.Context, txn *kv.Txn, descriptors *descs.Collection,
) error {
// Set up: retrieve table desc for `idToUpgrade` and a schema resolver
From 241d790aaecbdf39e6f0f1293c9e318f0c6f3dd9 Mon Sep 17 00:00:00 2001
From: Xiang Gu
Date: Tue, 27 Sep 2022 13:15:01 -0400
Subject: [PATCH 09/16] catalog: added two methods in interface Descriptor and
DescriptorBuilder
This PR added a `[]byte` field (called `rawBytesInStorage`) to the
[table|schema|database|type|function] immutable descriptor structs and
the [table|schema|database|type|function] descriptor builder structs.
The purpose is to facilitate the next commit, where we want to keep
track of what is actually in storage for each descriptor, so that we
can use a CPut when updating a descriptor in storage. See the next
commit for more details.
Accompanying this added field are two new methods:
- `GetRawBytesInStorage() []byte` in `catalog.Descriptor`, and
- `SetRawBytesInStorage([]byte)` in `catalog.DescriptorBuilder`
They serve as the getter and setter of this field.
We also modified the methods that construct a builder from a descriptor
so that they carry this field over.
The byte slice field is initialized and carried over between builders
and descriptors roughly as in the following graph, where upper-case
names denote entities, lower-case phrases denote actions, and the
arrows show the direction of flow.
DESCPB.DESCRIPTOR
        |
  construct a builder
        |
        V
CATALOG.BUILDER
        |
  set byte slice field
        |
        V
CATALOG.BUILDER (with the byte slice field set)
        |
  build a desc
        |
        V
MUTABLEDESCRIPTOR or DESCRIPTOR
(with the byte slice field carried over from the builder)
        |
  new builder
        |
        V
CATALOG.BUILDER
(with the byte slice field carried over from the descriptor)
        |
       ...
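For illustration, here is a minimal sketch of the intended round-trip
(the helper `roundTripRawBytes` is hypothetical; the interfaces and
methods are the ones added in this commit):

    // roundTripRawBytes shows how raw storage bytes set on a builder are
    // carried into the descriptor it builds, and from there into any
    // builder derived from that descriptor.
    func roundTripRawBytes(b catalog.DescriptorBuilder, raw []byte) []byte {
        b.SetRawBytesInStorage(raw) // deep-copies raw into the builder
        desc := b.BuildImmutable()  // the built descriptor carries the bytes
        b2 := desc.NewBuilder()     // a derived builder carries them as well
        return b2.BuildImmutable().GetRawBytesInStorage()
    }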
---
pkg/sql/catalog/dbdesc/database_desc.go | 21 ++++++++++++--
.../catalog/dbdesc/database_desc_builder.go | 10 +++++++
pkg/sql/catalog/descriptor.go | 7 +++++
pkg/sql/catalog/funcdesc/func_desc.go | 21 ++++++++++++--
pkg/sql/catalog/funcdesc/func_desc_builder.go | 10 +++++++
.../catalog/schemadesc/public_schema_desc.go | 1 +
pkg/sql/catalog/schemadesc/schema_desc.go | 21 ++++++++++++--
.../catalog/schemadesc/schema_desc_builder.go | 10 +++++++
.../schemadesc/temporary_schema_desc.go | 1 +
.../catalog/schemadesc/virtual_schema_desc.go | 1 +
pkg/sql/catalog/tabledesc/structured.go | 5 ++++
pkg/sql/catalog/tabledesc/table_desc.go | 28 +++++++++++++++++--
.../catalog/tabledesc/table_desc_builder.go | 18 +++++++++---
.../typedesc/table_implicit_record_type.go | 5 ++++
pkg/sql/catalog/typedesc/type_desc.go | 21 ++++++++++++--
pkg/sql/catalog/typedesc/type_desc_builder.go | 21 +++++++++++---
16 files changed, 183 insertions(+), 18 deletions(-)
diff --git a/pkg/sql/catalog/dbdesc/database_desc.go b/pkg/sql/catalog/dbdesc/database_desc.go
index 154d269ce61d..240b92cf9c3f 100644
--- a/pkg/sql/catalog/dbdesc/database_desc.go
+++ b/pkg/sql/catalog/dbdesc/database_desc.go
@@ -48,6 +48,9 @@ type immutable struct {
// changed represents whether or not the descriptor was changed
// after RunPostDeserializationChanges.
changes catalog.PostDeserializationChanges
+
+ // This is the raw bytes (tag + data) of the database descriptor in storage.
+ rawBytesInStorage []byte
}
// Mutable wraps a database descriptor and provides methods
@@ -142,7 +145,9 @@ func (desc *immutable) ByteSize() int64 {
// NewBuilder implements the catalog.Descriptor interface.
func (desc *immutable) NewBuilder() catalog.DescriptorBuilder {
- return newBuilder(desc.DatabaseDesc(), hlc.Timestamp{}, desc.isUncommittedVersion, desc.changes)
+ b := newBuilder(desc.DatabaseDesc(), hlc.Timestamp{}, desc.isUncommittedVersion, desc.changes)
+ b.SetRawBytesInStorage(desc.GetRawBytesInStorage())
+ return b
}
// NewBuilder implements the catalog.Descriptor interface.
@@ -150,7 +155,9 @@ func (desc *immutable) NewBuilder() catalog.DescriptorBuilder {
// It overrides the wrapper's implementation to deal with the fact that
// mutable has overridden the definition of IsUncommittedVersion.
func (desc *Mutable) NewBuilder() catalog.DescriptorBuilder {
- return newBuilder(desc.DatabaseDesc(), hlc.Timestamp{}, desc.IsUncommittedVersion(), desc.changes)
+ b := newBuilder(desc.DatabaseDesc(), hlc.Timestamp{}, desc.IsUncommittedVersion(), desc.changes)
+ b.SetRawBytesInStorage(desc.GetRawBytesInStorage())
+ return b
}
// IsMultiRegion implements the DatabaseDescriptor interface.
@@ -521,6 +528,16 @@ func (desc *immutable) SkipNamespace() bool {
return false
}
+// GetRawBytesInStorage implements the catalog.Descriptor interface.
+func (desc *immutable) GetRawBytesInStorage() []byte {
+ return desc.rawBytesInStorage
+}
+
+// GetRawBytesInStorage implements the catalog.Descriptor interface.
+func (desc *Mutable) GetRawBytesInStorage() []byte {
+ return desc.rawBytesInStorage
+}
+
// maybeRemoveDroppedSelfEntryFromSchemas removes an entry in the Schemas map corresponding to the
// database itself which was added due to a bug in prior versions when dropping any user-defined schema.
// The bug inserted an entry for the database rather than the schema being dropped. This function fixes the
diff --git a/pkg/sql/catalog/dbdesc/database_desc_builder.go b/pkg/sql/catalog/dbdesc/database_desc_builder.go
index ebfbb9837255..8fa6f3900058 100644
--- a/pkg/sql/catalog/dbdesc/database_desc_builder.go
+++ b/pkg/sql/catalog/dbdesc/database_desc_builder.go
@@ -40,6 +40,8 @@ type databaseDescriptorBuilder struct {
mvccTimestamp hlc.Timestamp
isUncommittedVersion bool
changes catalog.PostDeserializationChanges
+ // This is the raw bytes (tag + data) of the database descriptor in storage.
+ rawBytesInStorage []byte
}
var _ DatabaseDescriptorBuilder = &databaseDescriptorBuilder{}
@@ -144,6 +146,11 @@ func (ddb *databaseDescriptorBuilder) RunRestoreChanges(
return nil
}
+// SetRawBytesInStorage implements the catalog.DescriptorBuilder interface.
+func (ddb *databaseDescriptorBuilder) SetRawBytesInStorage(rawBytes []byte) {
+ ddb.rawBytesInStorage = append([]byte(nil), rawBytes...) // deep-copy
+}
+
func maybeConvertIncompatibleDBPrivilegesToDefaultPrivileges(
privileges *catpb.PrivilegeDescriptor, defaultPrivileges *catpb.DefaultPrivilegeDescriptor,
) (hasChanged bool) {
@@ -199,6 +206,7 @@ func (ddb *databaseDescriptorBuilder) BuildImmutableDatabase() catalog.DatabaseD
DatabaseDescriptor: *desc,
isUncommittedVersion: ddb.isUncommittedVersion,
changes: ddb.changes,
+ rawBytesInStorage: append([]byte(nil), ddb.rawBytesInStorage...), // deep-copy
}
}
@@ -218,6 +226,7 @@ func (ddb *databaseDescriptorBuilder) BuildExistingMutableDatabase() *Mutable {
DatabaseDescriptor: *ddb.maybeModified,
changes: ddb.changes,
isUncommittedVersion: ddb.isUncommittedVersion,
+ rawBytesInStorage: append([]byte(nil), ddb.rawBytesInStorage...), // deep-copy
},
ClusterVersion: &immutable{DatabaseDescriptor: *ddb.original},
}
@@ -240,6 +249,7 @@ func (ddb *databaseDescriptorBuilder) BuildCreatedMutableDatabase() *Mutable {
DatabaseDescriptor: *desc,
changes: ddb.changes,
isUncommittedVersion: ddb.isUncommittedVersion,
+ rawBytesInStorage: append([]byte(nil), ddb.rawBytesInStorage...), // deep-copy
},
}
}
diff --git a/pkg/sql/catalog/descriptor.go b/pkg/sql/catalog/descriptor.go
index 2237621db9f8..e86cbb3227a6 100644
--- a/pkg/sql/catalog/descriptor.go
+++ b/pkg/sql/catalog/descriptor.go
@@ -88,6 +88,9 @@ type DescriptorBuilder interface {
// upgrade migrations
RunRestoreChanges(descLookupFn func(id descpb.ID) Descriptor) error
+ // SetRawBytesInStorage sets `rawBytesInStorage` field by deep-copying `rawBytes`.
+ SetRawBytesInStorage(rawBytes []byte)
+
// BuildImmutable returns an immutable Descriptor.
BuildImmutable() Descriptor
@@ -225,6 +228,10 @@ type Descriptor interface {
// SkipNamespace is true when a descriptor should not have a namespace record.
SkipNamespace() bool
+
+ // GetRawBytesInStorage returns the raw bytes (tag + data) of the descriptor in storage.
+ // It is exclusively used in the CPut when persisting an updated descriptor to storage.
+ GetRawBytesInStorage() []byte
}
// HydratableDescriptor represent a Descriptor which needs user-define type
diff --git a/pkg/sql/catalog/funcdesc/func_desc.go b/pkg/sql/catalog/funcdesc/func_desc.go
index f71cecabe3f7..8d152dd41c2f 100644
--- a/pkg/sql/catalog/funcdesc/func_desc.go
+++ b/pkg/sql/catalog/funcdesc/func_desc.go
@@ -46,6 +46,9 @@ type immutable struct {
isUncommittedVersion bool
changes catalog.PostDeserializationChanges
+
+ // This is the raw bytes (tag + data) of the function descriptor in storage.
+ rawBytesInStorage []byte
}
// Mutable represents a mutable function descriptor.
@@ -147,12 +150,16 @@ func (desc *immutable) GetDeclarativeSchemaChangerState() *scpb.DescriptorState
// NewBuilder implements the catalog.Descriptor interface.
func (desc *Mutable) NewBuilder() catalog.DescriptorBuilder {
- return newBuilder(&desc.FunctionDescriptor, hlc.Timestamp{}, desc.IsUncommittedVersion(), desc.changes)
+ b := newBuilder(&desc.FunctionDescriptor, hlc.Timestamp{}, desc.IsUncommittedVersion(), desc.changes)
+ b.SetRawBytesInStorage(desc.GetRawBytesInStorage())
+ return b
}
// NewBuilder implements the catalog.Descriptor interface.
func (desc *immutable) NewBuilder() catalog.DescriptorBuilder {
- return newBuilder(&desc.FunctionDescriptor, hlc.Timestamp{}, desc.IsUncommittedVersion(), desc.changes)
+ b := newBuilder(&desc.FunctionDescriptor, hlc.Timestamp{}, desc.IsUncommittedVersion(), desc.changes)
+ b.SetRawBytesInStorage(desc.GetRawBytesInStorage())
+ return b
}
// GetReferencedDescIDs implements the catalog.Descriptor interface.
@@ -363,6 +370,16 @@ func (desc *immutable) SkipNamespace() bool {
return true
}
+// GetRawBytesInStorage implements the catalog.Descriptor interface.
+func (desc *immutable) GetRawBytesInStorage() []byte {
+ return desc.rawBytesInStorage
+}
+
+// GetRawBytesInStorage implements the catalog.Descriptor interface.
+func (desc *Mutable) GetRawBytesInStorage() []byte {
+ return desc.rawBytesInStorage
+}
+
// IsUncommittedVersion implements the catalog.LeasableDescriptor interface.
func (desc *Mutable) IsUncommittedVersion() bool {
return desc.IsNew() || desc.clusterVersion.GetVersion() != desc.GetVersion()
diff --git a/pkg/sql/catalog/funcdesc/func_desc_builder.go b/pkg/sql/catalog/funcdesc/func_desc_builder.go
index bf0d0140d64c..d28c285fd978 100644
--- a/pkg/sql/catalog/funcdesc/func_desc_builder.go
+++ b/pkg/sql/catalog/funcdesc/func_desc_builder.go
@@ -71,6 +71,8 @@ type functionDescriptorBuilder struct {
mvccTimestamp hlc.Timestamp
isUncommittedVersion bool
changes catalog.PostDeserializationChanges
+ // This is the raw bytes (tag + data) of the function descriptor in storage.
+ rawBytesInStorage []byte
}
// DescriptorType implements the catalog.DescriptorBuilder interface.
@@ -107,6 +109,11 @@ func (fdb *functionDescriptorBuilder) RunRestoreChanges(
return nil
}
+// SetRawBytesInStorage implements the catalog.DescriptorBuilder interface.
+func (fdb *functionDescriptorBuilder) SetRawBytesInStorage(rawBytes []byte) {
+ fdb.rawBytesInStorage = append([]byte(nil), rawBytes...) // deep-copy
+}
+
// BuildImmutable implements the catalog.DescriptorBuilder interface.
func (fdb *functionDescriptorBuilder) BuildImmutable() catalog.Descriptor {
return fdb.BuildImmutableFunction()
@@ -132,6 +139,7 @@ func (fdb *functionDescriptorBuilder) BuildImmutableFunction() catalog.FunctionD
FunctionDescriptor: *desc,
isUncommittedVersion: fdb.isUncommittedVersion,
changes: fdb.changes,
+ rawBytesInStorage: append([]byte(nil), fdb.rawBytesInStorage...), // deep-copy
}
}
@@ -145,6 +153,7 @@ func (fdb *functionDescriptorBuilder) BuildExistingMutableFunction() *Mutable {
FunctionDescriptor: *fdb.maybeModified,
isUncommittedVersion: fdb.isUncommittedVersion,
changes: fdb.changes,
+ rawBytesInStorage: append([]byte(nil), fdb.rawBytesInStorage...), // deep-copy
},
clusterVersion: &immutable{FunctionDescriptor: *fdb.original},
}
@@ -161,6 +170,7 @@ func (fdb *functionDescriptorBuilder) BuildCreatedMutableFunction() *Mutable {
FunctionDescriptor: *desc,
isUncommittedVersion: fdb.isUncommittedVersion,
changes: fdb.changes,
+ rawBytesInStorage: append([]byte(nil), fdb.rawBytesInStorage...), // deep-copy
},
}
}
diff --git a/pkg/sql/catalog/schemadesc/public_schema_desc.go b/pkg/sql/catalog/schemadesc/public_schema_desc.go
index 96bdc5a4fa7a..98cf16c28b0f 100644
--- a/pkg/sql/catalog/schemadesc/public_schema_desc.go
+++ b/pkg/sql/catalog/schemadesc/public_schema_desc.go
@@ -51,6 +51,7 @@ func (p public) GetName() string { return tree.PublicSchema }
func (p public) GetPrivileges() *catpb.PrivilegeDescriptor {
return catpb.NewPublicSchemaPrivilegeDescriptor()
}
+func (p public) GetRawBytesInStorage() []byte { return nil }
// GetPrivilegeDescriptor implements the PrivilegeObject interface.
func (p public) GetPrivilegeDescriptor(
diff --git a/pkg/sql/catalog/schemadesc/schema_desc.go b/pkg/sql/catalog/schemadesc/schema_desc.go
index 0f597b574631..ced3e433840a 100644
--- a/pkg/sql/catalog/schemadesc/schema_desc.go
+++ b/pkg/sql/catalog/schemadesc/schema_desc.go
@@ -49,6 +49,9 @@ type immutable struct {
// changed represents how the descriptor was changed after
// RunPostDeserializationChanges.
changes catalog.PostDeserializationChanges
+
+ // This is the raw bytes (tag + data) of the schema descriptor in storage.
+ rawBytesInStorage []byte
}
func (desc *immutable) SchemaKind() catalog.ResolvedSchemaKind {
@@ -70,6 +73,16 @@ func (desc *immutable) SkipNamespace() bool {
return false
}
+// GetRawBytesInStorage implements the catalog.Descriptor interface.
+func (desc *immutable) GetRawBytesInStorage() []byte {
+ return desc.rawBytesInStorage
+}
+
+// GetRawBytesInStorage implements the catalog.Descriptor interface.
+func (desc *Mutable) GetRawBytesInStorage() []byte {
+ return desc.rawBytesInStorage
+}
+
// SafeMessage makes Mutable a SafeMessager.
func (desc *Mutable) SafeMessage() string {
return formatSafeMessage("schemadesc.Mutable", desc)
@@ -169,12 +182,16 @@ func (desc *immutable) GetDeclarativeSchemaChangerState() *scpb.DescriptorState
// It overrides the wrapper's implementation to deal with the fact that
// mutable has overridden the definition of IsUncommittedVersion.
func (desc *Mutable) NewBuilder() catalog.DescriptorBuilder {
- return newBuilder(desc.SchemaDesc(), hlc.Timestamp{}, desc.IsUncommittedVersion(), desc.changes)
+ b := newBuilder(desc.SchemaDesc(), hlc.Timestamp{}, desc.IsUncommittedVersion(), desc.changes)
+ b.SetRawBytesInStorage(desc.GetRawBytesInStorage())
+ return b
}
// NewBuilder implements the catalog.Descriptor interface.
func (desc *immutable) NewBuilder() catalog.DescriptorBuilder {
- return newBuilder(desc.SchemaDesc(), hlc.Timestamp{}, desc.IsUncommittedVersion(), desc.changes)
+ b := newBuilder(desc.SchemaDesc(), hlc.Timestamp{}, desc.IsUncommittedVersion(), desc.changes)
+ b.SetRawBytesInStorage(desc.GetRawBytesInStorage())
+ return b
}
// ValidateSelf implements the catalog.Descriptor interface.
diff --git a/pkg/sql/catalog/schemadesc/schema_desc_builder.go b/pkg/sql/catalog/schemadesc/schema_desc_builder.go
index 34371f8882c4..89668e94e2b1 100644
--- a/pkg/sql/catalog/schemadesc/schema_desc_builder.go
+++ b/pkg/sql/catalog/schemadesc/schema_desc_builder.go
@@ -35,6 +35,8 @@ type schemaDescriptorBuilder struct {
mvccTimestamp hlc.Timestamp
isUncommittedVersion bool
changes catalog.PostDeserializationChanges
+ // This is the raw bytes (tag + data) of the schema descriptor in storage.
+ rawBytesInStorage []byte
}
var _ SchemaDescriptorBuilder = &schemaDescriptorBuilder{}
@@ -119,6 +121,11 @@ func (sdb *schemaDescriptorBuilder) RunRestoreChanges(
return nil
}
+// SetRawBytesInStorage implements the catalog.DescriptorBuilder interface.
+func (sdb *schemaDescriptorBuilder) SetRawBytesInStorage(rawBytes []byte) {
+ sdb.rawBytesInStorage = append([]byte(nil), rawBytes...) // deep-copy
+}
+
// BuildImmutable implements the catalog.DescriptorBuilder interface.
func (sdb *schemaDescriptorBuilder) BuildImmutable() catalog.Descriptor {
return sdb.BuildImmutableSchema()
@@ -134,6 +141,7 @@ func (sdb *schemaDescriptorBuilder) BuildImmutableSchema() catalog.SchemaDescrip
SchemaDescriptor: *desc,
changes: sdb.changes,
isUncommittedVersion: sdb.isUncommittedVersion,
+ rawBytesInStorage: append([]byte(nil), sdb.rawBytesInStorage...), // deep-copy
}
}
@@ -153,6 +161,7 @@ func (sdb *schemaDescriptorBuilder) BuildExistingMutableSchema() *Mutable {
SchemaDescriptor: *sdb.maybeModified,
changes: sdb.changes,
isUncommittedVersion: sdb.isUncommittedVersion,
+ rawBytesInStorage: append([]byte(nil), sdb.rawBytesInStorage...), // deep-copy
},
ClusterVersion: &immutable{SchemaDescriptor: *sdb.original},
}
@@ -175,6 +184,7 @@ func (sdb *schemaDescriptorBuilder) BuildCreatedMutableSchema() *Mutable {
SchemaDescriptor: *desc,
isUncommittedVersion: sdb.isUncommittedVersion,
changes: sdb.changes,
+ rawBytesInStorage: append([]byte(nil), sdb.rawBytesInStorage...), // deep-copy
},
}
}
diff --git a/pkg/sql/catalog/schemadesc/temporary_schema_desc.go b/pkg/sql/catalog/schemadesc/temporary_schema_desc.go
index 401f197eb604..3ce42d389b1f 100644
--- a/pkg/sql/catalog/schemadesc/temporary_schema_desc.go
+++ b/pkg/sql/catalog/schemadesc/temporary_schema_desc.go
@@ -54,6 +54,7 @@ func (p temporary) GetParentID() descpb.ID { return p.parentID }
func (p temporary) GetPrivileges() *catpb.PrivilegeDescriptor {
return catpb.NewTemporarySchemaPrivilegeDescriptor()
}
+func (p temporary) GetRawBytesInStorage() []byte { return nil }
// GetPrivilegeDescriptor implements the PrivilegeObject interface.
func (p temporary) GetPrivilegeDescriptor(
diff --git a/pkg/sql/catalog/schemadesc/virtual_schema_desc.go b/pkg/sql/catalog/schemadesc/virtual_schema_desc.go
index 293d869c4395..557cce579fa9 100644
--- a/pkg/sql/catalog/schemadesc/virtual_schema_desc.go
+++ b/pkg/sql/catalog/schemadesc/virtual_schema_desc.go
@@ -64,6 +64,7 @@ func (p virtual) GetParentID() descpb.ID { return descpb.InvalidID }
func (p virtual) GetPrivileges() *catpb.PrivilegeDescriptor {
return catpb.NewVirtualSchemaPrivilegeDescriptor()
}
+func (p virtual) GetRawBytesInStorage() []byte { return nil }
// GetPrivilegeDescriptor implements the PrivilegeObject interface.
func (p virtual) GetPrivilegeDescriptor(
diff --git a/pkg/sql/catalog/tabledesc/structured.go b/pkg/sql/catalog/tabledesc/structured.go
index be3fe0775bd5..c7c4d88e950f 100644
--- a/pkg/sql/catalog/tabledesc/structured.go
+++ b/pkg/sql/catalog/tabledesc/structured.go
@@ -1011,6 +1011,11 @@ func (desc *Mutable) OriginalVersion() descpb.DescriptorVersion {
return desc.ClusterVersion().Version
}
+// GetRawBytesInStorage implements the catalog.Descriptor interface.
+func (desc *Mutable) GetRawBytesInStorage() []byte {
+ return desc.rawBytesInStorage
+}
+
// ClusterVersion returns the version of the table descriptor read from the
// store, if any.
//
diff --git a/pkg/sql/catalog/tabledesc/table_desc.go b/pkg/sql/catalog/tabledesc/table_desc.go
index c89cc23a9f09..1ccda3df5c84 100644
--- a/pkg/sql/catalog/tabledesc/table_desc.go
+++ b/pkg/sql/catalog/tabledesc/table_desc.go
@@ -49,6 +49,14 @@ type wrapper struct {
columnCache *columnCache
changes catalog.PostDeserializationChanges
+
+ // This is the raw bytes (tag + data) of the table descriptor in storage.
+ rawBytesInStorage []byte
+}
+
+// GetRawBytesInStorage implements the catalog.Descriptor interface.
+func (desc *wrapper) GetRawBytesInStorage() []byte {
+ return desc.rawBytesInStorage
}
// IsUncommittedVersion implements the catalog.Descriptor interface.
@@ -100,11 +108,23 @@ type immutable struct {
// outboundFKs []*ForeignKeyConstraint
}
+// GetRawBytesInStorage implements the catalog.Descriptor interface.
+func (desc *immutable) GetRawBytesInStorage() []byte {
+ return desc.rawBytesInStorage
+}
+
// IsUncommittedVersion implements the Descriptor interface.
func (desc *immutable) IsUncommittedVersion() bool {
return desc.isUncommittedVersion
}
+// NewBuilder implements the Descriptor interface.
+func (desc *immutable) NewBuilder() catalog.DescriptorBuilder {
+ b := newBuilder(desc.TableDesc(), hlc.Timestamp{}, desc.isUncommittedVersion, desc.changes)
+ b.SetRawBytesInStorage(desc.GetRawBytesInStorage())
+ return b
+}
+
// DescriptorProto implements the Descriptor interface.
func (desc *wrapper) DescriptorProto() *descpb.Descriptor {
return &descpb.Descriptor{
@@ -125,7 +145,9 @@ func (desc *wrapper) ByteSize() int64 {
// NewBuilder implements the catalog.Descriptor interface.
func (desc *wrapper) NewBuilder() catalog.DescriptorBuilder {
- return newBuilder(desc.TableDesc(), hlc.Timestamp{}, desc.IsUncommittedVersion(), desc.changes)
+ b := newBuilder(desc.TableDesc(), hlc.Timestamp{}, desc.IsUncommittedVersion(), desc.changes)
+ b.SetRawBytesInStorage(desc.GetRawBytesInStorage())
+ return b
}
// GetPrimaryIndexID implements the TableDescriptor interface.
@@ -148,7 +170,9 @@ func (desc *Mutable) ImmutableCopy() catalog.Descriptor {
// It overrides the wrapper's implementation to deal with the fact that
// mutable has overridden the definition of IsUncommittedVersion.
func (desc *Mutable) NewBuilder() catalog.DescriptorBuilder {
- return newBuilder(desc.TableDesc(), hlc.Timestamp{}, desc.IsUncommittedVersion(), desc.changes)
+ b := newBuilder(desc.TableDesc(), hlc.Timestamp{}, desc.IsUncommittedVersion(), desc.changes)
+ b.SetRawBytesInStorage(desc.GetRawBytesInStorage())
+ return b
}
// IsUncommittedVersion implements the Descriptor interface.
diff --git a/pkg/sql/catalog/tabledesc/table_desc_builder.go b/pkg/sql/catalog/tabledesc/table_desc_builder.go
index c1d34d780479..84d6608a29d6 100644
--- a/pkg/sql/catalog/tabledesc/table_desc_builder.go
+++ b/pkg/sql/catalog/tabledesc/table_desc_builder.go
@@ -41,6 +41,8 @@ type tableDescriptorBuilder struct {
isUncommittedVersion bool
skipFKsWithNoMatchingTable bool
changes catalog.PostDeserializationChanges
+ // This is the raw bytes (tag + data) of the table descriptor in storage.
+ rawBytesInStorage []byte
}
var _ TableDescriptorBuilder = &tableDescriptorBuilder{}
@@ -174,6 +176,11 @@ func (tdb *tableDescriptorBuilder) RunRestoreChanges(
return err
}
+// SetRawBytesInStorage implements the catalog.DescriptorBuilder interface.
+func (tdb *tableDescriptorBuilder) SetRawBytesInStorage(rawBytes []byte) {
+ tdb.rawBytesInStorage = append([]byte(nil), rawBytes...) // deep-copy
+}
+
// BuildImmutable implements the catalog.DescriptorBuilder interface.
func (tdb *tableDescriptorBuilder) BuildImmutable() catalog.Descriptor {
return tdb.BuildImmutableTable()
@@ -188,6 +195,7 @@ func (tdb *tableDescriptorBuilder) BuildImmutableTable() catalog.TableDescriptor
imm := makeImmutable(desc)
imm.changes = tdb.changes
imm.isUncommittedVersion = tdb.isUncommittedVersion
+ imm.rawBytesInStorage = append([]byte(nil), tdb.rawBytesInStorage...) // deep-copy
return imm
}
@@ -204,8 +212,9 @@ func (tdb *tableDescriptorBuilder) BuildExistingMutableTable() *Mutable {
}
return &Mutable{
wrapper: wrapper{
- TableDescriptor: *tdb.maybeModified,
- changes: tdb.changes,
+ TableDescriptor: *tdb.maybeModified,
+ changes: tdb.changes,
+ rawBytesInStorage: append([]byte(nil), tdb.rawBytesInStorage...), // deep-copy
},
original: makeImmutable(tdb.original),
}
@@ -225,8 +234,9 @@ func (tdb *tableDescriptorBuilder) BuildCreatedMutableTable() *Mutable {
}
return &Mutable{
wrapper: wrapper{
- TableDescriptor: *desc,
- changes: tdb.changes,
+ TableDescriptor: *desc,
+ changes: tdb.changes,
+ rawBytesInStorage: append([]byte(nil), tdb.rawBytesInStorage...), // deep-copy
},
}
}
diff --git a/pkg/sql/catalog/typedesc/table_implicit_record_type.go b/pkg/sql/catalog/typedesc/table_implicit_record_type.go
index a4b04c9308c2..e1a3516e1bab 100644
--- a/pkg/sql/catalog/typedesc/table_implicit_record_type.go
+++ b/pkg/sql/catalog/typedesc/table_implicit_record_type.go
@@ -222,6 +222,11 @@ func (v TableImplicitRecordType) ValidateTxnCommit(
) {
}
+// GetRawBytesInStorage implements the catalog.Descriptor interface.
+func (v TableImplicitRecordType) GetRawBytesInStorage() []byte {
+ return nil
+}
+
// TypeDesc implements the TypeDescriptor interface.
func (v TableImplicitRecordType) TypeDesc() *descpb.TypeDescriptor {
v.panicNotSupported("TypeDesc")
diff --git a/pkg/sql/catalog/typedesc/type_desc.go b/pkg/sql/catalog/typedesc/type_desc.go
index 431204352ad7..7adf03e10248 100644
--- a/pkg/sql/catalog/typedesc/type_desc.go
+++ b/pkg/sql/catalog/typedesc/type_desc.go
@@ -98,6 +98,9 @@ type immutable struct {
// changes represents how descriptor was changes after
// RunPostDeserializationChanges.
changes catalog.PostDeserializationChanges
+
+ // This is the raw bytes (tag + data) of the type descriptor in storage.
+ rawBytesInStorage []byte
}
// UpdateCachedFieldsOnModifiedMutable refreshes the immutable field by
@@ -188,12 +191,16 @@ func (desc *immutable) GetDeclarativeSchemaChangerState() *scpb.DescriptorState
// It overrides the wrapper's implementation to deal with the fact that
// mutable has overridden the definition of IsUncommittedVersion.
func (desc *Mutable) NewBuilder() catalog.DescriptorBuilder {
- return newBuilder(desc.TypeDesc(), hlc.Timestamp{}, desc.IsUncommittedVersion(), desc.changes)
+ b := newBuilder(desc.TypeDesc(), hlc.Timestamp{}, desc.IsUncommittedVersion(), desc.changes)
+ b.SetRawBytesInStorage(desc.GetRawBytesInStorage())
+ return b
}
// NewBuilder implements the catalog.Descriptor interface.
func (desc *immutable) NewBuilder() catalog.DescriptorBuilder {
- return newBuilder(desc.TypeDesc(), hlc.Timestamp{}, desc.IsUncommittedVersion(), desc.changes)
+ b := newBuilder(desc.TypeDesc(), hlc.Timestamp{}, desc.IsUncommittedVersion(), desc.changes)
+ b.SetRawBytesInStorage(desc.GetRawBytesInStorage())
+ return b
}
// PrimaryRegionName implements the TypeDescriptor interface.
@@ -1047,6 +1054,16 @@ func (desc *immutable) SkipNamespace() bool {
return false
}
+// GetRawBytesInStorage implements the catalog.Descriptor interface.
+func (desc *immutable) GetRawBytesInStorage() []byte {
+ return desc.rawBytesInStorage
+}
+
+// GetRawBytesInStorage implements the catalog.Descriptor interface.
+func (desc *Mutable) GetRawBytesInStorage() []byte {
+ return desc.rawBytesInStorage
+}
+
// GetIDClosure implements the TypeDescriptor interface.
func (desc *immutable) GetIDClosure() (map[descpb.ID]struct{}, error) {
ret := make(map[descpb.ID]struct{})
diff --git a/pkg/sql/catalog/typedesc/type_desc_builder.go b/pkg/sql/catalog/typedesc/type_desc_builder.go
index 0d30e0f4c549..3552f621d455 100644
--- a/pkg/sql/catalog/typedesc/type_desc_builder.go
+++ b/pkg/sql/catalog/typedesc/type_desc_builder.go
@@ -35,6 +35,8 @@ type typeDescriptorBuilder struct {
mvccTimestamp hlc.Timestamp
isUncommittedVersion bool
changes catalog.PostDeserializationChanges
+ // This is the raw bytes (tag + data) of the type descriptor in storage.
+ rawBytesInStorage []byte
}
var _ TypeDescriptorBuilder = &typeDescriptorBuilder{}
@@ -118,6 +120,11 @@ func (tdb *typeDescriptorBuilder) RunRestoreChanges(_ func(id descpb.ID) catalog
return nil
}
+// SetRawBytesInStorage implements the catalog.DescriptorBuilder interface.
+func (tdb *typeDescriptorBuilder) SetRawBytesInStorage(rawBytes []byte) {
+ tdb.rawBytesInStorage = append([]byte(nil), rawBytes...) // deep-copy
+}
+
// BuildImmutable implements the catalog.DescriptorBuilder interface.
func (tdb *typeDescriptorBuilder) BuildImmutable() catalog.Descriptor {
return tdb.BuildImmutableType()
@@ -130,6 +137,7 @@ func (tdb *typeDescriptorBuilder) BuildImmutableType() catalog.TypeDescriptor {
desc = tdb.original
}
imm := makeImmutable(desc, tdb.isUncommittedVersion, tdb.changes)
+ imm.rawBytesInStorage = append([]byte(nil), tdb.rawBytesInStorage...) // deep-copy
return &imm
}
@@ -144,10 +152,13 @@ func (tdb *typeDescriptorBuilder) BuildExistingMutableType() *Mutable {
if tdb.maybeModified == nil {
tdb.maybeModified = protoutil.Clone(tdb.original).(*descpb.TypeDescriptor)
}
- clusterVersion := makeImmutable(tdb.original, false, /* isUncommitedVersion */
- catalog.PostDeserializationChanges{})
+ mutableType := makeImmutable(tdb.maybeModified,
+ false /* isUncommitedVersion */, tdb.changes)
+ mutableType.rawBytesInStorage = append([]byte(nil), tdb.rawBytesInStorage...) // deep-copy
+ clusterVersion := makeImmutable(tdb.original,
+ false /* isUncommitedVersion */, catalog.PostDeserializationChanges{})
return &Mutable{
- immutable: makeImmutable(tdb.maybeModified, false /* isUncommitedVersion */, tdb.changes),
+ immutable: mutableType,
ClusterVersion: &clusterVersion,
}
}
@@ -164,8 +175,10 @@ func (tdb *typeDescriptorBuilder) BuildCreatedMutableType() *Mutable {
if desc == nil {
desc = tdb.original
}
+ createdType := makeImmutable(desc, tdb.isUncommittedVersion, tdb.changes)
+ createdType.rawBytesInStorage = append([]byte(nil), tdb.rawBytesInStorage...) // deep-copy
return &Mutable{
- immutable: makeImmutable(desc, tdb.isUncommittedVersion, tdb.changes),
+ immutable: createdType,
}
}
From aa3c59ccb2836ebd04ae89a4a26d14d7ca1bbe0d Mon Sep 17 00:00:00 2001
From: Xiang Gu
Date: Tue, 27 Sep 2022 13:44:47 -0400
Subject: [PATCH 10/16] descs: upgrade `WriteDescToBatch` to use CPut
Previously, `WriteDescToBatch`, which a `desc.Collection` calls to
update a descriptor in storage, used the `Put` primitive to directly
modify the storage layer. We would like to tighten it to use a `CPut`
to prevent inadvertent clobbering of the descriptor.
This PR does that by bookkeeping the raw bytes of the to-be-updated
descriptor inside the descriptor itself, acquired when we first read it
from the storage layer into the `desc.Collection`. The infrastructural
work done in the previous commit then allows us to carry these raw
bytes over as we prepare the `MutableDescriptor` that we pass into this
`WriteDescToBatch` method.
One additional difficulty is that we may need to call
`WriteDescToBatch` more than once in a single transaction. For example,
in the new schema changer, the statement phase and the precommit phase
both run in one transaction, but we call `WriteDescToBatch` at the end
of each stage. Hence, for some DDL statements (e.g. `DROP COLUMN`), we
call `WriteDescToBatch` twice in one transaction. The first call
modifies the descriptor in storage and also adds the descriptor to the
`desc.Collection.uncommitted` set, so the second call will get it from
there. To make `CPut` work correctly for the second call, we need to
take the expected byte slice from the `uncommitted` descriptor set.
This motivates the change to update a descriptor's byte slice field
when it is added to the `uncommitted` descriptor set.
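As a condensed sketch of the resulting write path (names taken from
the `collection.go` hunk below), `WriteDescToBatch` now chooses the
expected value for the `CPut` like so:

    // If the descriptor was already written once in this transaction, the
    // expected bytes live in the uncommitted set; otherwise they were
    // captured when the descriptor was first read from storage.
    var expected []byte
    if exist := tc.uncommitted.getUncommittedByID(desc.GetID()); exist != nil {
        expected = exist.GetRawBytesInStorage()
    } else {
        expected = desc.GetRawBytesInStorage()
    }
    b.CPut(descKey, proto, expected) // fails if storage diverged from expected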
---
pkg/sql/catalog/descs/BUILD.bazel | 1 +
pkg/sql/catalog/descs/collection.go | 16 +++++++++++++++-
pkg/sql/catalog/descs/collection_test.go | 6 +++---
pkg/sql/catalog/descs/uncommitted_descriptors.go | 16 +++++++++++++++-
pkg/sql/catalog/internal/catkv/catalog_query.go | 1 +
pkg/sql/repair.go | 1 +
6 files changed, 36 insertions(+), 5 deletions(-)
diff --git a/pkg/sql/catalog/descs/BUILD.bazel b/pkg/sql/catalog/descs/BUILD.bazel
index 52d4a1e9d4ec..47b00e214850 100644
--- a/pkg/sql/catalog/descs/BUILD.bazel
+++ b/pkg/sql/catalog/descs/BUILD.bazel
@@ -33,6 +33,7 @@ go_library(
"//pkg/clusterversion",
"//pkg/keys",
"//pkg/kv",
+ "//pkg/roachpb",
"//pkg/settings",
"//pkg/settings/cluster",
"//pkg/spanconfig",
diff --git a/pkg/sql/catalog/descs/collection.go b/pkg/sql/catalog/descs/collection.go
index f37a2b5c7cd0..cb12f30ee4c6 100644
--- a/pkg/sql/catalog/descs/collection.go
+++ b/pkg/sql/catalog/descs/collection.go
@@ -269,6 +269,20 @@ func (tc *Collection) WriteDescToBatch(
return err
}
}
+ // Retrieve the expected bytes of `desc` in storage.
+ // If this is the first time we write to `desc` in the transaction, its
+ // expected bytes will be retrieved when we read it into this desc.Collection,
+ // and carried over in it.
+ // If, however, this is not the first time we write to `desc` in the transaction,
+ // which means it has existed in `tc.uncommitted`, we will retrieve the expected
+ // bytes from there.
+ var expected []byte
+ if exist := tc.uncommitted.getUncommittedByID(desc.GetID()); exist != nil {
+ expected = exist.GetRawBytesInStorage()
+ } else {
+ expected = desc.GetRawBytesInStorage()
+ }
+
if err := tc.AddUncommittedDescriptor(ctx, desc); err != nil {
return err
}
@@ -277,7 +291,7 @@ func (tc *Collection) WriteDescToBatch(
if kvTrace {
log.VEventf(ctx, 2, "Put %s -> %s", descKey, proto)
}
- b.Put(descKey, proto)
+ b.CPut(descKey, proto, expected)
return nil
}
diff --git a/pkg/sql/catalog/descs/collection_test.go b/pkg/sql/catalog/descs/collection_test.go
index e75b9afd560d..c0f9b157abcd 100644
--- a/pkg/sql/catalog/descs/collection_test.go
+++ b/pkg/sql/catalog/descs/collection_test.go
@@ -284,7 +284,7 @@ func TestAddUncommittedDescriptorAndMutableResolution(t *testing.T) {
immByNameAfter, err := descriptors.GetImmutableDatabaseByName(ctx, txn, "new_name", flags)
require.NoError(t, err)
require.Equal(t, db.GetVersion(), immByNameAfter.GetVersion())
- require.Equal(t, mut.ImmutableCopy(), immByNameAfter)
+ require.Equal(t, mut.ImmutableCopy().DescriptorProto(), immByNameAfter.DescriptorProto())
_, immByIDAfter, err := descriptors.GetImmutableDatabaseByID(ctx, txn, db.GetID(), flags)
require.NoError(t, err)
@@ -733,7 +733,7 @@ func TestDescriptorCache(t *testing.T) {
}
found := cat.LookupDescriptorEntry(mut.ID)
require.NotEmpty(t, found)
- require.Equal(t, mut.ImmutableCopy(), found)
+ require.Equal(t, mut.ImmutableCopy().DescriptorProto(), found.DescriptorProto())
return nil
}))
})
@@ -768,7 +768,7 @@ func TestDescriptorCache(t *testing.T) {
return err
}
require.Len(t, dbDescs, 4)
- require.Equal(t, mut.ImmutableCopy(), dbDescs[0])
+ require.Equal(t, mut.ImmutableCopy().DescriptorProto(), dbDescs[0].DescriptorProto())
return nil
}))
})
diff --git a/pkg/sql/catalog/descs/uncommitted_descriptors.go b/pkg/sql/catalog/descs/uncommitted_descriptors.go
index b398e1167c65..4d059cc35205 100644
--- a/pkg/sql/catalog/descs/uncommitted_descriptors.go
+++ b/pkg/sql/catalog/descs/uncommitted_descriptors.go
@@ -14,6 +14,7 @@ import (
"context"
"github.com/cockroachdb/cockroach/pkg/keys"
+ "github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/nstree"
@@ -186,7 +187,20 @@ func (ud *uncommittedDescriptors) upsert(
newBytes += mut.ByteSize()
}
ud.mutable.Upsert(mut)
- uncommitted := mut.ImmutableCopy()
+ // Upserting a descriptor into the "uncommitted" set implies
+ // this descriptor is going to be written to storage very soon. We
+ // compute what the raw bytes of this descriptor in storage is going to
+ // look like when that write happens, so that any further update to this
+ // descriptor, which will be retrieved from the "uncommitted" set, will
+ // carry the correct raw bytes in storage with it.
+ var val roachpb.Value
+ if err = val.SetProto(mut.DescriptorProto()); err != nil {
+ return err
+ }
+ rawBytesInStorageAfterPendingWrite := val.TagAndDataBytes()
+ uncommittedBuilder := mut.NewBuilder()
+ uncommittedBuilder.SetRawBytesInStorage(rawBytesInStorageAfterPendingWrite)
+ uncommitted := uncommittedBuilder.BuildImmutable()
newBytes += uncommitted.ByteSize()
ud.uncommitted.Upsert(uncommitted, uncommitted.SkipNamespace())
if err = ud.memAcc.Grow(ctx, newBytes); err != nil {
diff --git a/pkg/sql/catalog/internal/catkv/catalog_query.go b/pkg/sql/catalog/internal/catkv/catalog_query.go
index 5c0c6824dc98..0639130275f2 100644
--- a/pkg/sql/catalog/internal/catkv/catalog_query.go
+++ b/pkg/sql/catalog/internal/catkv/catalog_query.go
@@ -149,6 +149,7 @@ func build(
if err := b.RunPostDeserializationChanges(); err != nil {
return nil, errors.NewAssertionErrorWithWrappedErrf(err, "error during RunPostDeserializationChanges")
}
+ b.SetRawBytesInStorage(rowValue.TagAndDataBytes())
desc := b.BuildImmutable()
if id != desc.GetID() {
return nil, errors.AssertionFailedf("unexpected ID %d in descriptor", desc.GetID())
diff --git a/pkg/sql/repair.go b/pkg/sql/repair.go
index 8c5ffe75f56b..cf4faba4f9e8 100644
--- a/pkg/sql/repair.go
+++ b/pkg/sql/repair.go
@@ -659,6 +659,7 @@ func unsafeReadDescriptor(
if err != nil || b == nil {
return nil, notice, err
}
+ b.SetRawBytesInStorage(descRow.Value.TagAndDataBytes())
return b.BuildExistingMutable(), notice, nil
}
From 4e1299cdad31fe7e278c32aa22de2a39e3cfb589 Mon Sep 17 00:00:00 2001
From: healthy-pod
Date: Mon, 3 Oct 2022 11:05:56 -0700
Subject: [PATCH 11/16] roachtest: skip cdc/bank on ARM64
This code change skips `cdc/bank` on ARM64 because the Confluent CLI
is not available on `linux/arm64`.
Release note: None
---
pkg/cmd/roachtest/tests/cdc.go | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/pkg/cmd/roachtest/tests/cdc.go b/pkg/cmd/roachtest/tests/cdc.go
index f1457ed97a92..f793ebc929c3 100644
--- a/pkg/cmd/roachtest/tests/cdc.go
+++ b/pkg/cmd/roachtest/tests/cdc.go
@@ -25,6 +25,7 @@ import (
"net/url"
"path/filepath"
"regexp"
+ "runtime"
"sort"
"strconv"
"strings"
@@ -309,6 +310,9 @@ func cdcBasicTest(ctx context.Context, t test.Test, c cluster.Cluster, args cdcT
}
func runCDCBank(ctx context.Context, t test.Test, c cluster.Cluster) {
+ if runtime.GOARCH == "arm64" {
+ t.Skip("Skipping cdc/bank under ARM64.")
+ }
// Make the logs dir on every node to work around the `roachprod get logs`
// spam.
c.Run(ctx, c.All(), `mkdir -p logs`)
From 9ebd0380da1c07d538f3138fc0a33b9d55734e81 Mon Sep 17 00:00:00 2001
From: sumeerbhola
Date: Mon, 3 Oct 2022 11:07:27 -0400
Subject: [PATCH 12/16] storage: correctly check that a value is a tombstone
We can't rely on the byte slice having length 0: with the extended
value encoding, a tombstone's raw bytes may be non-empty. This was not
a correctness bug, but it limited wider use of MVCC range tombstones.
Release note: None
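As a sketch, the check now decodes the value and asks it directly
(mirroring the `mvcc.go` hunk below; under the extended value encoding
a tombstone can carry a non-empty header, e.g. a local timestamp, so a
length check is not reliable):

    v, err := DecodeMVCCValue(vRaw)
    if err != nil {
        return false, false, false, err
    }
    if v.IsTombstone() {
        // The latest version of the key is a point tombstone.
        return true, true, false, nil
    }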
---
pkg/storage/mvcc.go | 10 +--
pkg/storage/mvcc_history_test.go | 5 ++
.../delete_range_predicate_continue_tombstone | 72 +++++++++++++++++++
3 files changed, 82 insertions(+), 5 deletions(-)
create mode 100644 pkg/storage/testdata/mvcc_histories/delete_range_predicate_continue_tombstone
diff --git a/pkg/storage/mvcc.go b/pkg/storage/mvcc.go
index 11d02065838d..c528e84ecf08 100644
--- a/pkg/storage/mvcc.go
+++ b/pkg/storage/mvcc.go
@@ -2982,7 +2982,11 @@ func MVCCPredicateDeleteRange(
return false, false, false, roachpb.NewWriteTooOldError(endTime, k.Timestamp.Next(),
k.Key.Clone())
}
- if len(vRaw) == 0 {
+ v, err := DecodeMVCCValue(vRaw)
+ if err != nil {
+ return false, false, false, err
+ }
+ if v.IsTombstone() {
// The latest version of the key is a point tombstone.
return true, true, false, nil
}
@@ -2993,10 +2997,6 @@ func MVCCPredicateDeleteRange(
}
// TODO (msbutler): use MVCCValueHeader to match on job ID predicate
- _, err = DecodeMVCCValue(vRaw)
- if err != nil {
- return false, false, false, err
- }
return true, false, false, nil
}
diff --git a/pkg/storage/mvcc_history_test.go b/pkg/storage/mvcc_history_test.go
index f9b2a81cabcc..08adc1e976f5 100644
--- a/pkg/storage/mvcc_history_test.go
+++ b/pkg/storage/mvcc_history_test.go
@@ -154,6 +154,11 @@ var (
func TestMVCCHistories(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
+ // TODO(storage-team): this prevents us from easily finding bugs which
+ // incorrectly assume simple value encoding. We only find bugs where we are
+ // explicitly using the extended encoding by setting a localTs. One way to
+ // handle the different test output with extended value encoding would be to
+ // duplicate each test file for the two cases.
storage.DisableMetamorphicSimpleValueEncoding(t)
ctx := context.Background()
diff --git a/pkg/storage/testdata/mvcc_histories/delete_range_predicate_continue_tombstone b/pkg/storage/testdata/mvcc_histories/delete_range_predicate_continue_tombstone
new file mode 100644
index 000000000000..da83301d6f09
--- /dev/null
+++ b/pkg/storage/testdata/mvcc_histories/delete_range_predicate_continue_tombstone
@@ -0,0 +1,72 @@
+# Tests that MVCCPredicateDeleteRange will continue a run when encountering
+# tombstones that do not satisfy the predicate.
+# Sets up the following dataset, where x is a tombstone.
+# T
+# 3 a3 d3 f3 g3
+# 2 x x x
+# 1 b1 c1
+# a b c d e f g
+#
+run stats ok
+put k=b ts=1 v=b1
+put k=c ts=1 v=c1
+del k=b ts=2 localTs=1
+del k=c ts=2 localTs=1
+del k=e ts=2 localTs=1
+put k=a ts=3 v=a3
+put k=d ts=3 v=d3
+put k=f ts=3 v=f3
+put k=g ts=3 v=g3
+----
+>> put k=b ts=1 v=b1
+stats: key_count=+1 key_bytes=+14 val_count=+1 val_bytes=+7 live_count=+1 live_bytes=+21
+>> put k=c ts=1 v=c1
+stats: key_count=+1 key_bytes=+14 val_count=+1 val_bytes=+7 live_count=+1 live_bytes=+21
+>> del k=b ts=2 localTs=1
+del: "b": found key true
+stats: key_bytes=+12 val_count=+1 val_bytes=+13 live_count=-1 live_bytes=-21 gc_bytes_age=+4508
+>> del k=c ts=2 localTs=1
+del: "c": found key true
+stats: key_bytes=+12 val_count=+1 val_bytes=+13 live_count=-1 live_bytes=-21 gc_bytes_age=+4508
+>> del k=e ts=2 localTs=1
+del: "e": found key false
+stats: key_count=+1 key_bytes=+14 val_count=+1 val_bytes=+13 gc_bytes_age=+2646
+>> put k=a ts=3 v=a3
+stats: key_count=+1 key_bytes=+14 val_count=+1 val_bytes=+7 live_count=+1 live_bytes=+21
+>> put k=d ts=3 v=d3
+stats: key_count=+1 key_bytes=+14 val_count=+1 val_bytes=+7 live_count=+1 live_bytes=+21
+>> put k=f ts=3 v=f3
+stats: key_count=+1 key_bytes=+14 val_count=+1 val_bytes=+7 live_count=+1 live_bytes=+21
+>> put k=g ts=3 v=g3
+stats: key_count=+1 key_bytes=+14 val_count=+1 val_bytes=+7 live_count=+1 live_bytes=+21
+>> at end:
+data: "a"/3.000000000,0 -> /BYTES/a3
+data: "b"/2.000000000,0 -> {localTs=1.000000000,0}/
+data: "b"/1.000000000,0 -> /BYTES/b1
+data: "c"/2.000000000,0 -> {localTs=1.000000000,0}/
+data: "c"/1.000000000,0 -> /BYTES/c1
+data: "d"/3.000000000,0 -> /BYTES/d3
+data: "e"/2.000000000,0 -> {localTs=1.000000000,0}/
+data: "f"/3.000000000,0 -> /BYTES/f3
+data: "g"/3.000000000,0 -> /BYTES/g3
+stats: key_count=7 key_bytes=122 val_count=9 val_bytes=81 live_count=4 live_bytes=84 gc_bytes_age=11662
+
+# Even though b, c, and e do not satisfy the predicate, their latest versions
+# are tombstones, so the run continues and we write a range tombstone over [a,g]@4.
+run stats ok
+del_range_pred k=a end=z ts=4 startTime=2 rangeThreshold=3
+----
+>> del_range_pred k=a end=z ts=4 startTime=2 rangeThreshold=3
+stats: range_key_count=+1 range_key_bytes=+14 range_val_count=+1 live_count=-4 live_bytes=-84 gc_bytes_age=+9408
+>> at end:
+rangekey: {a-g\x00}/[4.000000000,0=/]
+data: "a"/3.000000000,0 -> /BYTES/a3
+data: "b"/2.000000000,0 -> {localTs=1.000000000,0}/
+data: "b"/1.000000000,0 -> /BYTES/b1
+data: "c"/2.000000000,0 -> {localTs=1.000000000,0}/
+data: "c"/1.000000000,0 -> /BYTES/c1
+data: "d"/3.000000000,0 -> /BYTES/d3
+data: "e"/2.000000000,0 -> {localTs=1.000000000,0}/
+data: "f"/3.000000000,0 -> /BYTES/f3
+data: "g"/3.000000000,0 -> /BYTES/g3
+stats: key_count=7 key_bytes=122 val_count=9 val_bytes=81 range_key_count=1 range_key_bytes=14 range_val_count=1 gc_bytes_age=21070
From b0470c5dd155a5361cca7b5c7346b0deac3bd010 Mon Sep 17 00:00:00 2001
From: healthy-pod
Date: Wed, 28 Sep 2022 14:58:47 -0700
Subject: [PATCH 13/16] build: support running extra checks on ARM
This change updates the CI scripts for acceptance, bench,
and roachtest so that they can run on ARM machines.
Release note: None
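All three scripts share the same detection: map the host architecture to a
cross-compilation config. The rule, sketched in Go for reference (the scripts
themselves use a uname -m match):

    package main

    import (
        "fmt"
        "runtime"
    )

    // crossConfig mirrors the uname -m check in the scripts: ARM hosts
    // build with the crosslinuxarm config, everything else with crosslinux.
    func crossConfig() string {
        if runtime.GOARCH == "arm64" {
            return "crosslinuxarm"
        }
        return "crosslinux"
    }

    func main() {
        fmt.Printf("bazel build --config=%s --config=ci //pkg/cmd/cockroach-short\n",
            crossConfig())
    }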
---
build/teamcity/cockroach/ci/tests/acceptance.sh | 11 +++++++++--
build/teamcity/cockroach/ci/tests/bench_impl.sh | 8 +++++++-
.../cockroach/ci/tests/local_roachtest_impl.sh | 10 ++++++++--
3 files changed, 24 insertions(+), 5 deletions(-)
diff --git a/build/teamcity/cockroach/ci/tests/acceptance.sh b/build/teamcity/cockroach/ci/tests/acceptance.sh
index 18ca2c5c0e9e..687743d4090d 100755
--- a/build/teamcity/cockroach/ci/tests/acceptance.sh
+++ b/build/teamcity/cockroach/ci/tests/acceptance.sh
@@ -6,8 +6,15 @@ dir="$(dirname $(dirname $(dirname $(dirname $(dirname "${0}")))))"
source "$dir/teamcity-support.sh"
source "$dir/teamcity-bazel-support.sh" # For run_bazel
+if [[ "$(uname -m)" =~ (arm64|aarch64)$ ]]; then
+ export CROSSLINUX_CONFIG="crosslinuxarm"
+else
+ export CROSSLINUX_CONFIG="crosslinux"
+fi
+
tc_start_block "Build cockroach"
-run_bazel /usr/bin/bash -c 'bazel build --config crosslinux --config ci //pkg/cmd/cockroach-short && cp $(bazel info bazel-bin --config crosslinux --config ci)/pkg/cmd/cockroach-short/cockroach-short_/cockroach-short /artifacts/cockroach && chmod a+w /artifacts/cockroach'
+build_script='bazel build --config $1 --config ci //pkg/cmd/cockroach-short && cp $(bazel info bazel-bin --config $1 --config ci)/pkg/cmd/cockroach-short/cockroach-short_/cockroach-short artifacts/cockroach && chmod a+w artifacts/cockroach'
+run_bazel /usr/bin/bash -c "$build_script" -- "$CROSSLINUX_CONFIG"
tc_end_block "Build cockroach"
export ARTIFACTSDIR=$PWD/artifacts/acceptance
@@ -21,7 +28,7 @@ BAZCI=$(bazel info bazel-bin --config=ci)/pkg/cmd/bazci/bazci_/bazci
$BAZCI --artifacts_dir=$PWD/artifacts -- \
test //pkg/acceptance:acceptance_test \
- --config=crosslinux --config=ci \
+ --config=$CROSSLINUX_CONFIG --config=ci \
"--sandbox_writable_path=$ARTIFACTSDIR" \
"--test_tmpdir=$ARTIFACTSDIR" \
--test_arg=-l="$ARTIFACTSDIR" \
diff --git a/build/teamcity/cockroach/ci/tests/bench_impl.sh b/build/teamcity/cockroach/ci/tests/bench_impl.sh
index 508649f13102..da513f574510 100755
--- a/build/teamcity/cockroach/ci/tests/bench_impl.sh
+++ b/build/teamcity/cockroach/ci/tests/bench_impl.sh
@@ -5,6 +5,12 @@ set -euo pipefail
dir="$(dirname $(dirname $(dirname $(dirname $(dirname "${0}")))))"
source "$dir/teamcity/util.sh"
+if [[ "$(uname -m)" =~ (arm64|aarch64)$ ]]; then
+ export CROSSLINUX_CONFIG="crosslinuxarm"
+else
+ export CROSSLINUX_CONFIG="crosslinux"
+fi
+
# Enumerate test targets that have benchmarks.
all_tests=$(bazel query 'kind(go_test, //pkg/...)' --output=label)
pkgs=$(git grep -l '^func Benchmark' -- 'pkg/*_test.go' | rev | cut -d/ -f2- | rev | sort | uniq)
@@ -21,7 +27,7 @@ do
tc_start_block "Bench $target"
# We need the `test_sharding_strategy` flag or else the benchmarks will
# fail to run sharded tests like //pkg/sql/importer:importer_test.
- bazel run --config=test --config=crosslinux --config=ci --test_sharding_strategy=disabled $target -- \
+ bazel run --config=test --config=$CROSSLINUX_CONFIG --config=ci --test_sharding_strategy=disabled $target -- \
-test.bench=. -test.benchtime=1ns -test.short -test.run=-
tc_end_block "Bench $target"
done
diff --git a/build/teamcity/cockroach/ci/tests/local_roachtest_impl.sh b/build/teamcity/cockroach/ci/tests/local_roachtest_impl.sh
index 91a5f5b3ac71..65027e10b649 100755
--- a/build/teamcity/cockroach/ci/tests/local_roachtest_impl.sh
+++ b/build/teamcity/cockroach/ci/tests/local_roachtest_impl.sh
@@ -2,11 +2,17 @@
set -euo pipefail
-bazel build --config=crosslinux --config=ci //pkg/cmd/cockroach-short \
+if [[ "$(uname -m)" =~ (arm64|aarch64)$ ]]; then
+ export CROSSLINUX_CONFIG="crosslinuxarm"
+else
+ export CROSSLINUX_CONFIG="crosslinux"
+fi
+
+bazel build --config=$CROSSLINUX_CONFIG --config=ci //pkg/cmd/cockroach-short \
//pkg/cmd/roachtest \
//pkg/cmd/workload
-BAZEL_BIN=$(bazel info bazel-bin --config=crosslinux --config=ci)
+BAZEL_BIN=$(bazel info bazel-bin --config=$CROSSLINUX_CONFIG --config=ci)
$BAZEL_BIN/pkg/cmd/roachtest/roachtest_/roachtest run acceptance kv/splits cdc/bank \
--local \
--parallelism=1 \
From bfbef33a4dcb9663c10326ded46c5abfeb812f0e Mon Sep 17 00:00:00 2001
From: Yahor Yuzefovich
Date: Fri, 30 Sep 2022 16:29:48 -0700
Subject: [PATCH 14/16] stmtdiagnostics: remove conditional request from
registry after completion
Previously, we had a minor bug in how we handled conditional diagnostics
requests when a bundle satisfied the condition: we correctly updated the
corresponding system table, but we forgot to remove the request from the
local registry. As a result, we would continue collecting conditional
bundles until the local node polled the system table and updated its
registry (every 10 seconds by default). This commit fixes that issue.
Additionally, it updates the tests to enforce that the in-memory registry
doesn't contain completed requests.
Release note: None
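The new MaybeRemoveRequest boils the removal decision down to one rule. A
simplified sketch of that rule (abbreviated boolean inputs, not the actual
registry types):

    package stmtdiag

    // shouldRemove reports whether a completed request should be dropped
    // from the in-memory registry: either its condition was satisfied and
    // continuous collection doesn't apply, or the request has expired.
    func shouldRemove(condSatisfied, continueCollecting, expired bool) bool {
        return (condSatisfied && !continueCollecting) || expired
    }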
---
pkg/sql/instrumentation.go | 7 ++-
.../stmtdiagnostics/statement_diagnostics.go | 54 +++++++++----------
.../statement_diagnostics_helpers_test.go | 4 +-
.../statement_diagnostics_test.go | 17 ++++--
4 files changed, 46 insertions(+), 36 deletions(-)
diff --git a/pkg/sql/instrumentation.go b/pkg/sql/instrumentation.go
index 14ddffdc608e..4f30c6047e71 100644
--- a/pkg/sql/instrumentation.go
+++ b/pkg/sql/instrumentation.go
@@ -363,9 +363,8 @@ func (ih *instrumentationHelper) Finish(
p.SessionData(),
)
phaseTimes := statsCollector.PhaseTimes()
- if ih.stmtDiagnosticsRecorder.IsExecLatencyConditionMet(
- ih.diagRequestID, ih.diagRequest, phaseTimes.GetServiceLatencyNoOverhead(),
- ) {
+ execLatency := phaseTimes.GetServiceLatencyNoOverhead()
+ if ih.stmtDiagnosticsRecorder.IsConditionSatisfied(ih.diagRequest, execLatency) {
placeholders := p.extendedEvalCtx.Placeholders
ob := ih.emitExplainAnalyzePlanToOutputBuilder(
explain.Flags{Verbose: true, ShowTypes: true},
@@ -377,9 +376,9 @@ func (ih *instrumentationHelper) Finish(
ctx, cfg.DB, ie.(*InternalExecutor), &p.curPlan, ob.BuildString(), trace, placeholders,
)
bundle.insert(ctx, ih.fingerprint, ast, cfg.StmtDiagnosticsRecorder, ih.diagRequestID, ih.diagRequest)
- ih.stmtDiagnosticsRecorder.RemoveOngoing(ih.diagRequestID, ih.diagRequest)
telemetry.Inc(sqltelemetry.StatementDiagnosticsCollectedCounter)
}
+ ih.stmtDiagnosticsRecorder.MaybeRemoveRequest(ih.diagRequestID, ih.diagRequest, execLatency)
}
// If there was a communication error already, no point in setting any
diff --git a/pkg/sql/stmtdiagnostics/statement_diagnostics.go b/pkg/sql/stmtdiagnostics/statement_diagnostics.go
index 77e8ecb96eac..c8a07e73947d 100644
--- a/pkg/sql/stmtdiagnostics/statement_diagnostics.go
+++ b/pkg/sql/stmtdiagnostics/statement_diagnostics.go
@@ -128,6 +128,14 @@ func (r *Request) isConditional() bool {
return r.minExecutionLatency != 0
}
+// continueCollecting returns true if we want to continue collecting bundles for
+// this request.
+func (r *Request) continueCollecting(st *cluster.Settings) bool {
+ return collectUntilExpiration.Get(&st.SV) && // continuous collection must be enabled
+ r.samplingProbability != 0 && !r.expiresAt.IsZero() && // conditions for continuous collection must be set
+ !r.isExpired(timeutil.Now()) // the request must not have expired yet
+}
+
// NewRegistry constructs a new Registry.
func NewRegistry(ie sqlutil.InternalExecutor, db *kv.DB, st *cluster.Settings) *Registry {
r := &Registry{
@@ -410,35 +418,28 @@ func (r *Registry) CancelRequest(ctx context.Context, requestID int64) error {
return nil
}
-// IsExecLatencyConditionMet returns true if the completed request's execution
-// latency satisfies the request's condition. If false is returned, it inlines
-// the logic of RemoveOngoing.
-func (r *Registry) IsExecLatencyConditionMet(
- requestID RequestID, req Request, execLatency time.Duration,
-) bool {
- if req.minExecutionLatency <= execLatency {
- return true
- }
- // This is a conditional request and the condition is not satisfied, so we
- // only need to remove the request if it has expired.
- if req.isExpired(timeutil.Now()) {
- r.mu.Lock()
- defer r.mu.Unlock()
- delete(r.mu.requestFingerprints, requestID)
- }
- return false
+// IsConditionSatisfied returns whether the completed request satisfies its
+// condition.
+func (r *Registry) IsConditionSatisfied(req Request, execLatency time.Duration) bool {
+ return req.minExecutionLatency <= execLatency
}
-// RemoveOngoing removes the given request from the list of ongoing queries.
-func (r *Registry) RemoveOngoing(requestID RequestID, req Request) {
- r.mu.Lock()
- defer r.mu.Unlock()
- if req.isConditional() {
- if req.isExpired(timeutil.Now()) {
+// MaybeRemoveRequest checks whether the request needs to be removed from the
+// local Registry and removes it if so. Note that the registries on other nodes
+// will learn about it via polling of the system table.
+func (r *Registry) MaybeRemoveRequest(requestID RequestID, req Request, execLatency time.Duration) {
+ // We should remove the request from the registry if its condition is
+ // satisfied unless we want to continue collecting bundles for this request.
+ shouldRemove := r.IsConditionSatisfied(req, execLatency) && !req.continueCollecting(r.st)
+ // Always remove the expired requests.
+ if shouldRemove || req.isExpired(timeutil.Now()) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ if req.isConditional() {
delete(r.mu.requestFingerprints, requestID)
+ } else {
+ delete(r.mu.unconditionalOngoing, requestID)
}
- } else {
- delete(r.mu.unconditionalOngoing, requestID)
}
}
@@ -448,8 +449,7 @@ func (r *Registry) RemoveOngoing(requestID RequestID, req Request) {
// case ShouldCollectDiagnostics will return true again on this node for the
// same diagnostics request only for conditional requests.
//
-// If shouldCollect is true, RemoveOngoing needs to be called (which is inlined
-// by IsExecLatencyConditionMet when that returns false).
+// If shouldCollect is true, MaybeRemoveRequest needs to be called.
func (r *Registry) ShouldCollectDiagnostics(
ctx context.Context, fingerprint string,
) (shouldCollect bool, reqID RequestID, req Request) {
diff --git a/pkg/sql/stmtdiagnostics/statement_diagnostics_helpers_test.go b/pkg/sql/stmtdiagnostics/statement_diagnostics_helpers_test.go
index d8ec19eba234..541b3414c94b 100644
--- a/pkg/sql/stmtdiagnostics/statement_diagnostics_helpers_test.go
+++ b/pkg/sql/stmtdiagnostics/statement_diagnostics_helpers_test.go
@@ -16,10 +16,10 @@ import (
)
// TestingFindRequest exports findRequest for testing purposes.
-func (r *Registry) TestingFindRequest(requestID RequestID) bool {
+func (r *Registry) TestingFindRequest(requestID int64) bool {
r.mu.Lock()
defer r.mu.Unlock()
- return r.findRequestLocked(requestID)
+ return r.findRequestLocked(RequestID(requestID))
}
// InsertRequestInternal exposes the form of insert which returns the request ID
diff --git a/pkg/sql/stmtdiagnostics/statement_diagnostics_test.go b/pkg/sql/stmtdiagnostics/statement_diagnostics_test.go
index a9cbbd92d0f4..81d71d5d9a15 100644
--- a/pkg/sql/stmtdiagnostics/statement_diagnostics_test.go
+++ b/pkg/sql/stmtdiagnostics/statement_diagnostics_test.go
@@ -45,13 +45,24 @@ func TestDiagnosticsRequest(t *testing.T) {
s, db, _ := serverutils.StartServer(t, base.TestServerArgs{})
ctx := context.Background()
defer s.Stopper().Stop(ctx)
+ registry := s.ExecutorConfig().(sql.ExecutorConfig).StmtDiagnosticsRecorder
_, err := db.Exec("CREATE TABLE test (x int PRIMARY KEY)")
require.NoError(t, err)
- completedQuery := "SELECT completed, statement_diagnostics_id FROM system.statement_diagnostics_requests WHERE ID = $1"
+ var collectUntilExpirationEnabled bool
isCompleted := func(reqID int64) (completed bool, diagnosticsID gosql.NullInt64) {
+ completedQuery := "SELECT completed, statement_diagnostics_id FROM system.statement_diagnostics_requests WHERE ID = $1"
reqRow := db.QueryRow(completedQuery, reqID)
require.NoError(t, reqRow.Scan(&completed, &diagnosticsID))
+ if completed && !collectUntilExpirationEnabled {
+ // Ensure that if the request was completed and the continuous
+ // collection is not enabled, the local registry no longer has the
+ // request.
+ require.False(
+ t, registry.TestingFindRequest(reqID), "request was "+
+ "completed and should have been removed from the registry",
+ )
+ }
return completed, diagnosticsID
}
checkNotCompleted := func(reqID int64) {
@@ -76,6 +87,7 @@ func TestDiagnosticsRequest(t *testing.T) {
return nil
}
setCollectUntilExpiration := func(v bool) {
+ collectUntilExpirationEnabled = v
_, err := db.Exec(
fmt.Sprintf("SET CLUSTER SETTING sql.stmt_diagnostics.collect_continuously.enabled = %t", v))
require.NoError(t, err)
@@ -86,7 +98,6 @@ func TestDiagnosticsRequest(t *testing.T) {
require.NoError(t, err)
}
- registry := s.ExecutorConfig().(sql.ExecutorConfig).StmtDiagnosticsRecorder
var minExecutionLatency, expiresAfter time.Duration
var samplingProbability float64
@@ -445,7 +456,7 @@ func TestDiagnosticsRequest(t *testing.T) {
// We should not find the request, and subsequent executions should not
// capture anything.
testutils.SucceedsSoon(t, func() error {
- if found := registry.TestingFindRequest(stmtdiagnostics.RequestID(reqID)); found {
+ if found := registry.TestingFindRequest(reqID); found {
return errors.New("expected expired request to no longer be tracked")
}
return nil
From 37a0080f8e97801efd56e8cf8c989c29e0449829 Mon Sep 17 00:00:00 2001
From: Rebecca Taft
Date: Mon, 3 Oct 2022 10:51:55 -0500
Subject: [PATCH 15/16] roachtest: disable decimal columns in costfuzz and
unoptimized tests
The use of decimal columns was making the costfuzz and unoptimized-query-oracle
tests flaky. This commit disables generation of decimal columns as a temporary
mitigation for these flakes.
Fixes #88547
Release note: None
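Both randScalarType and randType implement the option by resampling until a
non-decimal type comes up. A generic sketch of that rejection loop
(hypothetical helper, not the sqlsmith API; like the smither, it assumes at
least one non-excluded type exists):

    package main

    import (
        "fmt"
        "math/rand"
    )

    // pickType resamples until the chosen type is not the excluded one.
    // The loop terminates as long as the slice contains at least one
    // allowed type.
    func pickType(rnd *rand.Rand, types []string, excluded string) string {
        t := types[rnd.Intn(len(types))]
        for t == excluded {
            t = types[rnd.Intn(len(types))]
        }
        return t
    }

    func main() {
        rnd := rand.New(rand.NewSource(42))
        fmt.Println(pickType(rnd, []string{"INT", "DECIMAL", "STRING"}, "DECIMAL"))
    }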
---
pkg/cmd/roachtest/tests/query_comparison_util.go | 2 +-
pkg/cmd/smith/main.go | 1 +
pkg/internal/sqlsmith/scalar.go | 3 +++
pkg/internal/sqlsmith/sqlsmith.go | 6 ++++++
pkg/internal/sqlsmith/type.go | 16 ++++++++++++++--
5 files changed, 25 insertions(+), 3 deletions(-)
diff --git a/pkg/cmd/roachtest/tests/query_comparison_util.go b/pkg/cmd/roachtest/tests/query_comparison_util.go
index d042de6ad5a1..86cadc9698b2 100644
--- a/pkg/cmd/roachtest/tests/query_comparison_util.go
+++ b/pkg/cmd/roachtest/tests/query_comparison_util.go
@@ -177,7 +177,7 @@ func runOneRoundQueryComparison(
sqlsmith.DisableMutations(), sqlsmith.DisableNondeterministicFns(), sqlsmith.DisableLimits(),
sqlsmith.UnlikelyConstantPredicate(), sqlsmith.FavorCommonData(),
sqlsmith.UnlikelyRandomNulls(), sqlsmith.DisableCrossJoins(),
- sqlsmith.DisableIndexHints(), sqlsmith.DisableWith(),
+ sqlsmith.DisableIndexHints(), sqlsmith.DisableWith(), sqlsmith.DisableDecimals(),
sqlsmith.LowProbabilityWhereClauseWithJoinTables(),
sqlsmith.SetComplexity(.3),
sqlsmith.SetScalarComplexity(.1),
diff --git a/pkg/cmd/smith/main.go b/pkg/cmd/smith/main.go
index a0f52936aedb..e5058155fe84 100644
--- a/pkg/cmd/smith/main.go
+++ b/pkg/cmd/smith/main.go
@@ -73,6 +73,7 @@ var (
"CompareMode": sqlsmith.CompareMode(),
"PostgresMode": sqlsmith.PostgresMode(),
"MutatingMode": sqlsmith.MutatingMode(),
+ "DisableDecimals": sqlsmith.DisableDecimals(),
}
smitherOpts []string
)
diff --git a/pkg/internal/sqlsmith/scalar.go b/pkg/internal/sqlsmith/scalar.go
index 06a13f7adcba..3cd6c63747bd 100644
--- a/pkg/internal/sqlsmith/scalar.go
+++ b/pkg/internal/sqlsmith/scalar.go
@@ -223,6 +223,9 @@ func getColRef(s *Smither, typ *types.T, refs colRefs) (tree.TypedExpr, *colRef,
return nil, nil, false
}
col := cols[s.rnd.Intn(len(cols))]
+ if s.disableDecimals && col.typ.Family() == types.DecimalFamily {
+ return nil, nil, false
+ }
return col.typedExpr(), col, true
}
diff --git a/pkg/internal/sqlsmith/sqlsmith.go b/pkg/internal/sqlsmith/sqlsmith.go
index d902500bd910..383fbd8d8f6e 100644
--- a/pkg/internal/sqlsmith/sqlsmith.go
+++ b/pkg/internal/sqlsmith/sqlsmith.go
@@ -98,6 +98,7 @@ type Smither struct {
lowProbWhereWithJoinTables bool
disableInsertSelect bool
disableDivision bool
+ disableDecimals bool
bulkSrv *httptest.Server
bulkFiles map[string][]byte
@@ -444,6 +445,11 @@ var DisableDivision = simpleOption("disable division", func(s *Smither) {
s.disableDivision = true
})
+// DisableDecimals disables use of decimal type columns in the query.
+var DisableDecimals = simpleOption("disable decimals", func(s *Smither) {
+ s.disableDecimals = true
+})
+
// CompareMode causes the Smither to generate statements that have
// deterministic output.
var CompareMode = multiOption(
diff --git a/pkg/internal/sqlsmith/type.go b/pkg/internal/sqlsmith/type.go
index e273a0d75ff1..df455b5328c9 100644
--- a/pkg/internal/sqlsmith/type.go
+++ b/pkg/internal/sqlsmith/type.go
@@ -54,7 +54,13 @@ func (s *Smither) randScalarType() *types.T {
if s.types != nil {
scalarTypes = s.types.scalarTypes
}
- return randgen.RandTypeFromSlice(s.rnd, scalarTypes)
+ typ := randgen.RandTypeFromSlice(s.rnd, scalarTypes)
+ if s.disableDecimals {
+ for typ.Family() == types.DecimalFamily {
+ typ = randgen.RandTypeFromSlice(s.rnd, scalarTypes)
+ }
+ }
+ return typ
}
// isScalarType returns true if t is a member of types.Scalar, or a user defined
@@ -81,7 +87,13 @@ func (s *Smither) randType() *types.T {
if s.types != nil {
seedTypes = s.types.seedTypes
}
- return randgen.RandTypeFromSlice(s.rnd, seedTypes)
+ typ := randgen.RandTypeFromSlice(s.rnd, seedTypes)
+ if s.disableDecimals {
+ for typ.Family() == types.DecimalFamily {
+ typ = randgen.RandTypeFromSlice(s.rnd, seedTypes)
+ }
+ }
+ return typ
}
func (s *Smither) makeDesiredTypes() []*types.T {
From 29ce5b1239d368de992e2f02afc6ccdee1cbc817 Mon Sep 17 00:00:00 2001
From: Shiranka Miskin
Date: Mon, 3 Oct 2022 12:08:03 +0000
Subject: [PATCH 16/16] changefeedccl: verify changefeed failure for reverted
import
Resolves #82943
A new test verifies that the changefeed still fails with a table offline
error even if the import that took the table offline was reverted. This
is needed to ensure we don't see undesired behavior in cases where
rangefeeds emit range tombstones.
Release note: None
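The test leans on a poll-until-error-matches helper. A generic sketch of the
assumed semantics of requireErrorSoon (a stand-in, not the actual test
helper):

    package main

    import (
        "errors"
        "fmt"
        "regexp"
        "time"
    )

    // retryUntilErrorMatches polls next() until it yields an error matching
    // re, or the timeout elapses.
    func retryUntilErrorMatches(next func() error, re *regexp.Regexp, timeout time.Duration) error {
        deadline := time.Now().Add(timeout)
        for time.Now().Before(deadline) {
            if err := next(); err != nil && re.MatchString(err.Error()) {
                return nil
            }
            time.Sleep(100 * time.Millisecond)
        }
        return errors.New("timed out waiting for matching error")
    }

    func main() {
        re := regexp.MustCompile(`offline table`)
        err := retryUntilErrorMatches(func() error {
            return errors.New(`CHANGEFEED cannot target offline table: for_import`)
        }, re, time.Second)
        fmt.Println(err) // <nil>
    }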
---
pkg/ccl/changefeedccl/changefeed_test.go | 69 ++++++++++++++++++------
1 file changed, 53 insertions(+), 16 deletions(-)
diff --git a/pkg/ccl/changefeedccl/changefeed_test.go b/pkg/ccl/changefeedccl/changefeed_test.go
index 69434151b5b2..e3e3f98780d3 100644
--- a/pkg/ccl/changefeedccl/changefeed_test.go
+++ b/pkg/ccl/changefeedccl/changefeed_test.go
@@ -2648,25 +2648,62 @@ func TestChangefeedFailOnTableOffline(t *testing.T) {
}))
defer dataSrv.Close()
- testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) {
+ cdcTestNamed(t, "import fails changefeed", func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(s.DB)
sqlDB.Exec(t, "SET CLUSTER SETTING kv.closed_timestamp.target_duration = '50ms'")
- t.Run("import fails changefeed", func(t *testing.T) {
- sqlDB.Exec(t, `CREATE TABLE for_import (a INT PRIMARY KEY, b INT)`)
- defer sqlDB.Exec(t, `DROP TABLE for_import`)
- sqlDB.Exec(t, `INSERT INTO for_import VALUES (0, NULL)`)
- forImport := feed(t, f, `CREATE CHANGEFEED FOR for_import `)
- defer closeFeed(t, forImport)
- assertPayloads(t, forImport, []string{
- `for_import: [0]->{"after": {"a": 0, "b": null}}`,
- })
- sqlDB.Exec(t, `IMPORT INTO for_import CSV DATA ($1)`, dataSrv.URL)
- requireErrorSoon(context.Background(), t, forImport,
- regexp.MustCompile(`CHANGEFEED cannot target offline table: for_import \(offline reason: "importing"\)`))
- })
- }
+ sqlDB.Exec(t, `CREATE TABLE for_import (a INT PRIMARY KEY, b INT)`)
+ defer sqlDB.Exec(t, `DROP TABLE for_import`)
+ sqlDB.Exec(t, `INSERT INTO for_import VALUES (0, NULL)`)
+ forImport := feed(t, f, `CREATE CHANGEFEED FOR for_import `)
+ defer closeFeed(t, forImport)
+ assertPayloads(t, forImport, []string{
+ `for_import: [0]->{"after": {"a": 0, "b": null}}`,
+ })
+ sqlDB.Exec(t, `IMPORT INTO for_import CSV DATA ($1)`, dataSrv.URL)
+ requireErrorSoon(context.Background(), t, forImport,
+ regexp.MustCompile(`CHANGEFEED cannot target offline table: for_import \(offline reason: "importing"\)`))
+ })
- cdcTest(t, testFn)
+ cdcTestNamedWithSystem(t, "reverted import fails changefeed with earlier cursor", func(t *testing.T, s TestServerWithSystem, f cdctest.TestFeedFactory) {
+ sysSQLDB := sqlutils.MakeSQLRunner(s.SystemDB)
+ sysSQLDB.Exec(t, "SET CLUSTER SETTING kv.bulk_io_write.small_write_size = '1'")
+ sysSQLDB.Exec(t, "SET CLUSTER SETTING storage.mvcc.range_tombstones.enabled = true")
+
+ sqlDB := sqlutils.MakeSQLRunner(s.DB)
+ sqlDB.Exec(t, `CREATE TABLE for_import (a INT PRIMARY KEY, b INT)`)
+
+ var start string
+ sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&start)
+ sqlDB.Exec(t, "INSERT INTO for_import VALUES (0, 10);")
+
+ // Start an import job which will immediately pause after ingestion
+ sqlDB.Exec(t, "SET CLUSTER SETTING jobs.debug.pausepoints = 'import.after_ingest';")
+ go func() {
+ sqlDB.ExpectErr(t, `pause point`, `IMPORT INTO for_import CSV DATA ($1);`, dataSrv.URL)
+ }()
+ sqlDB.CheckQueryResultsRetry(
+ t,
+ fmt.Sprintf(`SELECT count(*) FROM [SHOW JOBS] WHERE job_type='IMPORT' AND status='%s'`, jobs.StatusPaused),
+ [][]string{{"1"}},
+ )
+
+ // Cancel to trigger a revert and verify revert completion
+ var jobID string
+ sqlDB.QueryRow(t, `SELECT job_id FROM [SHOW JOBS] where job_type='IMPORT'`).Scan(&jobID)
+ sqlDB.Exec(t, `CANCEL JOB $1`, jobID)
+ sqlDB.CheckQueryResultsRetry(
+ t,
+ fmt.Sprintf(`SELECT count(*) FROM [SHOW JOBS] WHERE job_type='IMPORT' AND status='%s'`, jobs.StatusCanceled),
+ [][]string{{"1"}},
+ )
+ sqlDB.CheckQueryResultsRetry(t, "SELECT count(*) FROM for_import", [][]string{{"1"}})
+
+ // Changefeed should fail regardless
+ forImport := feed(t, f, `CREATE CHANGEFEED FOR for_import WITH cursor=$1`, start)
+ defer closeFeed(t, forImport)
+ requireErrorSoon(context.Background(), t, forImport,
+ regexp.MustCompile(`CHANGEFEED cannot target offline table: for_import \(offline reason: "importing"\)`))
+ })
}
func TestChangefeedRestartMultiNode(t *testing.T) {